-rw-r--r--.credo.exs1
-rw-r--r--.devcontainer/Dockerfile47
-rw-r--r--.devcontainer/devcontainer.json12
-rw-r--r--.devcontainer/docker-compose.yaml52
-rw-r--r--.github/ISSUE_TEMPLATE/config.yml14
-rw-r--r--.gitignore4
-rw-r--r--INSTALL.Unix.md6
-rw-r--r--Makefile27
-rw-r--r--Makefile.win15
-rw-r--r--NOTICE2
-rw-r--r--README.rst4
-rw-r--r--build-aux/Jenkinsfile.full109
-rw-r--r--build-aux/Jenkinsfile.pr23
-rwxr-xr-xbuild-aux/show-test-results.py70
-rw-r--r--config/integration.exs3
-rwxr-xr-xconfigure3
-rw-r--r--dev/boot_node.erl148
-rwxr-xr-xdev/make_boot_script9
-rw-r--r--dev/monitor_parent.erl43
-rwxr-xr-xdev/run32
-rw-r--r--emilio.config4
-rw-r--r--erlang_ls.config10
-rw-r--r--mix.exs30
-rw-r--r--mix.lock5
-rw-r--r--rebar.config.script29
-rw-r--r--rel/apps/couch_epi.config7
-rw-r--r--rel/files/eunit.config13
-rw-r--r--rel/overlay/etc/default.ini275
-rw-r--r--rel/overlay/etc/local.ini13
-rw-r--r--rel/reltool.config48
-rw-r--r--setup_eunit.template1
-rw-r--r--src/aegis/src/aegis.erl11
-rw-r--r--src/aegis/src/aegis_key_manager.erl8
-rw-r--r--src/aegis/src/aegis_server.erl3
-rw-r--r--src/aegis/test/aegis_server_test.erl7
-rw-r--r--src/chttpd/src/chttpd.erl114
-rw-r--r--src/chttpd/src/chttpd_auth_cache.erl23
-rw-r--r--src/chttpd/src/chttpd_changes.erl15
-rw-r--r--src/chttpd/src/chttpd_cors.erl7
-rw-r--r--src/chttpd/src/chttpd_db.erl193
-rw-r--r--src/chttpd/src/chttpd_handlers.erl10
-rw-r--r--src/chttpd/src/chttpd_httpd_handlers.erl25
-rw-r--r--src/chttpd/src/chttpd_misc.erl49
-rw-r--r--src/chttpd/src/chttpd_node.erl57
-rw-r--r--src/chttpd/src/chttpd_rewrite.erl487
-rw-r--r--src/chttpd/src/chttpd_show.erl154
-rw-r--r--src/chttpd/src/chttpd_stats.erl12
-rw-r--r--src/chttpd/src/chttpd_sup.erl10
-rw-r--r--src/chttpd/src/chttpd_util.erl41
-rw-r--r--src/chttpd/src/chttpd_view.erl24
-rw-r--r--src/chttpd/test/eunit/chttpd_db_attachment_size_tests.erl12
-rw-r--r--src/chttpd/test/eunit/chttpd_session_tests.erl12
-rw-r--r--src/chttpd/test/exunit/pagination_test.exs56
-rw-r--r--src/couch/include/couch_db.hrl44
-rw-r--r--src/couch/priv/couch_ejson_compare/couch_ejson_compare.c40
-rw-r--r--src/couch/priv/couch_js/86/help.h79
-rw-r--r--src/couch/priv/couch_js/86/main.cpp341
-rw-r--r--src/couch/priv/couch_js/86/util.cpp348
-rw-r--r--src/couch/priv/couch_js/86/util.h41
-rw-r--r--src/couch/rebar.config.script31
-rw-r--r--src/couch/src/couch.app.src44
-rw-r--r--src/couch/src/couch_att.erl189
-rw-r--r--src/couch/src/couch_auth_cache.erl93
-rw-r--r--src/couch/src/couch_bt_engine.erl1246
-rw-r--r--src/couch/src/couch_bt_engine.hrl27
-rw-r--r--src/couch/src/couch_bt_engine_compactor.erl590
-rw-r--r--src/couch/src/couch_bt_engine_header.erl451
-rw-r--r--src/couch/src/couch_bt_engine_stream.erl70
-rw-r--r--src/couch/src/couch_btree.erl855
-rw-r--r--src/couch/src/couch_changes.erl724
-rw-r--r--src/couch/src/couch_compress.erl99
-rw-r--r--src/couch/src/couch_db.erl2086
-rw-r--r--src/couch/src/couch_db_engine.erl1105
-rw-r--r--src/couch/src/couch_db_epi.erl1
-rw-r--r--src/couch/src/couch_db_header.erl405
-rw-r--r--src/couch/src/couch_db_int.hrl76
-rw-r--r--src/couch/src/couch_db_plugin.erl96
-rw-r--r--src/couch/src/couch_db_split.erl503
-rw-r--r--src/couch/src/couch_db_updater.erl955
-rw-r--r--src/couch/src/couch_debug.erl38
-rw-r--r--src/couch/src/couch_doc.erl59
-rw-r--r--src/couch/src/couch_drv.erl2
-rw-r--r--src/couch/src/couch_emsort.erl318
-rw-r--r--src/couch/src/couch_event_sup.erl74
-rw-r--r--src/couch/src/couch_file.erl804
-rw-r--r--src/couch/src/couch_flags.erl16
-rw-r--r--src/couch/src/couch_flags_config.erl80
-rw-r--r--src/couch/src/couch_httpd.erl358
-rw-r--r--src/couch/src/couch_httpd_auth.erl29
-rw-r--r--src/couch/src/couch_httpd_db.erl1263
-rw-r--r--src/couch/src/couch_httpd_misc_handlers.erl269
-rw-r--r--src/couch/src/couch_httpd_multipart.erl11
-rw-r--r--src/couch/src/couch_httpd_rewrite.erl484
-rw-r--r--src/couch/src/couch_httpd_vhost.erl6
-rw-r--r--src/couch/src/couch_lru.erl67
-rw-r--r--src/couch/src/couch_multidb_changes.erl903
-rw-r--r--src/couch/src/couch_os_process.erl2
-rw-r--r--src/couch/src/couch_partition.erl2
-rw-r--r--src/couch/src/couch_primary_sup.erl6
-rw-r--r--src/couch/src/couch_proc_manager.erl4
-rw-r--r--src/couch/src/couch_query_servers.erl93
-rw-r--r--src/couch/src/couch_secondary_sup.erl11
-rw-r--r--src/couch/src/couch_server.erl872
-rw-r--r--src/couch/src/couch_server_int.hrl23
-rw-r--r--src/couch/src/couch_stream.erl322
-rw-r--r--src/couch/src/couch_sup.erl34
-rw-r--r--src/couch/src/couch_task_status.erl162
-rw-r--r--src/couch/src/couch_users_db.erl137
-rw-r--r--src/couch/src/couch_util.erl26
-rw-r--r--src/couch/src/test_util.erl42
-rw-r--r--src/couch/test/eunit/chttpd_endpoints_tests.erl18
-rw-r--r--src/couch/test/eunit/couch_auth_cache_tests.erl349
-rw-r--r--src/couch/test/eunit/couch_bt_engine_compactor_tests.erl129
-rw-r--r--src/couch/test/eunit/couch_bt_engine_tests.erl20
-rw-r--r--src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl244
-rw-r--r--src/couch/test/eunit/couch_btree_tests.erl572
-rw-r--r--src/couch/test/eunit/couch_changes_tests.erl962
-rw-r--r--src/couch/test/eunit/couch_db_doc_tests.erl121
-rw-r--r--src/couch/test/eunit/couch_db_mpr_tests.erl12
-rw-r--r--src/couch/test/eunit/couch_db_plugin_tests.erl205
-rw-r--r--src/couch/test/eunit/couch_db_props_upgrade_tests.erl83
-rw-r--r--src/couch/test/eunit/couch_db_split_tests.erl331
-rw-r--r--src/couch/test/eunit/couch_db_tests.erl198
-rw-r--r--src/couch/test/eunit/couch_doc_json_tests.erl82
-rw-r--r--src/couch/test/eunit/couch_doc_tests.erl45
-rw-r--r--src/couch/test/eunit/couch_file_tests.erl551
-rw-r--r--src/couch/test/eunit/couch_flags_config_tests.erl6
-rw-r--r--src/couch/test/eunit/couch_index_tests.erl232
-rw-r--r--src/couch/test/eunit/couch_js_tests.erl3
-rw-r--r--src/couch/test/eunit/couch_query_servers_tests.erl55
-rw-r--r--src/couch/test/eunit/couch_server_tests.erl294
-rw-r--r--src/couch/test/eunit/couch_stream_tests.erl124
-rw-r--r--src/couch/test/eunit/couch_task_status_tests.erl233
-rw-r--r--src/couch/test/eunit/couchdb_attachments_tests.erl765
-rw-r--r--src/couch/test/eunit/couchdb_auth_tests.erl11
-rw-r--r--src/couch/test/eunit/couchdb_cors_tests.erl9
-rw-r--r--src/couch/test/eunit/couchdb_db_tests.erl91
-rw-r--r--src/couch/test/eunit/couchdb_design_doc_tests.erl87
-rw-r--r--src/couch/test/eunit/couchdb_file_compression_tests.erl250
-rw-r--r--src/couch/test/eunit/couchdb_location_header_tests.erl78
-rw-r--r--src/couch/test/eunit/couchdb_mrview_cors_tests.erl18
-rw-r--r--src/couch/test/eunit/couchdb_mrview_tests.erl261
-rw-r--r--src/couch/test/eunit/couchdb_update_conflicts_tests.erl280
-rw-r--r--src/couch/test/eunit/couchdb_vhosts_tests.erl271
-rw-r--r--src/couch/test/eunit/couchdb_views_tests.erl668
-rwxr-xr-xsrc/couch/test/eunit/fixtures/os_daemon_configer.escript3
-rw-r--r--src/couch/test/eunit/global_changes_tests.erl159
-rw-r--r--src/couch/test/exunit/couch_compress_tests.exs113
-rw-r--r--src/couch/test/exunit/fabric_test.exs101
-rw-r--r--src/couch_eval/src/couch_eval.erl55
-rw-r--r--src/couch_event/.gitignore2
-rw-r--r--src/couch_event/LICENSE202
-rw-r--r--src/couch_event/README.md3
-rw-r--r--src/couch_event/rebar.config1
-rw-r--r--src/couch_event/src/couch_event.app.src22
-rw-r--r--src/couch_event/src/couch_event.erl65
-rw-r--r--src/couch_event/src/couch_event_app.erl27
-rw-r--r--src/couch_event/src/couch_event_int.hrl19
-rw-r--r--src/couch_event/src/couch_event_listener.erl238
-rw-r--r--src/couch_event/src/couch_event_listener_mfa.erl107
-rw-r--r--src/couch_event/src/couch_event_os_listener.erl76
-rw-r--r--src/couch_event/src/couch_event_server.erl156
-rw-r--r--src/couch_event/src/couch_event_sup2.erl44
-rw-r--r--src/couch_expiring_cache/.suppressed1
-rw-r--r--src/couch_expiring_cache/src/couch_expiring_cache.erl5
-rw-r--r--src/couch_expiring_cache/src/couch_expiring_cache_fdb.erl11
-rw-r--r--src/couch_expiring_cache/src/couch_expiring_cache_server.erl5
-rw-r--r--src/couch_index/.gitignore3
-rw-r--r--src/couch_index/LICENSE202
-rw-r--r--src/couch_index/rebar.config2
-rw-r--r--src/couch_index/src/couch_index.app.src19
-rw-r--r--src/couch_index/src/couch_index.erl639
-rw-r--r--src/couch_index/src/couch_index_app.erl21
-rw-r--r--src/couch_index/src/couch_index_compactor.erl135
-rw-r--r--src/couch_index/src/couch_index_epi.erl50
-rw-r--r--src/couch_index/src/couch_index_plugin.erl51
-rw-r--r--src/couch_index/src/couch_index_plugin_couch_db.erl26
-rw-r--r--src/couch_index/src/couch_index_server.erl322
-rw-r--r--src/couch_index/src/couch_index_sup.erl24
-rw-r--r--src/couch_index/src/couch_index_updater.erl239
-rw-r--r--src/couch_index/src/couch_index_util.erl78
-rw-r--r--src/couch_index/test/eunit/couch_index_compaction_tests.erl117
-rw-r--r--src/couch_index/test/eunit/couch_index_ddoc_updated_tests.erl145
-rw-r--r--src/couch_jobs/src/couch_jobs.erl16
-rw-r--r--src/couch_jobs/src/couch_jobs.hrl6
-rw-r--r--src/couch_jobs/src/couch_jobs_activity_monitor.erl99
-rw-r--r--src/couch_jobs/src/couch_jobs_fdb.erl18
-rw-r--r--src/couch_jobs/src/couch_jobs_notifier.erl163
-rw-r--r--src/couch_jobs/src/couch_jobs_server.erl31
-rw-r--r--src/couch_jobs/src/couch_jobs_type_monitor.erl20
-rw-r--r--src/couch_jobs/src/couch_jobs_util.erl58
-rw-r--r--src/couch_jobs/test/couch_jobs_tests.erl1074
-rw-r--r--src/couch_js/src/couch_js.app.src3
-rw-r--r--src/couch_js/src/couch_js.erl17
-rw-r--r--src/couch_js/src/couch_js_native_process.erl40
-rw-r--r--src/couch_js/src/couch_js_os_process.erl67
-rw-r--r--src/couch_js/src/couch_js_proc_manager.erl11
-rw-r--r--src/couch_js/src/couch_js_query_servers.erl87
-rw-r--r--src/couch_lib/.gitignore21
-rw-r--r--src/couch_lib/README.md28
-rw-r--r--src/couch_lib/src/couch_lib.app.src (renamed from src/couch_pse_tests/src/couch_pse_tests.app.src)6
-rw-r--r--src/couch_lib/src/couch_lib_parse.erl61
-rw-r--r--src/couch_log/src/couch_log_config.erl24
-rw-r--r--src/couch_log/src/couch_log_config_dyn.erl3
-rw-r--r--src/couch_log/src/couch_log_formatter.erl7
-rw-r--r--src/couch_log/src/couch_log_monitor.erl17
-rw-r--r--src/couch_log/src/couch_log_sup.erl2
-rw-r--r--src/couch_log/test/eunit/couch_log_config_listener_test.erl7
-rw-r--r--src/couch_log/test/eunit/couch_log_config_test.erl48
-rw-r--r--src/couch_mrview/LICENSE202
-rw-r--r--src/couch_mrview/include/couch_mrview.hrl114
-rw-r--r--src/couch_mrview/priv/stats_descriptions.cfg24
-rw-r--r--src/couch_mrview/rebar.config2
-rw-r--r--src/couch_mrview/src/couch_mrview.erl701
-rw-r--r--src/couch_mrview/src/couch_mrview_cleanup.erl59
-rw-r--r--src/couch_mrview/src/couch_mrview_compactor.erl294
-rw-r--r--src/couch_mrview/src/couch_mrview_http.erl650
-rw-r--r--src/couch_mrview/src/couch_mrview_index.erl329
-rw-r--r--src/couch_mrview/src/couch_mrview_show.erl468
-rw-r--r--src/couch_mrview/src/couch_mrview_test_util.erl123
-rw-r--r--src/couch_mrview/src/couch_mrview_update_notifier.erl49
-rw-r--r--src/couch_mrview/src/couch_mrview_updater.erl373
-rw-r--r--src/couch_mrview/src/couch_mrview_util.erl1180
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_all_docs_tests.erl140
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_collation_tests.erl207
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_compact_tests.erl115
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_ddoc_updated_tests.erl145
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_ddoc_validation_tests.erl422
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_design_docs_tests.erl136
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_http_tests.erl28
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_index_info_tests.erl111
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_local_docs_tests.erl148
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_map_views_tests.erl144
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl286
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_purge_docs_tests.erl575
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_red_views_tests.erl95
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_util_tests.erl39
-rw-r--r--src/couch_peruser/.gitignore9
-rw-r--r--src/couch_peruser/LICENSE202
-rw-r--r--src/couch_peruser/README.md34
-rw-r--r--src/couch_peruser/src/couch_peruser.app.src20
-rw-r--r--src/couch_peruser/src/couch_peruser.erl423
-rw-r--r--src/couch_peruser/src/couch_peruser_app.erl26
-rw-r--r--src/couch_peruser/test/eunit/couch_peruser_test.erl538
-rw-r--r--src/couch_plugins/LICENSE202
-rw-r--r--src/couch_plugins/Makefile.am40
-rw-r--r--src/couch_plugins/README.md159
-rw-r--r--src/couch_plugins/src/couch_plugins.app.src22
-rw-r--r--src/couch_plugins/src/couch_plugins.erl304
-rw-r--r--src/couch_plugins/src/couch_plugins_httpd.erl65
-rw-r--r--src/couch_prometheus/src/couch_prometheus.app.src (renamed from src/couch_mrview/src/couch_mrview.app.src)8
-rw-r--r--src/couch_prometheus/src/couch_prometheus.hrl (renamed from src/rexi/include/rexi.hrl)9
-rw-r--r--src/couch_prometheus/src/couch_prometheus_app.erl (renamed from src/ddoc_cache/src/ddoc_cache_app.erl)8
-rw-r--r--src/couch_prometheus/src/couch_prometheus_http.erl102
-rw-r--r--src/couch_prometheus/src/couch_prometheus_server.erl174
-rw-r--r--src/couch_prometheus/src/couch_prometheus_sup.erl (renamed from src/couch_peruser/src/couch_peruser_sup.erl)24
-rw-r--r--src/couch_prometheus/src/couch_prometheus_util.erl166
-rw-r--r--src/couch_prometheus/test/eunit/couch_prometheus_e2e_tests.erl147
-rw-r--r--src/couch_prometheus/test/eunit/couch_prometheus_util_tests.erl65
-rw-r--r--src/couch_pse_tests/src/cpse_gather.erl95
-rw-r--r--src/couch_pse_tests/src/cpse_test_attachments.erl99
-rw-r--r--src/couch_pse_tests/src/cpse_test_compaction.erl318
-rw-r--r--src/couch_pse_tests/src/cpse_test_copy_purge_infos.erl82
-rw-r--r--src/couch_pse_tests/src/cpse_test_fold_changes.erl185
-rw-r--r--src/couch_pse_tests/src/cpse_test_fold_docs.erl400
-rw-r--r--src/couch_pse_tests/src/cpse_test_fold_purge_infos.erl167
-rw-r--r--src/couch_pse_tests/src/cpse_test_get_set_props.erl95
-rw-r--r--src/couch_pse_tests/src/cpse_test_open_close_delete.erl77
-rw-r--r--src/couch_pse_tests/src/cpse_test_purge_bad_checkpoints.erl80
-rw-r--r--src/couch_pse_tests/src/cpse_test_purge_docs.erl464
-rw-r--r--src/couch_pse_tests/src/cpse_test_purge_replication.erl215
-rw-r--r--src/couch_pse_tests/src/cpse_test_purge_seqs.erl129
-rw-r--r--src/couch_pse_tests/src/cpse_test_read_write_docs.erl311
-rw-r--r--src/couch_pse_tests/src/cpse_test_ref_counting.erl113
-rw-r--r--src/couch_pse_tests/src/cpse_util.erl677
-rw-r--r--src/couch_replicator/README.md4
-rw-r--r--src/couch_replicator/src/couch_replicator.erl9
-rw-r--r--src/couch_replicator/src/couch_replicator_api_wrap.erl23
-rw-r--r--src/couch_replicator/src/couch_replicator_auth_session.erl67
-rw-r--r--src/couch_replicator/src/couch_replicator_changes_reader.erl23
-rw-r--r--src/couch_replicator/src/couch_replicator_connection.erl8
-rw-r--r--src/couch_replicator/src/couch_replicator_docs.erl25
-rw-r--r--src/couch_replicator/src/couch_replicator_httpc.erl93
-rw-r--r--src/couch_replicator/src/couch_replicator_httpd.erl2
-rw-r--r--src/couch_replicator/src/couch_replicator_ids.erl2
-rw-r--r--src/couch_replicator/src/couch_replicator_job.erl167
-rw-r--r--src/couch_replicator/src/couch_replicator_job_server.erl33
-rw-r--r--src/couch_replicator/src/couch_replicator_parse.erl39
-rw-r--r--src/couch_replicator/src/couch_replicator_worker.erl94
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_db_tests.erl9
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_many_leaves_tests.erl6
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_test_helper.erl2
-rw-r--r--src/couch_views/include/couch_views.hrl99
-rw-r--r--src/couch_views/src/couch_views.app.src1
-rw-r--r--src/couch_views/src/couch_views.erl38
-rw-r--r--src/couch_views/src/couch_views_batch.erl2
-rw-r--r--src/couch_views/src/couch_views_batch_impl.erl4
-rw-r--r--src/couch_views/src/couch_views_fdb.erl1
-rw-r--r--src/couch_views/src/couch_views_http.erl49
-rw-r--r--src/couch_views/src/couch_views_http_util.erl337
-rw-r--r--src/couch_views/src/couch_views_indexer.erl179
-rw-r--r--src/couch_views/src/couch_views_jobs.erl21
-rw-r--r--src/couch_views/src/couch_views_reader.erl91
-rw-r--r--src/couch_views/src/couch_views_server.erl5
-rw-r--r--src/couch_views/src/couch_views_sup.erl2
-rw-r--r--src/couch_views/src/couch_views_trees.erl66
-rw-r--r--src/couch_views/src/couch_views_updater.erl24
-rw-r--r--src/couch_views/src/couch_views_util.erl144
-rw-r--r--src/couch_views/src/couch_views_validate.erl460
-rw-r--r--src/couch_views/test/couch_views_active_tasks_test.erl3
-rw-r--r--src/couch_views/test/couch_views_batch_test.erl2
-rw-r--r--src/couch_views/test/couch_views_cleanup_test.erl1
-rw-r--r--src/couch_views/test/couch_views_custom_red_test.erl193
-rw-r--r--src/couch_views/test/couch_views_indexer_test.erl91
-rw-r--r--src/couch_views/test/couch_views_info_test.erl2
-rw-r--r--src/couch_views/test/couch_views_map_test.erl118
-rw-r--r--src/couch_views/test/couch_views_red_test.erl4
-rw-r--r--src/couch_views/test/couch_views_server_test.erl3
-rw-r--r--src/couch_views/test/couch_views_size_test.erl3
-rw-r--r--src/couch_views/test/couch_views_trace_index_test.erl2
-rw-r--r--src/couch_views/test/couch_views_updater_test.erl3
-rw-r--r--src/couch_views/test/couch_views_upgrade_test.erl3
-rw-r--r--src/ctrace/README.md2
-rw-r--r--src/ctrace/src/ctrace.erl4
-rw-r--r--src/ctrace/src/ctrace_config.erl3
-rw-r--r--src/ctrace/test/ctrace_config_test.erl3
-rw-r--r--src/ddoc_cache/LICENSE202
-rw-r--r--src/ddoc_cache/README.md4
-rw-r--r--src/ddoc_cache/priv/stats_descriptions.cfg12
-rw-r--r--src/ddoc_cache/src/ddoc_cache.app.src32
-rw-r--r--src/ddoc_cache/src/ddoc_cache.erl60
-rw-r--r--src/ddoc_cache/src/ddoc_cache.hrl40
-rw-r--r--src/ddoc_cache/src/ddoc_cache_entry.erl374
-rw-r--r--src/ddoc_cache/src/ddoc_cache_entry_custom.erl37
-rw-r--r--src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl46
-rw-r--r--src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl47
-rw-r--r--src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl44
-rw-r--r--src/ddoc_cache/src/ddoc_cache_lru.erl333
-rw-r--r--src/ddoc_cache/src/ddoc_cache_opener.erl66
-rw-r--r--src/ddoc_cache/src/ddoc_cache_sup.erl46
-rw-r--r--src/ddoc_cache/src/ddoc_cache_value.erl27
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_basic_test.erl175
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_coverage_test.erl77
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_disabled_test.erl62
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_entry_test.erl159
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_ev.erl21
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_eviction_test.erl96
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl219
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl87
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_open_error_test.erl46
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_open_test.erl107
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_opener_test.erl33
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_refresh_test.erl174
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_remove_test.erl224
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_test.hrl26
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_tutil.erl111
-rw-r--r--src/dreyfus/.gitignore4
-rw-r--r--src/dreyfus/LICENSE.txt202
-rw-r--r--src/dreyfus/README.md78
-rw-r--r--src/dreyfus/include/dreyfus.hrl74
-rw-r--r--src/dreyfus/priv/stats_descriptions.cfg65
-rw-r--r--src/dreyfus/src/clouseau_rpc.erl109
-rw-r--r--src/dreyfus/src/dreyfus.app.src22
-rw-r--r--src/dreyfus/src/dreyfus_app.erl24
-rw-r--r--src/dreyfus/src/dreyfus_bookmark.erl90
-rw-r--r--src/dreyfus/src/dreyfus_config.erl15
-rw-r--r--src/dreyfus/src/dreyfus_epi.erl46
-rw-r--r--src/dreyfus/src/dreyfus_fabric.erl205
-rw-r--r--src/dreyfus/src/dreyfus_fabric_cleanup.erl78
-rw-r--r--src/dreyfus/src/dreyfus_fabric_group1.erl129
-rw-r--r--src/dreyfus/src/dreyfus_fabric_group2.erl158
-rw-r--r--src/dreyfus/src/dreyfus_fabric_info.erl108
-rw-r--r--src/dreyfus/src/dreyfus_fabric_search.erl270
-rw-r--r--src/dreyfus/src/dreyfus_httpd.erl614
-rw-r--r--src/dreyfus/src/dreyfus_httpd_handlers.erl29
-rw-r--r--src/dreyfus/src/dreyfus_index.erl391
-rw-r--r--src/dreyfus/src/dreyfus_index_manager.erl153
-rw-r--r--src/dreyfus/src/dreyfus_index_updater.erl181
-rw-r--r--src/dreyfus/src/dreyfus_plugin_couch_db.erl26
-rw-r--r--src/dreyfus/src/dreyfus_rpc.erl130
-rw-r--r--src/dreyfus/src/dreyfus_sup.erl32
-rw-r--r--src/dreyfus/src/dreyfus_util.erl441
-rw-r--r--src/dreyfus/test/dreyfus_blacklist_await_test.erl76
-rw-r--r--src/dreyfus/test/dreyfus_blacklist_request_test.erl96
-rw-r--r--src/dreyfus/test/dreyfus_config_test.erl71
-rw-r--r--src/dreyfus/test/dreyfus_purge_test.erl867
-rw-r--r--src/dreyfus/test/dreyfus_test_util.erl13
-rw-r--r--src/dreyfus/test/elixir/mix.exs30
-rw-r--r--src/dreyfus/test/elixir/mix.lock5
-rwxr-xr-xsrc/dreyfus/test/elixir/run4
-rw-r--r--src/dreyfus/test/elixir/test/partition_search_test.exs247
-rw-r--r--src/dreyfus/test/elixir/test/search_test.exs226
-rw-r--r--src/dreyfus/test/elixir/test/test_helper.exs4
-rw-r--r--src/ebtree/README.md6
-rw-r--r--src/ebtree/src/ebtree.erl160
-rw-r--r--src/fabric/include/fabric.hrl46
-rw-r--r--src/fabric/include/fabric2.hrl11
-rw-r--r--src/fabric/priv/stats_descriptions.cfg28
-rw-r--r--src/fabric/src/fabric.app.src2
-rw-r--r--src/fabric/src/fabric.erl720
-rw-r--r--src/fabric/src/fabric2_db.erl121
-rw-r--r--src/fabric/src/fabric2_db_expiration.erl18
-rw-r--r--src/fabric/src/fabric2_fdb.erl95
-rw-r--r--src/fabric/src/fabric2_index.erl21
-rw-r--r--src/fabric/src/fabric2_server.erl203
-rw-r--r--src/fabric/src/fabric2_users_db.erl88
-rw-r--r--src/fabric/src/fabric2_util.erl16
-rw-r--r--src/fabric/src/fabric_db_create.erl228
-rw-r--r--src/fabric/src/fabric_db_delete.erl98
-rw-r--r--src/fabric/src/fabric_db_doc_count.erl62
-rw-r--r--src/fabric/src/fabric_db_info.erl171
-rw-r--r--src/fabric/src/fabric_db_meta.erl198
-rw-r--r--src/fabric/src/fabric_db_partition_info.erl155
-rw-r--r--src/fabric/src/fabric_db_update_listener.erl177
-rw-r--r--src/fabric/src/fabric_design_doc_count.erl62
-rw-r--r--src/fabric/src/fabric_dict.erl61
-rw-r--r--src/fabric/src/fabric_doc_attachments.erl160
-rw-r--r--src/fabric/src/fabric_doc_atts.erl170
-rw-r--r--src/fabric/src/fabric_doc_missing_revs.erl97
-rw-r--r--src/fabric/src/fabric_doc_open.erl610
-rw-r--r--src/fabric/src/fabric_doc_open_revs.erl799
-rw-r--r--src/fabric/src/fabric_doc_purge.erl571
-rw-r--r--src/fabric/src/fabric_doc_update.erl377
-rw-r--r--src/fabric/src/fabric_group_info.erl139
-rw-r--r--src/fabric/src/fabric_ring.erl519
-rw-r--r--src/fabric/src/fabric_rpc.erl664
-rw-r--r--src/fabric/src/fabric_streams.erl274
-rw-r--r--src/fabric/src/fabric_util.erl347
-rw-r--r--src/fabric/src/fabric_view.erl478
-rw-r--r--src/fabric/src/fabric_view_all_docs.erl332
-rw-r--r--src/fabric/src/fabric_view_changes.erl820
-rw-r--r--src/fabric/src/fabric_view_map.erl267
-rw-r--r--src/fabric/src/fabric_view_reduce.erl165
-rw-r--r--src/fabric/test/eunit/fabric_rpc_tests.erl181
-rw-r--r--src/fabric/test/fabric2_changes_fold_tests.erl41
-rw-r--r--src/fabric/test/fabric2_db_crud_tests.erl18
-rw-r--r--src/fabric/test/fabric2_dir_prefix_tests.erl4
-rw-r--r--src/fabric/test/fabric2_doc_crud_tests.erl2
-rw-r--r--src/fabric/test/fabric2_node_types_tests.erl4
-rw-r--r--src/fabric/test/fabric2_snapshot_tests.erl134
-rw-r--r--src/fabric/test/fabric2_test_util.erl5
-rw-r--r--src/fabric/test/fabric2_tx_options_tests.erl59
-rw-r--r--src/global_changes/.gitignore2
-rw-r--r--src/global_changes/LICENSE203
-rw-r--r--src/global_changes/README.md27
-rw-r--r--src/global_changes/priv/stats_descriptions.cfg20
-rw-r--r--src/global_changes/src/global_changes.app.src32
-rw-r--r--src/global_changes/src/global_changes_app.erl28
-rw-r--r--src/global_changes/src/global_changes_epi.erl51
-rw-r--r--src/global_changes/src/global_changes_httpd.erl285
-rw-r--r--src/global_changes/src/global_changes_httpd_handlers.erl28
-rw-r--r--src/global_changes/src/global_changes_listener.erl165
-rw-r--r--src/global_changes/src/global_changes_plugin.erl40
-rw-r--r--src/global_changes/src/global_changes_server.erl229
-rw-r--r--src/global_changes/src/global_changes_sup.erl84
-rw-r--r--src/global_changes/src/global_changes_util.erl27
-rw-r--r--src/global_changes/test/eunit/global_changes_hooks_tests.erl156
-rw-r--r--src/ioq/.gitignore2
-rw-r--r--src/ioq/src/ioq.app.src21
-rw-r--r--src/ioq/src/ioq.erl189
-rw-r--r--src/ioq/src/ioq_app.erl21
-rw-r--r--src/ioq/src/ioq_sup.erl24
-rw-r--r--src/jwtf/src/jwtf.erl36
-rw-r--r--src/jwtf/test/jwtf_keystore_tests.erl4
-rw-r--r--src/ken/README.md12
-rw-r--r--src/ken/rebar.config.script28
-rw-r--r--src/ken/src/ken.app.src.script38
-rw-r--r--src/ken/src/ken.erl29
-rw-r--r--src/ken/src/ken_app.erl28
-rw-r--r--src/ken/src/ken_event_handler.erl56
-rw-r--r--src/ken/src/ken_server.erl579
-rw-r--r--src/ken/src/ken_sup.erl33
-rw-r--r--src/ken/test/config.ini2
-rw-r--r--src/ken/test/ken_server_test.erl97
-rw-r--r--src/mango/src/mango_cursor.erl8
-rw-r--r--src/mango/src/mango_cursor_special.erl2
-rw-r--r--src/mango/src/mango_cursor_text.erl334
-rw-r--r--src/mango/src/mango_cursor_view.erl9
-rw-r--r--src/mango/src/mango_eval.erl17
-rw-r--r--src/mango/src/mango_httpd.erl3
-rw-r--r--src/mango/src/mango_idx.erl19
-rw-r--r--src/mango/src/mango_idx_text.erl459
-rw-r--r--src/mango/src/mango_idx_view.erl7
-rw-r--r--src/mango/src/mango_json_bookmark.erl2
-rw-r--r--src/mango/test/06-basic-text-test.py4
-rw-r--r--src/mango/test/17-multi-type-value-test.py2
-rw-r--r--src/mango/test/mango.py2
-rw-r--r--src/mem3/LICENSE202
-rw-r--r--src/mem3/README.md43
-rw-r--r--src/mem3/README_reshard.md93
-rw-r--r--src/mem3/include/mem3.hrl59
-rw-r--r--src/mem3/priv/stats_descriptions.cfg12
-rw-r--r--src/mem3/rebar.config.script22
-rw-r--r--src/mem3/src/mem3.app.src40
-rw-r--r--src/mem3/src/mem3.erl424
-rw-r--r--src/mem3/src/mem3_app.erl21
-rw-r--r--src/mem3/src/mem3_cluster.erl161
-rw-r--r--src/mem3/src/mem3_epi.erl51
-rw-r--r--src/mem3/src/mem3_hash.erl73
-rw-r--r--src/mem3/src/mem3_httpd.erl84
-rw-r--r--src/mem3/src/mem3_httpd_handlers.erl61
-rw-r--r--src/mem3/src/mem3_nodes.erl155
-rw-r--r--src/mem3/src/mem3_plugin_couch_db.erl21
-rw-r--r--src/mem3/src/mem3_rep.erl998
-rw-r--r--src/mem3/src/mem3_reshard.erl913
-rw-r--r--src/mem3/src/mem3_reshard.hrl74
-rw-r--r--src/mem3/src/mem3_reshard_api.erl217
-rw-r--r--src/mem3/src/mem3_reshard_dbdoc.erl274
-rw-r--r--src/mem3/src/mem3_reshard_httpd.erl317
-rw-r--r--src/mem3/src/mem3_reshard_index.erl164
-rw-r--r--src/mem3/src/mem3_reshard_job.erl716
-rw-r--r--src/mem3/src/mem3_reshard_job_sup.erl55
-rw-r--r--src/mem3/src/mem3_reshard_store.erl286
-rw-r--r--src/mem3/src/mem3_reshard_sup.erl47
-rw-r--r--src/mem3/src/mem3_reshard_validate.erl126
-rw-r--r--src/mem3/src/mem3_rpc.erl711
-rw-r--r--src/mem3/src/mem3_seeds.erl162
-rw-r--r--src/mem3/src/mem3_shards.erl766
-rw-r--r--src/mem3/src/mem3_sup.erl40
-rw-r--r--src/mem3/src/mem3_sync.erl323
-rw-r--r--src/mem3/src/mem3_sync_event.erl86
-rw-r--r--src/mem3/src/mem3_sync_event_listener.erl353
-rw-r--r--src/mem3/src/mem3_sync_nodes.erl115
-rw-r--r--src/mem3/src/mem3_sync_security.erl117
-rw-r--r--src/mem3/src/mem3_util.erl650
-rw-r--r--src/mem3/test/eunit/mem3_cluster_test.erl133
-rw-r--r--src/mem3/test/eunit/mem3_hash_test.erl23
-rw-r--r--src/mem3/test/eunit/mem3_rep_test.erl321
-rw-r--r--src/mem3/test/eunit/mem3_reshard_api_test.erl847
-rw-r--r--src/mem3/test/eunit/mem3_reshard_changes_feed_test.erl389
-rw-r--r--src/mem3/test/eunit/mem3_reshard_test.erl834
-rw-r--r--src/mem3/test/eunit/mem3_ring_prop_tests.erl151
-rw-r--r--src/mem3/test/eunit/mem3_seeds_test.erl69
-rw-r--r--src/mem3/test/eunit/mem3_sync_security_test.erl54
-rw-r--r--src/mem3/test/eunit/mem3_util_test.erl130
-rw-r--r--src/rexi/README.md23
-rw-r--r--src/rexi/priv/stats_descriptions.cfg24
-rw-r--r--src/rexi/rebar.config2
-rw-r--r--src/rexi/src/rexi.app.src28
-rw-r--r--src/rexi/src/rexi.erl320
-rw-r--r--src/rexi/src/rexi_app.erl22
-rw-r--r--src/rexi/src/rexi_buffer.erl104
-rw-r--r--src/rexi/src/rexi_monitor.erl65
-rw-r--r--src/rexi/src/rexi_server.erl193
-rw-r--r--src/rexi/src/rexi_server_mon.erl176
-rw-r--r--src/rexi/src/rexi_server_sup.erl29
-rw-r--r--src/rexi/src/rexi_sup.erl64
-rw-r--r--src/rexi/src/rexi_utils.erl105
-rw-r--r--src/setup/.gitignore4
-rw-r--r--src/setup/LICENSE203
-rw-r--r--src/setup/README.md210
-rw-r--r--src/setup/src/setup.app.src27
-rw-r--r--src/setup/src/setup.erl386
-rw-r--r--src/setup/src/setup_app.erl28
-rw-r--r--src/setup/src/setup_epi.erl49
-rw-r--r--src/setup/src/setup_httpd.erl180
-rw-r--r--src/setup/src/setup_httpd_handlers.erl32
-rw-r--r--src/setup/src/setup_sup.erl44
-rwxr-xr-xsrc/setup/test/t-frontend-setup.sh71
-rwxr-xr-xsrc/setup/test/t-single-node-auto-setup.sh24
-rwxr-xr-xsrc/setup/test/t-single-node.sh46
-rwxr-xr-xsrc/setup/test/t.sh63
-rw-r--r--src/smoosh/README.md140
-rw-r--r--src/smoosh/operator_guide.md396
-rw-r--r--src/smoosh/src/smoosh.app.src29
-rw-r--r--src/smoosh/src/smoosh.erl69
-rw-r--r--src/smoosh/src/smoosh_app.erl28
-rw-r--r--src/smoosh/src/smoosh_channel.erl325
-rw-r--r--src/smoosh/src/smoosh_priority_queue.erl86
-rw-r--r--src/smoosh/src/smoosh_server.erl606
-rw-r--r--src/smoosh/src/smoosh_sup.erl38
-rw-r--r--src/smoosh/src/smoosh_utils.erl92
-rw-r--r--src/smoosh/test/exunit/scheduling_window_test.exs79
-rw-r--r--src/smoosh/test/exunit/test_helper.exs2
-rw-r--r--support/build_js.escript8
-rw-r--r--test/elixir/lib/couch.ex19
-rw-r--r--test/elixir/lib/couch/db_test.ex12
-rw-r--r--test/elixir/lib/setup/common.ex4
-rw-r--r--test/elixir/lib/step/create_db.ex2
-rw-r--r--test/elixir/lib/suite.ex213
-rw-r--r--test/elixir/test/all_docs_test.exs20
-rw-r--r--test/elixir/test/attachment_names_test.exs15
-rw-r--r--test/elixir/test/basics_test.exs12
-rw-r--r--test/elixir/test/bulk_docs_test.exs18
-rw-r--r--test/elixir/test/changes_async_test.exs74
-rw-r--r--test/elixir/test/compact_test.exs89
-rw-r--r--test/elixir/test/concurrent_writes_test.exs151
-rw-r--r--test/elixir/test/config/skip.elixir299
-rw-r--r--test/elixir/test/config/suite.elixir594
-rw-r--r--test/elixir/test/config_test.exs12
-rw-r--r--test/elixir/test/test_helper.exs17
-rw-r--r--test/elixir/test/users_db_test.exs125
-rw-r--r--test/elixir/test/view_test.exs12
593 files changed, 8578 insertions, 79748 deletions
diff --git a/.credo.exs b/.credo.exs
index 64d281e5e..ba4bf1409 100644
--- a/.credo.exs
+++ b/.credo.exs
@@ -30,6 +30,7 @@
~r"/src/jason",
~r"/src/hackney",
~r"/src/httpotion",
+ ~r"/src/file_system",
~r"/src/credo",
~r"/src/idna",
~r"/src/junit_formatter",
diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
new file mode 100644
index 000000000..d479bc5a6
--- /dev/null
+++ b/.devcontainer/Dockerfile
@@ -0,0 +1,47 @@
+ARG FDB_VERSION
+ARG ELIXIR_VERSION
+
+# Grab fdbcli and client library from same image as server
+FROM foundationdb/foundationdb:${FDB_VERSION} as fdb
+
+# Debian image with Erlang + Elixir installed (we need elixir for test suite)
+FROM elixir:${ELIXIR_VERSION}
+
+# The FROM directive above sweeps out the ARGs so we need to re-declare here
+# in order to use it again to download the FDB client package
+ARG FDB_VERSION
+
+# Install SpiderMonkey 60 and tell CouchDB to use it in configure
+ARG SM_VSN
+ENV SM_VSN=${SM_VSN:-60}
+
+# Workaround for Debian's temporary lack of trust in FDB Root CA
+RUN set -ex; \
+ wget https://www.geotrust.com/resources/root_certificates/certificates/GeoTrust_Global_CA.pem; \
+ wget --ca-certificate=GeoTrust_Global_CA.pem https://www.foundationdb.org/downloads/${FDB_VERSION}/ubuntu/installers/foundationdb-clients_${FDB_VERSION}-1_amd64.deb; \
+ mkdir /var/lib/foundationdb; \
+ dpkg -i foundationdb-clients_${FDB_VERSION}-1_amd64.deb
+
+# Use NodeSource binaries for Node.js (Fauxton dependency)
+RUN set -ex; \
+ curl -s https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add -; \
+ echo "deb https://deb.nodesource.com/node_10.x buster main" | tee /etc/apt/sources.list.d/nodesource.list; \
+ echo "deb-src https://deb.nodesource.com/node_10.x buster main" | tee -a /etc/apt/sources.list.d/nodesource.list
+
+RUN set -ex; \
+ apt-get update; \
+ apt-get install -y --no-install-recommends \
+ dnsutils \
+ libmozjs-${SM_VSN}-dev \
+ libicu-dev \
+ python3-venv \
+ python3-pip \
+ python3-sphinx \
+ nodejs
+
+# Documentation theme
+RUN pip3 install sphinx_rtd_theme
+
+COPY --from=fdb /var/fdb/scripts/create_cluster_file.bash /usr/local/bin/
+
+CMD sleep infinity
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100644
index 000000000..8f7a26d9b
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,12 @@
+{
+ "dockerComposeFile": "docker-compose.yaml",
+ "service": "couch",
+ "workspaceFolder": "/usr/src/couchdb",
+
+ // Needs to run at start to translate service name into coordinator IP
+ "postStartCommand": ["bash", "/usr/local/bin/create_cluster_file.bash"],
+
+ "extensions": [
+ "erlang-ls.erlang-ls"
+ ]
+}
diff --git a/.devcontainer/docker-compose.yaml b/.devcontainer/docker-compose.yaml
new file mode 100644
index 000000000..79f1da775
--- /dev/null
+++ b/.devcontainer/docker-compose.yaml
@@ -0,0 +1,52 @@
+services:
+ couch:
+ build:
+ context: .
+ dockerfile: Dockerfile
+ args:
+ # Base image for Erlang and Elixir. Useful choices include:
+ # 1.11 -> Erlang 23, Debian Buster
+ # 1.10 -> Erlang 22, Debian Buster
+ # 1.9 -> Erlang 22, Debian Buster
+ #
+ # Older versions based on Debian Stretch will not include
+ # SpiderMonkey 60, which the Dockerfile expects to be able
+ # to install via apt-get.
+ ELIXIR_VERSION: "1.10"
+
+ # SpiderMonkey version to install with apt-get
+ SM_VSN: "60"
+
+ # This should always match the value in fdb.image
+ FDB_VERSION: "6.2.28"
+
+ environment:
+ # This needs to match the name of the FoundationDB service below
+ FDB_COORDINATOR: fdb
+
+ # The location where the Dockerfile installs the FDB cluster file
+ # retrieved from the `fdb` image. CouchDB looks for the cluster file in
+ # this location by default. If you want to install it somewhere else, you
+ # need to change "[erlfdb] cluster_file" and ERL_ZFLAGS to match.
+ FDB_CLUSTER_FILE: /usr/local/etc/foundationdb/fdb.cluster
+
+ # The test suite will default to trying to start its own fdbserver
+ # process. This environment variable tells it to use the fdbserver
+ # running in the `fdb` image instead. Quite a hacky solution. An
+ # alternative might be to parameterize the Makefile so we can swap
+ # `eunit.config` for a `devcontainer.config` via an environment variable
+ # and maintain both config files in the repo.
+ ERL_ZFLAGS: "-erlfdb test_cluster_file <<\\\"/usr/local/etc/foundationdb/fdb.cluster\\\">>"
+
+ volumes:
+ # Mounts the project folder to '/usr/src/couchdb'. The target path inside
+ # the container should match what your application expects. In this case,
+ # the compose file is in a sub-folder, so you will mount '..'. You would
+ # then reference this path as the 'workspaceFolder' in
+ # '.devcontainer/devcontainer.json' so VS Code starts here.
+ - ..:/usr/src/couchdb:cached
+
+ network_mode: service:fdb
+
+ fdb:
+ image: foundationdb/foundationdb:6.2.28
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 000000000..7702f717b
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,14 @@
+blank_issues_enabled: true
+contact_links:
+ - name: File a Fauxton bug
+ url: https://github.com/apache/couchdb-fauxton/issues/new
+ about: Any web UI bugs should be filed in the Fauxton repo.
+ - name: File a documentation bug
+ url: https://github.com/apache/couchdb-documentation/issues/new
+ about: Any documentation bugs should be filed in our docs repo.
+ - name: Ask a Question
+ url: https://github.com/apache/couchdb/discussions/category_choices
+ about: If you're not specifically reporting a bug, please ask your question here.
+ - name: Read the Documentation
+ url: https://docs.couchdb.org/
+ about: You can also check out our excellent documentation here.
diff --git a/.gitignore b/.gitignore
index 4bc87ecd3..719294101 100644
--- a/.gitignore
+++ b/.gitignore
@@ -23,7 +23,8 @@ bin/
config.erl
*.tar.gz
*.tar.bz2
-dev/boot_node.beam
+dev/*.beam
+dev/devnode.*
dev/lib/
dev/logs/
ebin/
@@ -84,6 +85,7 @@ src/ssl_verify_fun/
src/thrift_protocol/
src/triq/
src/unicode_util_compat/
+src/file_system/
tmp/
src/couch/*.o
diff --git a/INSTALL.Unix.md b/INSTALL.Unix.md
index 6a37f3de4..0127bfc18 100644
--- a/INSTALL.Unix.md
+++ b/INSTALL.Unix.md
@@ -256,9 +256,9 @@ Naturally, you can configure systemd, launchd or SysV-init daemons to
launch CouchDB and keep it running using standard configuration files.
Sample scripts are in the couchdb-pkg repository:
-* SysV-init (Debian-style): https://github.com/apache/couchdb-pkg/blob/master/debian/couchdb.init
-* SysV-init (RHEL-style): https://github.com/apache/couchdb-pkg/blob/master/rpm/SOURCES/couchdb.init
+* SysV-init (Debian-style): https://github.com/apache/couchdb-pkg/blob/main/debian/couchdb.init
+* SysV-init (RHEL-style): https://github.com/apache/couchdb-pkg/blob/main/rpm/SOURCES/couchdb.init
* upstart: Use the Debian-style sysvinit script instead.
-* systemd: https://github.com/apache/couchdb-pkg/blob/master/debian/couchdb.service
+* systemd: https://github.com/apache/couchdb-pkg/blob/main/debian/couchdb.service
Consult your system documentation for more information.
diff --git a/Makefile b/Makefile
index 58eb614be..79141184f 100644
--- a/Makefile
+++ b/Makefile
@@ -72,7 +72,7 @@ DESTDIR=
# Rebar options
apps=
-skip_deps=folsom,meck,mochiweb,triq,proper,snappy,bcrypt,hyper
+skip_deps=folsom,meck,mochiweb,proper,bcrypt,hyper,local
suites=
tests=
@@ -152,7 +152,7 @@ check-all-tests: all python-black
@$(MAKE) elixir
ifdef apps
-subdirs = $(apps)
+subdirs=$(shell echo $(apps) | sed 's/,/ /g')
else
subdirs=$(shell ls src)
endif
@@ -160,10 +160,10 @@ endif
.PHONY: check
check: all
@$(MAKE) emilio
- make eunit apps=couch_eval,couch_expiring_cache,ctrace,couch_jobs,couch_views,fabric,mango,chttpd,couch_replicator
- make elixir tests=test/elixir/test/basics_test.exs,test/elixir/test/replication_test.exs,test/elixir/test/map_test.exs,test/elixir/test/all_docs_test.exs,test/elixir/test/bulk_docs_test.exs
- make exunit apps=chttpd
- make mango-test
+ @$(MAKE) eunit
+ @$(MAKE) elixir-suite
+ @$(MAKE) exunit
+ @$(MAKE) mango-test
.PHONY: eunit
# target: eunit - Run EUnit tests, use EUNIT_OPTS to provide custom options
@@ -271,6 +271,17 @@ elixir-cluster-with-quorum: elixir-init elixir-check-formatted elixir-credo devc
--degrade-cluster 1 \
--no-eval 'mix test --trace --only with_quorum_test $(EXUNIT_OPTS)'
+.PHONY: elixir-suite
+elixir-suite: export MIX_ENV=integration
+elixir-suite: export COUCHDB_TEST_ADMIN_PARTY_OVERRIDE=1
+elixir-suite: elixir-init elixir-check-formatted elixir-credo devclean
+ @dev/run -n 1 -q -a adm:pass \
+ --enable-erlang-views \
+ --no-join \
+ --locald-config test/elixir/test/config/test-config.ini \
+ --erlang-config rel/files/eunit.config \
+ --no-eval 'mix test --trace --include test/elixir/test/config/suite.elixir --exclude test/elixir/test/config/skip.elixir'
+
.PHONY: elixir-check-formatted
elixir-check-formatted: elixir-init
@mix format --check-formatted
@@ -438,7 +449,7 @@ clean:
@rm -rf src/mango/.venv
@rm -f src/couch/priv/couchspawnkillable
@rm -f src/couch/priv/couch_js/config.h
- @rm -f dev/boot_node.beam dev/pbkdf2.pyc log/crash.log
+ @rm -f dev/*.beam dev/devnode.* dev/pbkdf2.pyc log/crash.log
.PHONY: distclean
@@ -485,7 +496,7 @@ endif
share/www:
ifeq ($(with_fauxton), 1)
@echo "Building Fauxton"
- @cd src/fauxton && npm install --production && ./node_modules/grunt-cli/bin/grunt couchdb
+ @cd src/fauxton && npm install && ./node_modules/grunt-cli/bin/grunt couchdb
endif
diff --git a/Makefile.win b/Makefile.win
index b52920967..5240da377 100644
--- a/Makefile.win
+++ b/Makefile.win
@@ -76,7 +76,7 @@ DESTDIR=
# Rebar options
apps=
-skip_deps=folsom,meck,mochiweb,triq,proper,snappy,bcrypt,hyper
+skip_deps=folsom,meck,mochiweb,proper,bcrypt,hyper
suites=
tests=
@@ -224,6 +224,17 @@ elixir-cluster-with-quorum: elixir-init elixir-check-formatted elixir-credo devc
--degrade-cluster 1 \
--no-eval 'mix test --trace --only with_quorum_test $(EXUNIT_OPTS)'
+.PHONY: elixir-suite
+elixir-suite: export MIX_ENV=integration
+elixir-suite: export COUCHDB_TEST_ADMIN_PARTY_OVERRIDE=1
+elixir-suite: elixir-init elixir-check-formatted elixir-credo devclean
+ @dev\run -n 1 -q -a adm:pass \
+ --enable-erlang-views \
+ --no-join \
+ --locald-config test/elixir/test/config/test-config.ini \
+ --erlang-config rel/files/eunit.config \
+ --no-eval 'mix test --trace --include test\elixir\test\config\suite.elixir --exclude test\elixir\test\config\skip.elixir'
+
.PHONY: elixir-check-formatted
elixir-check-formatted: elixir-init
@mix format --check-formatted
@@ -419,7 +430,7 @@ endif
share\www:
ifeq ($(with_fauxton), 1)
@echo 'Building Fauxton'
- @cd src\fauxton && npm install --production && .\node_modules\.bin\grunt couchdb
+ @cd src\fauxton && npm install && .\node_modules\.bin\grunt couchdb
endif
derived:
diff --git a/NOTICE b/NOTICE
index e37a9901b..f6d6ee2d9 100644
--- a/NOTICE
+++ b/NOTICE
@@ -1,5 +1,5 @@
Apache CouchDB
-Copyright 2009-2020 The Apache Software Foundation
+Copyright 2009-2021 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).
diff --git a/README.rst b/README.rst
index bce039d8c..4f54e6f0a 100644
--- a/README.rst
+++ b/README.rst
@@ -62,7 +62,7 @@ Getting started with developing
For more detail, read the README-DEV.rst_ file in this directory.
-.. _README-DEV.rst: https://github.com/apache/couchdb/blob/master/README-DEV.rst
+.. _README-DEV.rst: https://github.com/apache/couchdb/blob/main/README-DEV.rst
Basically you just have to install the needed dependencies which are
documented in the install docs and then run ``./configure && make``.
@@ -85,7 +85,7 @@ Contributing to CouchDB
You can learn more about our contributing process here:
- https://github.com/apache/couchdb/blob/master/CONTRIBUTING.md
+ https://github.com/apache/couchdb/blob/main/CONTRIBUTING.md
Cryptographic Software Notice
-----------------------------
diff --git a/build-aux/Jenkinsfile.full b/build-aux/Jenkinsfile.full
index 5f99da6cf..9b6474513 100644
--- a/build-aux/Jenkinsfile.full
+++ b/build-aux/Jenkinsfile.full
@@ -81,9 +81,10 @@ pipeline {
agent {
docker {
label 'docker'
- image 'couchdbdev/debian-stretch-erlang-20.3.8.25-1:latest'
+ image 'apache/couchdbci-debian:buster-erlang-21.3.8.17-1'
args "${DOCKER_ARGS}"
- alwaysPull true
+ registryUrl 'https://docker.io/'
+ registryCredentialsId 'dockerhub_creds'
}
}
options {
@@ -195,57 +196,14 @@ pipeline {
} // post
} // stage macOS
- stage('CentOS 6') {
- agent {
- docker {
- image 'couchdbdev/centos-6-erlang-20.3.8.25-1:latest'
- label 'docker'
- args "${DOCKER_ARGS}"
- alwaysPull true
- }
- }
- environment {
- platform = 'centos6'
- sm_ver = '1.8.5'
- }
- stages {
- stage('Build from tarball & test') {
- steps {
- unstash 'tarball'
- sh( script: build_and_test )
- }
- post {
- always {
- junit '**/.eunit/*.xml, **/_build/*/lib/couchdbtest/*.xml, **/src/mango/nosetests.xml, **/test/javascript/junit.xml'
- }
- }
- }
- stage('Build CouchDB packages') {
- steps {
- sh( script: make_packages )
- sh( script: cleanup_and_save )
- }
- post {
- success {
- archiveArtifacts artifacts: 'pkgs/**', fingerprint: true
- }
- }
- }
- } // stages
- post {
- cleanup {
- sh 'rm -rf ${WORKSPACE}/*'
- }
- } // post
- } // stage
-
stage('CentOS 7') {
agent {
docker {
- image 'couchdbdev/centos-7-erlang-20.3.8.25-1:latest'
+ image 'apache/couchdbci-centos:7-erlang-21.3.8.17-1'
label 'docker'
args "${DOCKER_ARGS}"
- alwaysPull true
+ registryUrl 'https://docker.io/'
+ registryCredentialsId 'dockerhub_creds'
}
}
environment {
@@ -287,10 +245,11 @@ pipeline {
stage('CentOS 8') {
agent {
docker {
- image 'couchdbdev/centos-8-erlang-20.3.8.25-1:latest'
+ image 'apache/couchdbci-centos:8-erlang-21.3.8.17-1'
label 'docker'
args "${DOCKER_ARGS}"
- alwaysPull true
+ registryUrl 'https://docker.io/'
+ registryCredentialsId 'dockerhub_creds'
}
}
environment {
@@ -332,10 +291,11 @@ pipeline {
stage('Ubuntu Xenial') {
agent {
docker {
- image 'couchdbdev/ubuntu-xenial-erlang-20.3.8.25-1:latest'
+ image 'apache/couchdbci-ubuntu:xenial-erlang-21.3.8.17-1'
label 'docker'
args "${DOCKER_ARGS}"
- alwaysPull true
+ registryUrl 'https://docker.io/'
+ registryCredentialsId 'dockerhub_creds'
}
}
environment {
@@ -376,10 +336,11 @@ pipeline {
stage('Ubuntu Bionic') {
agent {
docker {
- image 'couchdbdev/ubuntu-bionic-erlang-20.3.8.25-1:latest'
+ image 'apache/couchdbci-ubuntu:bionic-erlang-21.3.8.17-1'
label 'docker'
- alwaysPull true
args "${DOCKER_ARGS}"
+ registryUrl 'https://docker.io/'
+ registryCredentialsId 'dockerhub_creds'
}
}
environment {
@@ -420,10 +381,11 @@ pipeline {
stage('Ubuntu Focal') {
agent {
docker {
- image 'couchdbdev/ubuntu-focal-erlang-20.3.8.25-1:latest'
+ image 'apache/couchdbci-ubuntu:focal-erlang-21.3.8.17-1'
label 'docker'
- alwaysPull true
args "${DOCKER_ARGS}"
+ registryUrl 'https://docker.io/'
+ registryCredentialsId 'dockerhub_creds'
}
}
environment {
@@ -464,10 +426,11 @@ pipeline {
stage('Debian Stretch') {
agent {
docker {
- image 'couchdbdev/debian-stretch-erlang-20.3.8.25-1:latest'
+ image 'apache/couchdbci-debian:stretch-erlang-21.3.8.17-1'
label 'docker'
- alwaysPull true
args "${DOCKER_ARGS}"
+ registryUrl 'https://docker.io/'
+ registryCredentialsId 'dockerhub_creds'
}
}
environment {
@@ -508,10 +471,11 @@ pipeline {
stage('Debian Buster amd64') {
agent {
docker {
- image 'couchdbdev/debian-buster-erlang-20.3.8.25-1:latest'
+ image 'apache/couchdbci-debian:buster-erlang-21.3.8.17-1'
label 'docker'
- alwaysPull true
args "${DOCKER_ARGS}"
+ registryUrl 'https://docker.io/'
+ registryCredentialsId 'dockerhub_creds'
}
}
environment {
@@ -550,12 +514,14 @@ pipeline {
} // stage
stage('Debian Buster arm64v8') {
+ when { expression { return false } }
agent {
docker {
- image 'couchdbdev/arm64v8-debian-buster-erlang-20.3.8.25-1:latest'
+ image 'apache/couchdbci-debian:arm64v8-buster-erlang-21.3.8.17-1'
label 'arm64v8'
- alwaysPull true
args "${DOCKER_ARGS}"
+ registryUrl 'https://docker.io/'
+ registryCredentialsId 'dockerhub_creds'
}
}
environment {
@@ -600,10 +566,11 @@ pipeline {
// stage('Debian Buster ppc64le') {
// agent {
// docker {
-// image 'couchdbdev/ppc64le-debian-buster-erlang-20.3.8.25-1:latest'
+// image 'apache/couchdbci-debian:ppc64le-buster-erlang-21.3.8.17-1'
// label 'ppc64le'
-// alwaysPull true
// args "${DOCKER_ARGS}"
+// registryUrl 'https://docker.io/'
+// registryCredentialsId 'dockerhub_creds'
// }
// }
// environment {
@@ -669,12 +636,12 @@ pipeline {
}
stage('Pull latest docker image') {
steps {
- sh "docker pull couchdbdev/arm64v8-debian-buster-erlang-20.3.8.25-1:latest"
+ sh "docker pull apache/couchdbci-debian:arm64v8-buster-erlang-21.3.8.17-1"
}
}
stage('Build from tarball & test & packages') {
steps {
- withDockerContainer(image: "couchdbdev/arm64v8-debian-buster-erlang-20.3.8.25-1:latest", args: "${DOCKER_ARGS}") {
+ withDockerContainer(image: "apache/couchdbci-debian:arm64v8-buster-erlang-21.3.8.17-1", args: "${DOCKER_ARGS}") {
unstash 'tarball'
withEnv(['MIX_HOME='+pwd(), 'HEX_HOME='+pwd()]) {
sh( script: build_and_test )
@@ -714,10 +681,11 @@ pipeline {
agent {
docker {
- image 'couchdbdev/debian-buster-erlang-20.3.8.25-1:latest'
+ image 'apache/couchdbci-debian:buster-erlang-21.3.8.17-1'
label 'docker'
- alwaysPull true
args "${DOCKER_ARGS}"
+ registryUrl 'https://docker.io/'
+ registryCredentialsId 'dockerhub_creds'
}
}
options {
@@ -755,11 +723,9 @@ pipeline {
echo 'Building CentOS repos...'
sh '''
- cp js/centos-6/*rpm pkgs/centos6
cp js/centos-7/*rpm pkgs/centos7
cp js/centos-8/*rpm pkgs/centos8
- cd pkgs/centos6 && createrepo --database .
- cd ../centos7 && createrepo --database .
+ cd pkgs/centos7 && createrepo --database .
cd ../centos8 && createrepo --database .
'''
@@ -767,7 +733,6 @@ pipeline {
sh '''
mv couchdb-pkg/repo/pool $BRANCH_NAME/debian
mv couchdb-pkg/repo/dists $BRANCH_NAME/debian
- mv pkgs/centos6/* $BRANCH_NAME/el6
mv pkgs/centos7/* $BRANCH_NAME/el7
mv pkgs/centos8/* $BRANCH_NAME/el8
mv apache-couchdb-*.tar.gz $BRANCH_NAME/source
diff --git a/build-aux/Jenkinsfile.pr b/build-aux/Jenkinsfile.pr
index 001fd59ca..77362b193 100644
--- a/build-aux/Jenkinsfile.pr
+++ b/build-aux/Jenkinsfile.pr
@@ -20,7 +20,7 @@ mkdir build
cd build
tar -xf ${WORKSPACE}/apache-couchdb-*.tar.gz
cd apache-couchdb-*
-. /usr/local/kerl/${KERL_VER}/activate
+. /usr/local/kerl/${ERLANG_VERSION}/activate
./configure --spidermonkey-version 60
make check || (make build-report && false)
'''
@@ -39,18 +39,18 @@ pipeline {
GIT_COMMITTER_NAME = 'Jenkins User'
GIT_COMMITTER_EMAIL = 'couchdb@apache.org'
// Parameters for the matrix build
- DOCKER_IMAGE = 'couchdbdev/debian-buster-erlang-all:latest'
+ DOCKER_IMAGE = 'apache/couchdbci-debian:buster-erlang-all'
// https://github.com/jenkins-infra/jenkins.io/blob/master/Jenkinsfile#64
// We need the jenkins user mapped inside of the image
// npm config cache below deals with /home/jenkins not mapping correctly
// inside the image
DOCKER_ARGS = '-e npm_config_cache=npm-cache -e HOME=. -v=/etc/passwd:/etc/passwd -v /etc/group:/etc/group'
- // *** BE SURE TO CHANGE THE ERLANG VERSION FARTHER DOWN S WELL ***
+
+ // *** BE SURE TO ALSO CHANGE THE ERLANG VERSIONS FARTHER DOWN ***
// Search for ERLANG_VERSION
- // see https://issues.jenkins-ci.org/browse/JENKINS-40986
- LOW_ERLANG_VER = '20.3.8.11'
- MID_ERLANG_VER = '20.3.8.25'
- HIGH_ERLANG_VER = '22.2.3'
+ // see https://issues.jenkins.io/browse/JENKINS-61047 for why this cannot
+ // be done parametrically
+ LOW_ERLANG_VER = '21.3.8.22'
}
options {
@@ -69,7 +69,8 @@ pipeline {
image "${DOCKER_IMAGE}"
label 'docker'
args "${DOCKER_ARGS}"
- alwaysPull true
+ registryUrl 'https://docker.io/'
+ registryCredentialsId 'dockerhub_creds'
}
}
options {
@@ -106,7 +107,7 @@ pipeline {
axes {
axis {
name 'ERLANG_VERSION'
- values "20.3.8.11", "20.3.8.25", "22.2.3"
+ values '21.3.8.22', '22.3.4.17', '23.3.1'
}
}
@@ -117,12 +118,8 @@ pipeline {
image "${DOCKER_IMAGE}"
label 'docker'
args "${DOCKER_ARGS}"
- alwaysPull true
}
}
- environment {
- KERL_VER = "${ERLANG_VERSION}"
- }
options {
skipDefaultCheckout()
timeout(time: 90, unit: "MINUTES")
diff --git a/build-aux/show-test-results.py b/build-aux/show-test-results.py
index c76a88409..edd6ca13f 100755
--- a/build-aux/show-test-results.py
+++ b/build-aux/show-test-results.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.7
+#!/usr/bin/env python3
import argparse
import glob
@@ -198,15 +198,15 @@ def display_failures(collections):
if not len(failures):
return
- print "Failures"
- print "========"
- print
+ print("Failures")
+ print("========")
+ print()
for failure in failures:
- print failure[0]
- print "-" * len(failure[0])
- print
- print failure[1]
- print
+ print(failure[0])
+ print("-" * len(failure[0]))
+ print()
+ print(failure[1])
+ print()
def display_errors(collections):
@@ -220,15 +220,15 @@ def display_errors(collections):
if not len(errors):
return
- print "Errors"
- print "======"
- print
+ print("Errors")
+ print("======")
+ print()
for error in errors:
- print error[0]
- print "-" * len(error[0])
- print
- print error[1]
- print
+ print(error[0])
+ print("-" * len(error[0]))
+ print()
+ print(error[1])
+ print()
def display_skipped(collections):
@@ -242,12 +242,12 @@ def display_skipped(collections):
skipped.append((name, test.skipped_msg))
if not skipped:
return
- print "Skipped"
- print "======="
- print
+ print("Skipped")
+ print("=======")
+ print()
for row in sorted(skipped):
- print " %s: %s" % row
- print
+ print(" %s: %s" % row)
+ print()
def display_table(table):
@@ -263,7 +263,7 @@ def display_table(table):
table[ridx] = new_row
for row in table:
fmt = " ".join(["%10s"] * len(row))
- print fmt % tuple(row)
+ print(fmt % tuple(row))
def display_collections(collections, sort):
@@ -305,12 +305,12 @@ def display_collections(collections, sort):
rows.sort(key=skey)
- print "Collections"
- print "==========="
- print
+ print("Collections")
+ print("===========")
+ print()
headers = ["Total", "Fixture", "Test", "Count", "Failed", "Errors", "Skipped"]
display_table([headers] + rows)
- print
+ print()
def display_suites(collections, count, sort):
@@ -342,12 +342,12 @@ def display_suites(collections, count, sort):
rows = rows[:count]
- print "Suites"
- print "======"
- print
+ print("Suites")
+ print("======")
+ print()
headers = ["Total", "Fixture", "Test", "Count", "Failed", "Errors", "Skipped"]
display_table([headers] + rows)
- print
+ print()
def display_tests(collections, count):
@@ -367,11 +367,11 @@ def display_tests(collections, count):
rows.sort(key=skey)
rows = rows[:count]
- print "Tests"
- print "====="
- print
+ print("Tests")
+ print("=====")
+ print()
display_table(rows)
- print
+ print()
def main():
diff --git a/config/integration.exs b/config/integration.exs
index c5a5ed24a..796880266 100644
--- a/config/integration.exs
+++ b/config/integration.exs
@@ -5,8 +5,5 @@ config :logger,
compile_time_purge_level: :debug,
level: :debug
-config :kernel,
- error_logger: false
-
config :sasl,
sasl_error_logger: false
diff --git a/configure b/configure
index 0793d6837..07f02e802 100755
--- a/configure
+++ b/configure
@@ -29,7 +29,7 @@ ERLANG_MD5="false"
SKIP_DEPS=0
COUCHDB_USER="$(whoami 2>/dev/null || echo couchdb)"
-SM_VSN="1.8.5"
+SM_VSN=${SM_VSN:-"1.8.5"}
ARCH="$(uname -m)"
. ${rootdir}/version.mk
@@ -220,6 +220,7 @@ cat > rel/couchdb.config << EOF
{node_name, "-name couchdb@127.0.0.1"}.
{cluster_port, 5984}.
{backend_port, 5986}.
+{prometheus_port, 17986}.
EOF
cat > install.mk << EOF
diff --git a/dev/boot_node.erl b/dev/boot_node.erl
deleted file mode 100644
index 922a5ccb6..000000000
--- a/dev/boot_node.erl
+++ /dev/null
@@ -1,148 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(boot_node).
-
--export([start/0]).
-
-
-start() ->
- monitor_parent(),
- Apps = load_apps(),
- Deps = load_deps(Apps),
- start_all_apps(Deps).
-
-
-monitor_parent() ->
- {ok, [[PPid]]} = init:get_argument(parent_pid),
- spawn(fun() -> monitor_parent(PPid) end).
-
-
-monitor_parent(PPid) ->
- timer:sleep(1000),
- case os:type() of
- {unix, _} ->
- case os:cmd("kill -0 " ++ PPid) of
- "" ->
- monitor_parent(PPid);
- _Else ->
- % Assume _Else is a no such process error
- init:stop()
- end;
- {win32, _} ->
- Fmt = "tasklist /fi \"PID eq ~s\" /fo csv /nh",
- Retval = os:cmd(io_lib:format(Fmt, [PPid])),
- case re:run(Retval, "^\"python.exe\",*") of
- {match, _} ->
- monitor_parent(PPid);
- nomatch ->
- init:stop()
- end
- end.
-
-
-load_apps() ->
- {ok, [[Config]]} = init:get_argument(reltool_config),
- {ok, Terms} = file:consult(Config),
- load_apps(Terms).
-
-
-load_apps([]) ->
- erlang:error(failed_to_load_apps);
-load_apps([{sys, Terms} | _]) ->
- load_apps(Terms);
-load_apps([{rel, "couchdb", _Vsn, Apps} | _]) ->
- Apps;
-load_apps([_ | Rest]) ->
- load_apps(Rest).
-
-
-load_deps(Apps) ->
- load_deps(Apps, dict:new()).
-
-
-load_deps([], Deps) ->
- Deps;
-load_deps([App | Rest], Deps) ->
- load_app(App),
- case application:get_key(App, applications) of
- {ok, AppDeps0} ->
- NewDeps = dict:store(App, AppDeps0, Deps),
- Filter = fun(A) -> not dict:is_key(A, Deps) end,
- AppDeps = lists:filter(Filter, AppDeps0),
- load_deps(AppDeps ++ Rest, NewDeps);
- _ ->
- NewDeps = dict:store(App, [], Deps),
- load_deps(Rest, NewDeps)
- end.
-
-
-load_app(App) ->
- case application:load(App) of
- ok ->
- case application:get_key(App, modules) of
- {ok, Modules} ->
- lists:foreach(fun(Mod) ->
- case load_app_module(Mod) of
- ok -> ok;
- E -> io:format("~p = load_app_module(~p)~n", [E, Mod])
- end
- end, Modules);
- undefined ->
- ok
- end;
- {error, {already_loaded, App}} ->
- ok;
- Error ->
- Error
- end.
-
-
-load_app_module(Mod) ->
- case code:is_loaded(Mod) of
- {file, _} ->
- ok;
- _ ->
- case code:load_file(Mod) of
- {module, Mod} ->
- ok;
- Error ->
- Error
- end
- end.
-
-
-start_all_apps(Deps) ->
- lists:foldl(fun(App, Started) ->
- start_app(App, Deps, Started)
- end, [], dict:fetch_keys(Deps)).
-
-
-start_app(App, Deps, Started) ->
- case lists:member(App, Started) of
- true ->
- Started;
- false ->
- AppDeps = dict:fetch(App, Deps),
- NowStarted = lists:foldl(fun(Dep, Acc) ->
- start_app(Dep, Deps, Acc)
- end, Started, AppDeps),
- case application:start(App) of
- ok ->
- [App | NowStarted];
- {error, {already_started,App}} ->
- % Kernel causes this
- [App | NowStarted];
- Else ->
- erlang:error(Else)
- end
- end.
diff --git a/dev/make_boot_script b/dev/make_boot_script
new file mode 100755
index 000000000..549dd9a07
--- /dev/null
+++ b/dev/make_boot_script
@@ -0,0 +1,9 @@
+#!/usr/bin/env escript
+
+main(_) ->
+ {ok, Server} = reltool:start_server([
+ {config, "../rel/reltool.config"}
+ ]),
+ {ok, Release} = reltool:get_rel(Server, "couchdb"),
+ ok = file:write_file("devnode.rel", io_lib:format("~p.~n", [Release])),
+ ok = systools:make_script("devnode", [local]).
diff --git a/dev/monitor_parent.erl b/dev/monitor_parent.erl
new file mode 100644
index 000000000..382f37e9c
--- /dev/null
+++ b/dev/monitor_parent.erl
@@ -0,0 +1,43 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(monitor_parent).
+
+-export([start/0]).
+
+
+start() ->
+ {ok, [[PPid]]} = init:get_argument(parent_pid),
+ spawn(fun() -> monitor_parent(PPid) end).
+
+
+monitor_parent(PPid) ->
+ timer:sleep(1000),
+ case os:type() of
+ {unix, _} ->
+ case os:cmd("kill -0 " ++ PPid) of
+ "" ->
+ monitor_parent(PPid);
+ _Else ->
+ % Assume _Else is a no such process error
+ init:stop()
+ end;
+ {win32, _} ->
+ Fmt = "tasklist /fi \"PID eq ~s\" /fo csv /nh",
+ Retval = os:cmd(io_lib:format(Fmt, [PPid])),
+ case re:run(Retval, "^\"python.exe\",*") of
+ {match, _} ->
+ monitor_parent(PPid);
+ nomatch ->
+ init:stop()
+ end
+ end.
diff --git a/dev/run b/dev/run
index d620e21cd..3fe1b33b6 100755
--- a/dev/run
+++ b/dev/run
@@ -101,6 +101,7 @@ def setup():
setup_logging(ctx)
setup_dirs(ctx)
check_beams(ctx)
+ check_boot_script(ctx)
setup_configs(ctx)
return ctx
@@ -127,7 +128,7 @@ def get_args_parser():
"-n",
"--nodes",
metavar="nodes",
- default=3,
+ default=1,
type=int,
help="Number of development nodes to be spun up",
)
@@ -154,7 +155,7 @@ def get_args_parser():
parser.add_option(
"--no-join",
dest="no_join",
- default=False,
+ default=True,
action="store_true",
help="Do not join nodes on boot",
)
@@ -189,7 +190,7 @@ def get_args_parser():
"--erlang-config",
dest="erlang_config",
default="rel/files/sys.config",
- help="Specify an alternative Erlang application configuration"
+ help="Specify an alternative Erlang application configuration",
)
parser.add_option(
"--degrade-cluster",
@@ -275,10 +276,19 @@ def check_beams(ctx):
sp.check_call(["erlc", "-o", ctx["devdir"] + os.sep, fname])
+@log("Ensure Erlang boot script exists")
+def check_boot_script(ctx):
+ if not os.path.exists(os.path.join(ctx["devdir"], "devnode.boot")):
+ env = os.environ.copy()
+ env["ERL_LIBS"] = os.path.join(ctx["rootdir"], "src")
+ sp.check_call(["escript", "make_boot_script"], env=env, cwd=ctx["devdir"])
+
+
@log("Prepare configuration files")
def setup_configs(ctx):
for idx, node in enumerate(ctx["nodes"]):
- cluster_port, backend_port = get_ports(ctx, idx + ctx["node_number"])
+ cluster_port, backend_port, prometheus_port = get_ports(ctx,
+ idx + ctx["node_number"])
env = {
"prefix": toposixpath(ctx["rootdir"]),
"package_author_name": "The Apache Software Foundation",
@@ -291,6 +301,7 @@ def setup_configs(ctx):
"node_name": "-name %s@127.0.0.1" % node,
"cluster_port": cluster_port,
"backend_port": backend_port,
+ "prometheus_port": prometheus_port,
"uuid": "fake_uuid_for_dev",
"_default": "",
}
@@ -351,7 +362,8 @@ def apply_config_overrides(ctx, content):
def get_ports(ctx, idnode):
assert idnode
if idnode <= 5 and not ctx["auto_ports"]:
- return ((10000 * idnode) + 5984, (10000 * idnode) + 5986)
+ return ((10000 * idnode) + 5984, (10000 * idnode) + 5986,
+ (10000 * idnode) + 7986)
else:
-        return tuple(get_available_ports(2))
+        return tuple(get_available_ports(3))
@@ -599,10 +611,9 @@ def set_boot_env(ctx):
@log("Start node {node}")
def boot_node(ctx, node):
- erl_libs = os.path.join(ctx["rootdir"], "src")
set_boot_env(ctx)
env = os.environ.copy()
- env["ERL_LIBS"] = os.pathsep.join([erl_libs])
+ env["ERL_LIBS"] = os.path.join(ctx["rootdir"], "src")
node_etcdir = os.path.join(ctx["devdir"], "lib", node, "etc")
reldir = os.path.join(ctx["rootdir"], "rel")
@@ -621,11 +632,12 @@ def boot_node(ctx, node):
os.path.join(reldir, "reltool.config"),
"-parent_pid",
str(os.getpid()),
+ "-boot",
+ os.path.join(ctx["devdir"], "devnode"),
"-pa",
ctx["devdir"],
+ "-s monitor_parent",
]
- cmd += [p[:-1] for p in glob.glob(erl_libs + "/*/")]
- cmd += ["-s", "boot_node"]
if ctx["reset_logs"]:
mode = "wb"
else:
@@ -782,7 +794,7 @@ def try_request(
def create_system_databases(host, port):
- for dbname in ["_users", "_replicator", "_global_changes"]:
+ for dbname in ["_users", "_replicator"]:
conn = httpclient.HTTPConnection(host, port)
conn.request("HEAD", "/" + dbname)
resp = conn.getresponse()
diff --git a/emilio.config b/emilio.config
index 0dad93898..84a6571d2 100644
--- a/emilio.config
+++ b/emilio.config
@@ -8,13 +8,11 @@
"src[\/]emilio[\/]*",
"src[\/]folsom[\/]*",
"src[\/]mochiweb[\/]*",
- "src[\/]snappy[\/]*",
"src[\/]ssl_verify_fun[\/]*",
"src[\/]ibrowse[\/]*",
"src[\/]jiffy[\/]*",
"src[\/]meck[\/]*",
"src[\/]proper[\/]*",
"src[\/]recon[\/]*",
- "src[\/]hyper[\/]*",
- "src[\/]triq[\/]*"
+ "src[\/]hyper[\/]*"
]}.
diff --git a/erlang_ls.config b/erlang_ls.config
new file mode 100644
index 000000000..ffc769e1b
--- /dev/null
+++ b/erlang_ls.config
@@ -0,0 +1,10 @@
+include_dirs:
+ - "src/"
+ - "src/*/include/"
+macros:
+ - name: COUCHDB_VERSION
+ value: erlangls
+ - name: COUCHDB_GIT_SHA
+ value: deadbeef
+ - name: AEGIS_KEY_MANAGER
+ value: aegis_noop_key_manager
diff --git a/mix.exs b/mix.exs
index ae42af5d6..12e02215f 100644
--- a/mix.exs
+++ b/mix.exs
@@ -20,6 +20,29 @@ defmodule CoverTool do
end
end
+defmodule Mix.Tasks.Suite do
+ @moduledoc """
+  Helper task to create the `suite.elixir` file. It is meant to be used as follows
+ ```
+ MIX_ENV=integration mix suite > test/elixir/test/config/suite.elixir
+ ```
+ """
+ use Mix.Task
+  @shortdoc "Outputs all available integration tests"
+ def run(_) do
+ Path.wildcard(Path.join(Mix.Project.build_path(), "/**/ebin"))
+ |> Enum.filter(&File.dir?/1)
+ |> Enum.map(&Code.append_path/1)
+
+ tests =
+ Couch.Test.Suite.list()
+ |> Enum.sort()
+ |> Couch.Test.Suite.group_by()
+
+ IO.puts(Couch.Test.Suite.pretty_print(tests))
+ end
+end
+
defmodule CouchDBTest.Mixfile do
use Mix.Project
@@ -70,7 +93,7 @@ defmodule CouchDBTest.Mixfile do
{:jwtf, path: Path.expand("src/jwtf", __DIR__)},
{:ibrowse,
path: Path.expand("src/ibrowse", __DIR__), override: true, compile: false},
- {:credo, "~> 1.4.0", only: [:dev, :test, :integration], runtime: false}
+ {:credo, "~> 1.5.4", only: [:dev, :test, :integration], runtime: false}
]
end
@@ -110,16 +133,13 @@ defmodule CouchDBTest.Mixfile do
"b64url",
"bear",
"mochiweb",
- "snappy",
"rebar",
"proper",
"mochiweb",
"meck",
- "khash",
"hyper",
"fauxton",
- "folsom",
- "hqueue"
+ "folsom"
]
deps |> Enum.map(fn app -> "src/#{app}" end)
diff --git a/mix.lock b/mix.lock
index 8b6489f0c..505d7f22e 100644
--- a/mix.lock
+++ b/mix.lock
@@ -1,13 +1,14 @@
%{
"bunt": {:hex, :bunt, "0.2.0", "951c6e801e8b1d2cbe58ebbd3e616a869061ddadcc4863d0a2182541acae9a38", [:mix], [], "hexpm", "7af5c7e09fe1d40f76c8e4f9dd2be7cebd83909f31fee7cd0e9eadc567da8353"},
"certifi": {:hex, :certifi, "2.5.1", "867ce347f7c7d78563450a18a6a28a8090331e77fa02380b4a21962a65d36ee5", [:rebar3], [{:parse_trans, "~>3.3", [hex: :parse_trans, repo: "hexpm", optional: false]}], "hexpm", "805abd97539caf89ec6d4732c91e62ba9da0cda51ac462380bbd28ee697a8c42"},
- "credo": {:hex, :credo, "1.4.0", "92339d4cbadd1e88b5ee43d427b639b68a11071b6f73854e33638e30a0ea11f5", [:mix], [{:bunt, "~> 0.2.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "1fd3b70dce216574ce3c18bdf510b57e7c4c85c2ec9cad4bff854abaf7e58658"},
+ "credo": {:hex, :credo, "1.5.4", "9914180105b438e378e94a844ec3a5088ae5875626fc945b7c1462b41afc3198", [:mix], [{:bunt, "~> 0.2.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2.8", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "cf51af45eadc0a3f39ba13b56fdac415c91b34f7b7533a13dc13550277141bc4"},
"excoveralls": {:hex, :excoveralls, "0.12.1", "a553c59f6850d0aff3770e4729515762ba7c8e41eedde03208182a8dc9d0ce07", [:mix], [{:hackney, "~> 1.0", [hex: :hackney, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "5c1f717066a299b1b732249e736c5da96bb4120d1e55dc2e6f442d251e18a812"},
+ "file_system": {:hex, :file_system, "0.2.10", "fb082005a9cd1711c05b5248710f8826b02d7d1784e7c3451f9c1231d4fc162d", [:mix], [], "hexpm", "41195edbfb562a593726eda3b3e8b103a309b733ad25f3d642ba49696bf715dc"},
"hackney": {:hex, :hackney, "1.15.2", "07e33c794f8f8964ee86cebec1a8ed88db5070e52e904b8f12209773c1036085", [:rebar3], [{:certifi, "2.5.1", [hex: :certifi, repo: "hexpm", optional: false]}, {:idna, "6.0.0", [hex: :idna, repo: "hexpm", optional: false]}, {:metrics, "1.0.1", [hex: :metrics, repo: "hexpm", optional: false]}, {:mimerl, "~>1.1", [hex: :mimerl, repo: "hexpm", optional: false]}, {:ssl_verify_fun, "1.1.5", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}], "hexpm", "e0100f8ef7d1124222c11ad362c857d3df7cb5f4204054f9f0f4a728666591fc"},
"httpotion": {:hex, :httpotion, "3.1.3", "fdaf1e16b9318dcb722de57e75ac368c93d4c6e3c9125f93e960f953a750fb77", [:mix], [{:ibrowse, "== 4.4.0", [hex: :ibrowse, repo: "hexpm", optional: false]}], "hexpm", "e420172ef697a0f1f4dc40f89a319d5a3aad90ec51fa424f08c115f04192ae43"},
"ibrowse": {:hex, :ibrowse, "4.4.0", "2d923325efe0d2cb09b9c6a047b2835a5eda69d8a47ed6ff8bc03628b764e991", [:rebar3], [], "hexpm"},
"idna": {:hex, :idna, "6.0.0", "689c46cbcdf3524c44d5f3dde8001f364cd7608a99556d8fbd8239a5798d4c10", [:rebar3], [{:unicode_util_compat, "0.4.1", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "4bdd305eb64e18b0273864920695cb18d7a2021f31a11b9c5fbcd9a253f936e2"},
- "jason": {:hex, :jason, "1.2.1", "12b22825e22f468c02eb3e4b9985f3d0cb8dc40b9bd704730efa11abd2708c44", [:mix], [{:decimal, "~> 1.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "b659b8571deedf60f79c5a608e15414085fa141344e2716fbd6988a084b5f993"},
+ "jason": {:hex, :jason, "1.2.2", "ba43e3f2709fd1aa1dce90aaabfd039d000469c05c56f0b8e31978e03fa39052", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "18a228f5f0058ee183f29f9eae0805c6e59d61c3b006760668d8d18ff0d12179"},
"jiffy": {:hex, :jiffy, "0.15.2", "de266c390111fd4ea28b9302f0bc3d7472468f3b8e0aceabfbefa26d08cd73b7", [:rebar3], [], "hexpm"},
"junit_formatter": {:hex, :junit_formatter, "3.0.0", "13950d944dbd295da7d8cc4798b8faee808a8bb9b637c88069954eac078ac9da", [:mix], [], "hexpm", "d77b7b9a1601185b18dfe7682b27c46d5d12721f12fdc75180a6fc573b4e64b1"},
"metrics": {:hex, :metrics, "1.0.1", "25f094dea2cda98213cecc3aeff09e940299d950904393b2a29d191c346a8486", [:rebar3], [], "hexpm", "69b09adddc4f74a40716ae54d140f93beb0fb8978d8636eaded0c31b6f099f16"},
diff --git a/rebar.config.script b/rebar.config.script
index 54f0572eb..b44dba0ee 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -118,34 +118,21 @@ SubDirs = [
"src/chttpd",
"src/couch",
"src/couch_eval",
- "src/couch_event",
- "src/mem3",
- "src/couch_index",
- "src/couch_mrview",
"src/couch_js",
+ "src/couch_lib",
"src/couch_replicator",
- "src/couch_plugins",
- "src/couch_pse_tests",
"src/couch_stats",
- "src/couch_peruser",
"src/couch_tests",
"src/couch_views",
"src/ctrace",
- "src/ddoc_cache",
- "src/dreyfus",
"src/fabric",
"src/aegis",
"src/couch_jobs",
"src/couch_expiring_cache",
- "src/global_changes",
- "src/ioq",
"src/jwtf",
- "src/ken",
"src/mango",
- "src/rexi",
- "src/setup",
- "src/smoosh",
"src/ebtree",
+ "src/couch_prometheus",
"rel"
].
@@ -153,22 +140,20 @@ DepDescs = [
%% Independent Apps
{config, "config", {tag, "2.1.8"}},
{b64url, "b64url", {tag, "1.0.2"}},
-{erlfdb, "erlfdb", {tag, "v1.2.2"}},
+{erlfdb, "erlfdb", {tag, "v1.3.4"}},
{ets_lru, "ets-lru", {tag, "1.1.0"}},
-{khash, "khash", {tag, "1.1.0"}},
-{snappy, "snappy", {tag, "CouchDB-1.0.4"}},
%% Non-Erlang deps
{docs, {url, "https://github.com/apache/couchdb-documentation"},
{tag, "3.1.0-RC1"}, [raw]},
{fauxton, {url, "https://github.com/apache/couchdb-fauxton"},
- {tag, "v1.2.4"}, [raw]},
+ {tag, "v1.2.6"}, [raw]},
%% Third party deps
{folsom, "folsom", {tag, "CouchDB-0.8.3"}},
{hyper, "hyper", {tag, "CouchDB-2.2.0-6"}},
{ibrowse, "ibrowse", {tag, "CouchDB-4.4.1-1"}},
-{jaeger_passage, "jaeger-passage", {tag, "CouchDB-0.1.14-1"}},
-{jiffy, "jiffy", {tag, "CouchDB-1.0.4-1"}},
+{jaeger_passage, "jaeger-passage", {tag, "CouchDB-0.1.14-2"}},
+{jiffy, "jiffy", {tag, "CouchDB-1.0.5-1"}},
{local, "local", {tag, "0.2.1"}},
{mochiweb, "mochiweb", {tag, "v2.20.0"}},
{meck, "meck", {tag, "0.8.8"}},
@@ -207,7 +192,7 @@ ErlOpts = case os:getenv("ERL_OPTS") of
end.
AddConfig = [
- {require_otp_vsn, "19|20|21|22"},
+ {require_otp_vsn, "21|22|23"},
{deps_dir, "src"},
{deps, lists:map(MakeDep, DepDescs ++ OptionalDeps)},
{sub_dirs, SubDirs},
diff --git a/rel/apps/couch_epi.config b/rel/apps/couch_epi.config
index f9f49e1c3..db85ef1e2 100644
--- a/rel/apps/couch_epi.config
+++ b/rel/apps/couch_epi.config
@@ -14,12 +14,7 @@
couch_db_epi,
fabric2_epi,
chttpd_epi,
- couch_index_epi,
couch_views_epi,
couch_replicator_epi,
- dreyfus_epi,
- global_changes_epi,
- mango_epi,
- mem3_epi,
- setup_epi
+ mango_epi
]}.
diff --git a/rel/files/eunit.config b/rel/files/eunit.config
index 5e96fae9e..4e49c6d17 100644
--- a/rel/files/eunit.config
+++ b/rel/files/eunit.config
@@ -13,5 +13,18 @@
[
{kernel, [{error_logger, silent}]},
{sasl, [{sasl_error_logger, false}]},
+
+ % When fabric is configured with eunit_run=true it will ask erlfdb for a
+; test database. The default behavior for erlfdb in this case is to start a
+ % new fdbserver process for the test. If you would rather have erlfdb
+ % connect to an existing FoundationDB cluster, you can supply the path
+ % to the cluster file as a binary string here.
+ %
+ % NOTE: the unit tests will erase all the data in the cluster!
+ %
+ % The docker-compose configuration in the .devcontainer activates this
+ % application setting using ERL_ZFLAGS in the container environment, so
+ % any tests will use the fdbserver running in the fdb container.
+ % {erlfdb, [{test_cluster_file, <<"/usr/local/etc/foundationdb/fdb.cluster">>}]},
{fabric, [{eunit_run, true}]}
].
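
A minimal sketch (reusing the example cluster-file path from the comment above)
of confirming from an attached Erlang shell that erlfdb picked up the setting
once it has been uncommented; application:get_env/2 is standard OTP:

    1> application:get_env(erlfdb, test_cluster_file).
    {ok,<<"/usr/local/etc/foundationdb/fdb.cluster">>}
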
diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index 8970be572..3d15eb48a 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -4,33 +4,17 @@ name = {{package_author_name}}
[couchdb]
uuid = {{uuid}}
-database_dir = {{data_dir}}
-view_index_dir = {{view_index_dir}}
; util_driver_dir =
; plugin_dir =
os_process_timeout = 5000 ; 5 seconds. for view servers.
max_dbs_open = 500
-; Method used to compress everything that is appended to database and view index files, except
-; for attachments (see the attachments section). Available methods are:
-;
-; none - no compression
-; snappy - use google snappy, a very fast compressor/decompressor
-; deflate_N - use zlib's deflate, N is the compression level which ranges from 1 (fastest,
-; lowest compression ratio) to 9 (slowest, highest compression ratio)
-file_compression = snappy
-; Higher values may give better read performance due to less read operations
-; and/or more OS page cache hits, but they can also increase overall response
-; time for writes when there are many attachment write requests in parallel.
-attachment_stream_buffer_size = 4096
; Default security object for databases if not explicitly set
; everyone - same as couchdb 1.0, everyone can read/write
; admin_only - only admins can read/write
; admin_local - sharded dbs on :5984 are read/write for everyone,
; local dbs on :5986 are read/write for admins only
default_security = admin_only
-; btree_chunk_size = 1279
; maintenance_mode = false
-; stem_interactive_updates = true
; uri_file =
; The speed of processing the _changes feed with doc_ids filter can be
; influenced directly with this setting - increase for faster processing at the
@@ -64,68 +48,15 @@ max_document_size = 8000000 ; bytes
; Maximum attachment size.
; max_attachment_size = infinity
;
-; Do not update the least recently used DB cache on reads, only writes
-;update_lru_on_read = false
-;
-; The default storage engine to use when creating databases
-; is set as a key into the [couchdb_engines] section.
-default_engine = couch
-;
; Enable this to only "soft-delete" databases when DELETE /{db} requests are
; made. This will place a .recovery directory in your data directory and
; move deleted databases/shards there instead. You can then manually delete
; these files later, as desired.
;enable_database_recovery = false
;
-; Set the maximum size allowed for a partition. This helps users avoid
-; inadvertently abusing partitions resulting in hot shards. The default
-; is 10GiB. A value of 0 or less will disable partition size checks.
-;max_partition_size = 10737418240
-;
-; When true, system databases _users and _replicator are created immediately
-; on startup if not present.
-;single_node = false
-
; Allow edits on the _security object in the user db. By default, it's disabled.
users_db_security_editable = false
-[purge]
-; Allowed maximum number of documents in one purge request
-;max_document_id_number = 100
-;
-; Allowed maximum number of accumulated revisions in one purge request
-;max_revisions_number = 1000
-;
-; Allowed durations when index is not updated for local purge checkpoint
-; document. Default is 24 hours.
-;index_lag_warn_seconds = 86400
-
-[couchdb_engines]
-; The keys in this section are the filename extension that
-; the specified engine module will use. This is important so
-; that couch_server is able to find an existing database without
-; having to ask every configured engine.
-couch = couch_bt_engine
-
-[process_priority]
-; Selectively disable altering process priorities for modules that request it.
-; * NOTE: couch_server priority has been shown to lead to CouchDB hangs and
-; failures on Erlang releases 21.0 - 21.3.8.12 and 22.0 -> 22.2.4. Do not
-; enable when running with those versions.
-;couch_server = false
-
-[cluster]
-q=2
-n=3
-; placement = metro-dc-a:2,metro-dc-b:1
-
-; Supply a comma-delimited list of node names that this node should
-; contact in order to join a cluster. If a seedlist is configured the ``_up``
-; endpoint will return a 404 until the node has successfully contacted at
-; least one of the members of the seedlist and replicated an up-to-date copy
-; of the ``_nodes``, ``_dbs``, and ``_users`` system databases.
-; seedlist = couchdb@node1.example.com,couchdb@node2.example.com
-
[chttpd]
; These settings affect the main, clustered port (5984 by default).
port = {{cluster_port}}
@@ -178,23 +109,6 @@ max_db_number_for_dbs_info_req = 100
; rsa:foo = -----BEGIN PUBLIC KEY-----\nMIIBIjAN...IDAQAB\n-----END PUBLIC KEY-----\n
; ec:bar = -----BEGIN PUBLIC KEY-----\nMHYwEAYHK...AzztRs\n-----END PUBLIC KEY-----\n
-[couch_peruser]
-; If enabled, couch_peruser ensures that a private per-user database
-; exists for each document in _users. These databases are writable only
-; by the corresponding user. Databases are in the following form:
-; userdb-{hex encoded username}
-enable = false
-; If set to true and a user is deleted, the respective database gets
-; deleted as well.
-delete_dbs = false
-; Set a default q value for peruser-created databases that is different from
-; cluster / q
-;q = 1
-; prefix for user databases. If you change this after user dbs have been
-; created, the existing databases won't get deleted if the associated user
-; gets deleted because of the then prefix mismatch.
-database_prefix = userdb-
-
[httpd]
port = {{backend_port}}
bind_address = 127.0.0.1
@@ -220,13 +134,6 @@ enable_xframe_options = false
; Maximum allowed http request size. Applies to both clustered and local port.
max_http_request_size = 4294967296 ; 4GB
-; [httpd_design_handlers]
-; _view =
-
-; [ioq]
-; concurrency = 10
-; ratio = 0.01
-
[ssl]
port = 6984
@@ -238,23 +145,7 @@ port = 6984
; max_objects =
; max_size = 104857600
-; [mem3]
-; nodes_db = _nodes
-; shard_cache_size = 25000
-; shards_db = _dbs
-; sync_concurrency = 10
-
; [fabric]
-; all_docs_concurrency = 10
-; changes_duration =
-; shard_timeout_factor = 2
-; uuid_prefix_len = 7
-; request_timeout = 60000
-; all_docs_timeout = 10000
-; attachments_timeout = 60000
-; view_timeout = 3600000
-; partition_view_timeout = 3600000
-;
; Custom FDB directory prefix. All the nodes of the same CouchDB instance
; should have a matching directory prefix in order to read and write the same
; data. Changes to this value take effect only on node start-up.
@@ -279,26 +170,6 @@ port = 6984
; Bulk docs transaction batch size in bytes
;update_docs_batch_size = 2500000
-; [rexi]
-; buffer_count = 2000
-; server_per_node = true
-; stream_limit = 5
-;
-; Use a single message to kill a group of remote workers This is
-; mostly is an upgrade clause to allow operating in a mixed cluster of
-; 2.x and 3.x nodes. After upgrading switch to true to save some
-; network bandwidth
-;use_kill_all = false
-
-; [global_changes]
-; max_event_delay = 25
-; max_write_delay = 500
-; update_db = true
-
-; [view_updater]
-; min_writer_items = 100
-; min_writer_size = 16777216
-
[couch_httpd_auth]
; WARNING! This only affects the node-local port (5986 by default).
; You probably want the settings under [chttpd].
@@ -312,6 +183,10 @@ iterations = 10 ; iterations for password hashing
; min_iterations = 1
; max_iterations = 1000000000
; password_scheme = pbkdf2
+; List of Erlang RegExp, or tuples of a RegExp and an optional error message.
+; A new password must match every RegExp in the list.
+; Example: [{".{10,}", "Password min length is 10 characters."}, "\\d+"]
+; password_regexp = []
; proxy_use_secret = false
; comma-separated list of public fields, 404 if empty
; public_fields =
@@ -323,6 +198,9 @@ iterations = 10 ; iterations for password hashing
; Settings for view indexing
[couch_views]
+; Enable custom reduce functions
+;custom_reduce_enabled = true
+
; Maximum acceptors waiting to accept view indexing jobs
;max_acceptors = 5
;
@@ -340,14 +218,43 @@ iterations = 10 ; iterations for password hashing
;
; The maximum size of B+Tree nodes used by view btrees
;view_btree_node_size = 100
-;
+
; Batch size sensing parameters
-; batch_initial_size = 100 ; Initial batch size in number of documents
-; batch_search_increment = 500 ; Size change when searching for the threshold
-; batch_sense_increment = 100 ; Size change increment after hitting a threshold
-; batch_max_tx_size_bytes = 9000000 ; Maximum transaction size in bytes
-; batch_max_tx_time_msec = 4500 ; Maximum transaction time in milliseconds
-; batch_thresold_penalty = 0.2 ; Amount to reduce batch size when crossing a threshold
+;
+; Initial batch size in number of documents
+;batch_initial_size = 100
+;
+; Size change when searching for the threshold
+;batch_search_increment = 500
+;
+; Size change increment after hitting a threshold
+;batch_sense_increment = 100
+;
+; Maximum transaction size in bytes
+;batch_max_tx_size_bytes = 9000000
+;
+; Maximum transaction time used by the view indexer in
+; milliseconds. This value should be set such that the indexing
+; transaction GRVs would live long enough for clients waiting on the
+; indexer to be able to re-use the same GRVs to emit include_docs
+; bodies.
+;batch_max_tx_time_msec = 1500
+;
+; Amount to reduce batch size when crossing a threshold
+;batch_thresold_penalty = 0.2
+
+; Set the FDB transaction retry limit applied to the main indexing transaction
+; for retriable errors. These errors include 1007 (transaction_too_old), 1020
+; (commit conflict) and others. This value overrides the retry_limit setting
+; from fdb transaction settings in the [fdb_tx_options] section. Setting this
+; value may interfere with the batching algorithm as it's designed to adjust
+; the batch size in response to erlfdb errors. Too large a value would
+; re-run the same transaction too many times in a row, wasting resources.
+; However, the value shouldn't be too small either, as in some cases the
+; errors are recoverable, such as when the FDB server fast-forwards time
+; during transaction system recovery.
+;
+;indexer_tx_retry_limit = 5
; CSP (Content Security Policy) Support for _utils
[csp]
@@ -408,7 +315,6 @@ os_process_limit = 100
; os_process_soft_limit = 100
; Timeout for how long a response from a busy view group server can take.
; "infinity" is also a valid configuration value.
-;group_info_timeout = 5000
;query_limit = 268435456
;partition_query_limit = 268435456
@@ -430,15 +336,12 @@ query = mango_eval
; the warning.
;index_scan_warning_threshold = 10
-[indexers]
-couch_mrview = true
-
[feature_flags]
; This enables any database to be created as a partitioned database (except system db's).
; Setting this to false will stop the creation of partitioned databases.
; partitioned||allowed* = true will scope the creation of partitioned databases
; to databases with 'allowed' prefix.
-partitioned||* = true
+; partitioned||* = true
[uuids]
; Known algorithms:
@@ -609,6 +512,11 @@ level = info
;
; max_message_size = 16000
;
+; Do not log the last message received by a terminated process
+; strip_last_msg = true
+;
+; List of fields to remove before logging the crash report
+; filter_fields = [pid, registered_name, error_info, messages]
;
; There are four different log writers that can be configured
; to write log messages. The default writes to stderr of the
@@ -662,86 +570,6 @@ writer = stderr
; Stats collection interval in seconds. Default 10 seconds.
;interval = 10
-[smoosh.ratio_dbs]
-min_priority = 2.0
-
-[smoosh.ratio_views]
-min_priority = 2.0
-
-[ioq]
-; The maximum number of concurrent in-flight IO requests that
-concurrency = 10
-
-; The fraction of the time that a background IO request will be selected
-; over an interactive IO request when both queues are non-empty
-ratio = 0.01
-
-[ioq.bypass]
-; System administrators can choose to submit specific classes of IO directly
-; to the underlying file descriptor or OS process, bypassing the queues
-; altogether. Installing a bypass can yield higher throughput and lower
-; latency, but relinquishes some control over prioritization. The following
-; classes are recognized with the following defaults:
-
-; Messages on their way to an external process (e.g., couchjs) are bypassed
-os_process = true
-
-; Disk IO fulfilling interactive read requests is bypassed
-read = true
-
-; Disk IO required to update a database is bypassed
-write = true
-
-; Disk IO required to update views and other secondary indexes is bypassed
-view_update = true
-
-; Disk IO issued by the background replication processes that fix any
-; inconsistencies between shard copies is queued
-shard_sync = false
-
-; Disk IO issued by compaction jobs is queued
-compaction = false
-
-[dreyfus]
-; The name and location of the Clouseau Java service required to
-; enable Search functionality.
-; name = clouseau@127.0.0.1
-
-; CouchDB will try to re-connect to Clouseau using a bounded
-; exponential backoff with the following number of iterations.
-; retry_limit = 5
-
-; The default number of results returned from a global search query.
-; limit = 25
-
-; The default number of results returned from a search on a partition
-; of a database.
-; limit_partitions = 2000
-
-; The maximum number of results that can be returned from a global
-; search query (or any search query on a database without user-defined
-; partitions). Attempts to set ?limit=N higher than this value will
-; be rejected.
-; max_limit = 200
-
-; The maximum number of results that can be returned when searching
-; a partition of a database. Attempts to set ?limit=N higher than this
-; value will be rejected. If this config setting is not defined,
-; CouchDB will use the value of `max_limit` instead. If neither is
-; defined, the default is 2000 as stated here.
-; max_limit_partitions = 2000
-
-[reshard]
-;max_jobs = 48
-;max_history = 20
-;max_retries = 1
-;retry_interval_sec = 10
-;delete_source = true
-;update_shard_map_timeout_sec = 60
-;source_close_timeout_sec = 600
-;require_node_param = false
-;require_range_param = false
-
[couch_jobs]
;
; Maximum jitter used when checking for active job timeouts
@@ -868,3 +696,8 @@ compaction = false
; The interval in seconds of how often the expiration check runs.
;cache_expiration_check_sec = 10
+
+[prometheus]
+additional_port = false
+bind_address = 127.0.0.1
+port = {{prometheus_port}}
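
The rendered [prometheus] block above pairs with the per-node port scheme in
dev/run ((10000 * node) + 7986, so node 1 gets 17986). A minimal sketch of how
a consumer could read these keys through the couch "config" application; the
prometheus_opts/0 helper below is hypothetical, only the section and key names
come from the ini block above:

    %% Hypothetical helper: read the new [prometheus] keys with typed getters,
    %% falling back to the defaults shown in default.ini.
    prometheus_opts() ->
        #{
            additional_port => config:get_boolean("prometheus", "additional_port", false),
            bind_address => config:get("prometheus", "bind_address", "127.0.0.1"),
            port => config:get_integer("prometheus", "port", 17986)
        }.
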
diff --git a/rel/overlay/etc/local.ini b/rel/overlay/etc/local.ini
index 2c9e89955..b788e822f 100644
--- a/rel/overlay/etc/local.ini
+++ b/rel/overlay/etc/local.ini
@@ -8,19 +8,6 @@
;max_document_size = 4294967296 ; bytes
;os_process_timeout = 5000
-[couch_peruser]
-; If enabled, couch_peruser ensures that a private per-user database
-; exists for each document in _users. These databases are writable only
-; by the corresponding user. Databases are in the following form:
-; userdb-{hex encoded username}
-;enable = true
-; If set to true and a user is deleted, the respective database gets
-; deleted as well.
-;delete_dbs = true
-; Set a default q value for peruser-created databases that is different from
-; cluster / q
-;q = 1
-
[chttpd]
;port = 5984
;bind_address = 127.0.0.1
diff --git a/rel/reltool.config b/rel/reltool.config
index 0cc8dbb13..a1cc938c1 100644
--- a/rel/reltool.config
+++ b/rel/reltool.config
@@ -30,42 +30,34 @@
b64url,
bear,
chttpd,
+ ctrace,
config,
couch,
couch_epi,
couch_jobs,
- couch_index,
+ couch_lib,
couch_log,
- couch_mrview,
- couch_plugins,
couch_replicator,
couch_stats,
couch_eval,
couch_js,
- couch_event,
- couch_peruser,
couch_views,
- ddoc_cache,
- dreyfus,
ebtree,
+ erlfdb,
ets_lru,
fabric,
folsom,
- global_changes,
hyper,
ibrowse,
- ioq,
+ jaeger_passage,
jiffy,
jwtf,
- ken,
- khash,
+ local,
mango,
- mem3,
mochiweb,
- rexi,
- setup,
- smoosh,
- snappy,
+ passage,
+ thrift_protocol,
+ couch_prometheus,
%% extra
recon
]},
@@ -95,42 +87,34 @@
{app, b64url, [{incl_cond, include}]},
{app, bear, [{incl_cond, include}]},
{app, chttpd, [{incl_cond, include}]},
+ {app, ctrace, [{incl_cond, include}]},
{app, config, [{incl_cond, include}]},
{app, couch, [{incl_cond, include}]},
{app, couch_epi, [{incl_cond, include}]},
{app, couch_eval, [{incl_cond, include}]},
{app, couch_js, [{incl_cond, include}]},
{app, couch_jobs, [{incl_cond, include}]},
- {app, couch_index, [{incl_cond, include}]},
+ {app, couch_lib, [{incl_cond, include}]},
{app, couch_log, [{incl_cond, include}]},
- {app, couch_mrview, [{incl_cond, include}]},
- {app, couch_plugins, [{incl_cond, include}]},
{app, couch_replicator, [{incl_cond, include}]},
{app, couch_stats, [{incl_cond, include}]},
- {app, couch_event, [{incl_cond, include}]},
- {app, couch_peruser, [{incl_cond, include}]},
{app, couch_views, [{incl_cond, include}]},
- {app, ddoc_cache, [{incl_cond, include}]},
- {app, dreyfus, [{incl_cond, include}]},
+ {app, erlfdb, [{incl_cond, include}]},
{app, ebtree, [{incl_cond, include}]},
{app, ets_lru, [{incl_cond, include}]},
{app, fabric, [{incl_cond, include}]},
{app, folsom, [{incl_cond, include}]},
- {app, global_changes, [{incl_cond, include}]},
{app, hyper, [{incl_cond, include}]},
{app, ibrowse, [{incl_cond, include}]},
- {app, ioq, [{incl_cond, include}]},
+ {app, jaeger_passage, [{incl_cond, include}]},
{app, jiffy, [{incl_cond, include}]},
{app, jwtf, [{incl_cond, include}]},
- {app, ken, [{incl_cond, include}]},
- {app, khash, [{incl_cond, include}]},
+ {app, local, [{incl_cond, include}]},
{app, mango, [{incl_cond, include}]},
- {app, mem3, [{incl_cond, include}]},
{app, mochiweb, [{incl_cond, include}]},
- {app, rexi, [{incl_cond, include}]},
- {app, setup, [{incl_cond, include}]},
- {app, smoosh, [{incl_cond, include}]},
- {app, snappy, [{incl_cond, include}]},
+ {app, passage, [{incl_cond, include}]},
+ {app, thrift_protocol, [{incl_cond, include}]},
+ {app, couch_prometheus, [{incl_cond, include}]},
%% extra
{app, recon, [{incl_cond, include}]}
diff --git a/setup_eunit.template b/setup_eunit.template
index 97bee466c..3625441bd 100644
--- a/setup_eunit.template
+++ b/setup_eunit.template
@@ -2,6 +2,7 @@
{package_author_name, "The Apache Software Foundation"},
{cluster_port, 5984},
{backend_port, 5986},
+ {prometheus_port, 17986},
{node_name, "-name couchdbtest@127.0.0.1"},
{data_dir, "/tmp"},
diff --git a/src/aegis/src/aegis.erl b/src/aegis/src/aegis.erl
index e8a0b4bfb..d3f7f9676 100644
--- a/src/aegis/src/aegis.erl
+++ b/src/aegis/src/aegis.erl
@@ -20,6 +20,7 @@
-export([
init_db/2,
open_db/1,
+ get_db_info/1,
decrypt/2,
decrypt/3,
@@ -39,6 +40,16 @@ open_db(#{} = Db) ->
}.
+get_db_info(#{is_encrypted := IsEncrypted} = Db) ->
+ KeyManagerInfo = case erlang:function_exported(?AEGIS_KEY_MANAGER, get_db_info, 1) of
+ true ->
+ ?AEGIS_KEY_MANAGER:get_db_info(Db);
+ false ->
+ []
+ end,
+ [{enabled, IsEncrypted}, {key_manager, {KeyManagerInfo}}].
+
+
encrypt(#{} = _Db, _Key, <<>>) ->
<<>>;
diff --git a/src/aegis/src/aegis_key_manager.erl b/src/aegis/src/aegis_key_manager.erl
index aa9e3429a..4426c4f10 100644
--- a/src/aegis/src/aegis_key_manager.erl
+++ b/src/aegis/src/aegis_key_manager.erl
@@ -20,3 +20,11 @@
-callback open_db(Db :: #{}) -> {ok, binary()} | false.
+
+
+-callback get_db_info(Db :: #{}) -> list().
+
+
+-optional_callbacks([
+ get_db_info/1
+]).
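
For orientation, a minimal sketch of a key manager that opts into the new
callback; the module name and returned properties are assumptions, only the
open_db/1 and get_db_info/1 shapes come from the specs above (init_db/2 is
inferred from its arity as used in the tests later in this diff):

    -module(sample_key_manager).   %% hypothetical module name
    -behaviour(aegis_key_manager).

    -export([init_db/2, open_db/1, get_db_info/1]).

    %% Hand out a fresh 256-bit key when a database is created (sketch only).
    init_db(#{} = _Db, _Options) ->
        {ok, crypto:strong_rand_bytes(32)}.

    %% Return the key for an existing database, or false to leave it unencrypted.
    open_db(#{} = _Db) ->
        {ok, crypto:strong_rand_bytes(32)}.

    %% Optional callback added here: aegis:get_db_info/1 only invokes it when
    %% erlang:function_exported/3 reports it, so key managers without it keep
    %% working unchanged.
    get_db_info(#{} = _Db) ->
        [{key_provider, <<"sample">>}].
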
diff --git a/src/aegis/src/aegis_server.erl b/src/aegis/src/aegis_server.erl
index 15fea4c63..92ba7e80d 100644
--- a/src/aegis/src/aegis_server.erl
+++ b/src/aegis/src/aegis_server.erl
@@ -18,6 +18,7 @@
-include("aegis.hrl").
+-include_lib("kernel/include/logger.hrl").
%% aegis_server API
@@ -94,6 +95,7 @@ encrypt(#{} = Db, Key, Value) when is_binary(Key), is_binary(Value) ->
CipherText when is_binary(CipherText) ->
CipherText;
{error, {_Tag, {_C_FileName,_LineNumber}, _Desc} = Reason} ->
+ ?LOG_ERROR(#{what => encrypt_failure, details => Reason}),
couch_log:error("aegis encryption failure: ~p ", [Reason]),
erlang:error(decryption_failed);
{error, Reason} ->
@@ -119,6 +121,7 @@ decrypt(#{} = Db, Key, Value) when is_binary(Key), is_binary(Value) ->
PlainText when is_binary(PlainText) ->
PlainText;
{error, {_Tag, {_C_FileName,_LineNumber}, _Desc} = Reason} ->
+ ?LOG_ERROR(#{what => decrypt_failure, details => Reason}),
couch_log:error("aegis decryption failure: ~p ", [Reason]),
erlang:error(decryption_failed);
{error, Reason} ->
diff --git a/src/aegis/test/aegis_server_test.erl b/src/aegis/test/aegis_server_test.erl
index 0f96798b7..ff16475b4 100644
--- a/src/aegis/test/aegis_server_test.erl
+++ b/src/aegis/test/aegis_server_test.erl
@@ -35,6 +35,8 @@ basic_test_() ->
{timeout, ?TIMEOUT, fun test_init_db/0}},
{"open_db returns true when encryption enabled",
{timeout, ?TIMEOUT, fun test_open_db/0}},
+ {"get_db_info returns encryption enabled true",
+ {timeout, ?TIMEOUT, fun test_get_db_info/0}},
{"init_db caches key",
{timeout, ?TIMEOUT, fun test_init_db_cache/0}},
{"open_db caches key",
@@ -70,6 +72,11 @@ test_open_db() ->
?assertEqual(1, meck:num_calls(?AEGIS_KEY_MANAGER, open_db, 1)).
+test_get_db_info() ->
+ ?assertEqual([{enabled,true},{key_manager,{[]}}],
+ aegis:get_db_info(#{is_encrypted => true})).
+
+
test_init_db_cache() ->
?assertEqual(0, meck:num_calls(?AEGIS_KEY_MANAGER, init_db, 2)),
diff --git a/src/chttpd/src/chttpd.erl b/src/chttpd/src/chttpd.erl
index 1a9b19bb1..53fdf989a 100644
--- a/src/chttpd/src/chttpd.erl
+++ b/src/chttpd/src/chttpd.erl
@@ -16,6 +16,7 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("chttpd/include/chttpd.hrl").
+-include_lib("kernel/include/logger.hrl").
-export([start_link/0, start_link/1, start_link/2,
stop/0, handle_request/1, handle_request_int/1,
@@ -123,6 +124,12 @@ start_link(Name, Options) ->
end,
ok = couch_httpd:validate_bind_address(IP),
+ % Ensure uuid is set so that concurrent replications
+    % get the same value. This used to be done in the backend (:5986) httpd
+    % start_link and was moved here for now. Ideally this should be set
+    % in FDB or coordinated across all the nodes.
+ couch_server:get_uuid(),
+
set_auth_handlers(),
Options1 = Options ++ [
@@ -153,7 +160,6 @@ stop() ->
mochiweb_http:stop(?MODULE).
handle_request(MochiReq0) ->
- erlang:put(?REWRITE_COUNT, 0),
MochiReq = couch_httpd_vhost:dispatch_host(MochiReq0),
handle_request_int(MochiReq).
@@ -201,7 +207,13 @@ handle_request_int(MochiReq) ->
true ->
couch_log:notice("MethodOverride: ~s (real method was ~s)", [MethodOverride, Method1]),
case Method1 of
- 'POST' -> couch_util:to_existing_atom(MethodOverride);
+ 'POST' ->
+ ?LOG_NOTICE(#{
+ what => http_method_override,
+ result => ok,
+ new_method => MethodOverride
+ }),
+ couch_util:to_existing_atom(MethodOverride);
_ ->
% Ignore X-HTTP-Method-Override when the original verb isn't POST.
% I'd like to send a 406 error to the client, but that'd require a nasty refactor.
@@ -218,6 +230,7 @@ handle_request_int(MochiReq) ->
end,
Nonce = couch_util:to_hex(crypto:strong_rand_bytes(5)),
+ logger:set_process_metadata(#{request_id => Nonce}),
HttpReq0 = #httpd{
mochi_req = MochiReq,
@@ -263,6 +276,10 @@ handle_request_int(MochiReq) ->
span_ok(HttpResp),
{ok, Resp};
#httpd_resp{status = aborted, reason = Reason} ->
+ ?LOG_ERROR(#{
+                what => abnormal_response_termination,
+ details => Reason
+ }),
couch_log:error("Response abnormally terminated: ~p", [Reason]),
exit(normal)
end.
@@ -273,16 +290,15 @@ before_request(HttpReq) ->
{ok, HttpReq1} = chttpd_plugin:before_request(HttpReq),
chttpd_stats:init(HttpReq1),
{ok, HttpReq1}
- catch Tag:Error ->
- {error, catch_error(HttpReq, Tag, Error)}
+ catch Tag:Error:Stack ->
+ {error, catch_error(HttpReq, Tag, Error, Stack)}
end.
after_request(HttpReq, HttpResp0) ->
{ok, HttpResp1} =
try
chttpd_plugin:after_request(HttpReq, HttpResp0)
- catch _Tag:Error ->
- Stack = erlang:get_stacktrace(),
+ catch _Tag:Error:Stack ->
send_error(HttpReq, {Error, nil, Stack}),
{ok, HttpResp0#httpd_resp{status = aborted}}
end,
@@ -315,8 +331,8 @@ process_request(#httpd{mochi_req = MochiReq} = HttpReq) ->
Response ->
{HttpReq, Response}
end
- catch Tag:Error ->
- {HttpReq, catch_error(HttpReq, Tag, Error)}
+ catch Tag:Error:Stack ->
+ {HttpReq, catch_error(HttpReq, Tag, Error, Stack)}
end.
handle_req_after_auth(HandlerKey, HttpReq) ->
@@ -328,42 +344,50 @@ handle_req_after_auth(HandlerKey, HttpReq) ->
AuthorizedReq = chttpd_auth:authorize(possibly_hack(HttpReq),
fun chttpd_auth_request:authorize_request/1),
{AuthorizedReq, HandlerFun(AuthorizedReq)}
- catch Tag:Error ->
- {HttpReq, catch_error(HttpReq, Tag, Error)}
+ catch Tag:Error:Stack ->
+ {HttpReq, catch_error(HttpReq, Tag, Error, Stack)}
end.
-catch_error(_HttpReq, throw, {http_head_abort, Resp}) ->
+catch_error(_HttpReq, throw, {http_head_abort, Resp}, _Stack) ->
{ok, Resp};
-catch_error(_HttpReq, throw, {http_abort, Resp, Reason}) ->
+catch_error(_HttpReq, throw, {http_abort, Resp, Reason}, _Stack) ->
{aborted, Resp, Reason};
-catch_error(HttpReq, throw, {invalid_json, _}) ->
+catch_error(HttpReq, throw, {invalid_json, _}, _Stack) ->
send_error(HttpReq, {bad_request, "invalid UTF-8 JSON"});
-catch_error(HttpReq, exit, {mochiweb_recv_error, E}) ->
+catch_error(HttpReq, exit, {mochiweb_recv_error, E}, _Stack) ->
#httpd{
mochi_req = MochiReq,
peer = Peer,
original_method = Method
} = HttpReq,
+ ?LOG_NOTICE(#{
+ what => mochiweb_recv_error,
+ peer => Peer,
+ method => Method,
+ path => MochiReq:get(raw_path),
+ details => E
+ }),
couch_log:notice("mochiweb_recv_error for ~s - ~p ~s - ~p", [
Peer,
Method,
MochiReq:get(raw_path),
E]),
exit(normal);
-catch_error(HttpReq, exit, {uri_too_long, _}) ->
+catch_error(HttpReq, exit, {uri_too_long, _}, _Stack) ->
send_error(HttpReq, request_uri_too_long);
-catch_error(HttpReq, exit, {body_too_large, _}) ->
+catch_error(HttpReq, exit, {body_too_large, _}, _Stack) ->
send_error(HttpReq, request_entity_too_large);
-catch_error(HttpReq, throw, Error) ->
+catch_error(HttpReq, throw, Error, _Stack) ->
send_error(HttpReq, Error);
-catch_error(HttpReq, error, database_does_not_exist) ->
+catch_error(HttpReq, error, database_does_not_exist, _Stack) ->
send_error(HttpReq, database_does_not_exist);
-catch_error(HttpReq, error, decryption_failed) ->
+catch_error(HttpReq, error, decryption_failed, _Stack) ->
send_error(HttpReq, decryption_failed);
-catch_error(HttpReq, error, not_ciphertext) ->
+catch_error(HttpReq, error, not_ciphertext, _Stack) ->
send_error(HttpReq, not_ciphertext);
-catch_error(HttpReq, Tag, Error) ->
- Stack = erlang:get_stacktrace(),
+catch_error(HttpReq, error, {erlfdb_error, _} = Error, _Stack) ->
+ send_error(HttpReq, Error);
+catch_error(HttpReq, Tag, Error, Stack) ->
% TODO improve logging and metrics collection for client disconnects
case {Tag, Error, Stack} of
{exit, normal, [{mochiweb_request, send, _, _} | _]} ->
@@ -408,9 +432,23 @@ maybe_log(#httpd{} = HttpReq, #httpd_resp{should_log = true} = HttpResp) ->
User = get_user(HttpReq),
Host = MochiReq:get_header_value("Host"),
RawUri = MochiReq:get(raw_path),
- RequestTime = timer:now_diff(EndTime, BeginTime) / 1000,
+ RequestTime = round(timer:now_diff(EndTime, BeginTime) / 1000),
+ % Wish List
+ % - client port
+ % - timers: connection, request, time to first byte, ...
+ % - response size
+ %
+ ?LOG_NOTICE(#{
+ method => Method,
+ path => RawUri,
+ code => Code,
+ user => User,
+ % req_size => MochiReq:get(body_length),
+ src => #{ip4 => Peer},
+ duration => RequestTime
+ }, #{domain => [chttpd_access_log]}),
couch_log:notice("~s ~s ~s ~s ~s ~B ~p ~B", [Host, Peer, User,
- Method, RawUri, Code, Status, round(RequestTime)]);
+ Method, RawUri, Code, Status, RequestTime]);
maybe_log(_HttpReq, #httpd_resp{should_log = false}) ->
ok.
@@ -923,6 +961,10 @@ buffer_response(Req) ->
end.
+error_info({erlfdb_error, ErrorCode}) ->
+ ErrorDesc = erlfdb:get_error_string(ErrorCode),
+ Reason = ?l2b(io_lib:format("code: ~B, desc: ~s", [ErrorCode, ErrorDesc])),
+ {500, erlfdb_error, Reason};
error_info({Error, Reason}) when is_list(Reason) ->
error_info({Error, couch_util:to_binary(Reason)});
error_info(bad_request) ->
@@ -1017,6 +1059,8 @@ error_info(all_workers_died) ->
"request due to overloading or maintenance mode.">>};
error_info(not_implemented) ->
{501, <<"not_implemented">>, <<"this feature is not yet implemented">>};
+error_info({disabled, Reason}) ->
+ {501, <<"disabled">>, Reason};
error_info(timeout) ->
{500, <<"timeout">>, <<"The request could not be processed in a reasonable"
" amount of time.">>};
@@ -1026,6 +1070,9 @@ error_info(not_ciphertext) ->
{500, <<"not_ciphertext">>, <<"Not Ciphertext">>};
error_info({service_unavailable, Reason}) ->
{503, <<"service unavailable">>, Reason};
+error_info({unknown_eval_api_language, Language}) ->
+ {400, <<"unknown_eval_api_language">>, <<"unsupported language in design"
+ " doc: `", Language/binary, "`">>};
error_info({timeout, _Reason}) ->
error_info(timeout);
error_info({Error, null}) ->
@@ -1235,6 +1282,13 @@ maybe_decompress(Httpd, Body) ->
log_error_with_stack_trace({bad_request, _, _}) ->
ok;
log_error_with_stack_trace({Error, Reason, Stack}) ->
+ ?LOG_ERROR(#{
+ what => request_failure,
+ error => Error,
+ reason => Reason,
+ hash => stack_hash(Stack),
+ stacktrace => Stack
+ }),
EFmt = if is_binary(Error) -> "~s"; true -> "~w" end,
RFmt = if is_binary(Reason) -> "~s"; true -> "~w" end,
Fmt = "req_err(~w) " ++ EFmt ++ " : " ++ RFmt ++ "~n ~p",
@@ -1265,8 +1319,9 @@ basic_headers(Req, Headers0) ->
++ server_header()
++ couch_httpd_auth:cookie_auth_header(Req, Headers0),
Headers1 = chttpd_cors:headers(Req, Headers),
- Headers2 = chttpd_xframe_options:header(Req, Headers1),
- chttpd_prefer_header:maybe_return_minimal(Req, Headers2).
+ Headers2 = chttpd_xframe_options:header(Req, Headers1),
+ Headers3 = [reqid(), timing() | Headers2],
+ chttpd_prefer_header:maybe_return_minimal(Req, Headers3).
handle_response(Req0, Code0, Headers0, Args0, Type) ->
{ok, {Req1, Code1, Headers1, Args1}} =
@@ -1384,8 +1439,13 @@ get_action(#httpd{} = Req) ->
try
chttpd_handlers:handler_info(Req)
catch Tag:Error ->
+ ?LOG_ERROR(#{
+ what => tracing_configuration_failure,
+ tag => Tag,
+ details => Error
+ }),
couch_log:error("Cannot set tracing action ~p:~p", [Tag, Error]),
- {undefind, #{}}
+ {undefined, #{}}
end.
span_ok(#httpd_resp{code = Code}) ->
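
The catch_error/3 to catch_error/4 change relies on the OTP 21+ try/catch
syntax (in line with the require_otp_vsn bump to "21|22|23" earlier in this
diff): the stacktrace is bound in the catch clause head rather than fetched
afterwards with the deprecated erlang:get_stacktrace/0. A minimal sketch of
the pattern, using a hypothetical with_stack/1 wrapper:

    %% OTP 21+ form: Tag:Error:Stack binds the stacktrace in the clause head.
    with_stack(Fun) ->
        try
            Fun()
        catch
            Tag:Error:Stack ->
                {error, Tag, Error, Stack}
        end.
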
diff --git a/src/chttpd/src/chttpd_auth_cache.erl b/src/chttpd/src/chttpd_auth_cache.erl
index c5a56bddb..88ffb7ade 100644
--- a/src/chttpd/src/chttpd_auth_cache.erl
+++ b/src/chttpd/src/chttpd_auth_cache.erl
@@ -22,6 +22,7 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch/include/couch_js_functions.hrl").
+-include_lib("kernel/include/logger.hrl").
-define(CACHE, chttpd_auth_cache_lru).
-define(RELISTEN_DELAY, 5000).
@@ -74,6 +75,7 @@ get_from_cache(UserName) ->
try ets_lru:lookup_d(?CACHE, UserName) of
{ok, Props} ->
couch_stats:increment_counter([couchdb, auth_cache_hits]),
+ ?LOG_DEBUG(#{what => cache_hit, user => UserName}),
couch_log:debug("cache hit for ~s", [UserName]),
Props;
_ ->
@@ -96,6 +98,7 @@ maybe_increment_auth_cache_miss(UserName) ->
case lists:keymember(?b2l(UserName), 1, Admins) of
false ->
couch_stats:increment_counter([couchdb, auth_cache_misses]),
+ ?LOG_DEBUG(#{what => cache_miss, user => UserName}),
couch_log:debug("cache miss for ~s", [UserName]);
_True ->
ok
@@ -137,9 +140,18 @@ handle_info({'DOWN', _, _, Pid, Reason}, #state{changes_pid=Pid} = State) ->
{seq, EndSeq} ->
EndSeq;
{database_does_not_exist, _} ->
+ ?LOG_NOTICE(#{
+ what => changes_listener_died,
+ reason => database_does_not_exist,
+ details => "create the _users database to silence this notice"
+ }),
couch_log:notice("~p changes listener died because the _users database does not exist. Create the database to silence this notice.", [?MODULE]),
0;
_ ->
+ ?LOG_NOTICE(#{
+ what => changes_listener_died,
+ reason => Reason
+ }),
couch_log:notice("~p changes listener died ~r", [?MODULE, Reason]),
0
end,
@@ -192,6 +204,7 @@ changes_callback({change, {Change}}, _) ->
ok;
DocId ->
UserName = username(DocId),
+ ?LOG_DEBUG(#{what => invalidate_cache, user => UserName}),
couch_log:debug("Invalidating cached credentials for ~s", [UserName]),
ets_lru:remove(?CACHE, UserName)
end,
@@ -221,6 +234,10 @@ load_user_from_db(UserName) ->
{Props} = couch_doc:to_json_obj(Doc, []),
Props;
_Else ->
+ ?LOG_DEBUG(#{
+ what => missing_user_document,
+ user => UserName
+ }),
couch_log:debug("no record of user ~s", [UserName]),
nil
catch error:database_does_not_exist ->
@@ -267,6 +284,12 @@ ensure_auth_ddoc_exists(Db, DDocId) ->
update_doc_ignoring_conflict(Db, NewDoc)
end;
{error, Reason} ->
+ ?LOG_NOTICE(#{
+ what => ensure_auth_ddoc_exists_failure,
+ db => dbname(),
+ docid => DDocId,
+ details => Reason
+ }),
couch_log:notice("Failed to ensure auth ddoc ~s/~s exists for reason: ~p", [dbname(), DDocId, Reason]),
ok
end,
diff --git a/src/chttpd/src/chttpd_changes.erl b/src/chttpd/src/chttpd_changes.erl
index 45c7d57b9..79ca4d1b8 100644
--- a/src/chttpd/src/chttpd_changes.erl
+++ b/src/chttpd/src/chttpd_changes.erl
@@ -12,7 +12,7 @@
-module(chttpd_changes).
-include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
-export([
handle_db_changes/3,
@@ -466,7 +466,14 @@ send_changes_doc_ids(Db, StartSeq, Dir, Fun, Acc0, {doc_ids, _Style, DocIds}) ->
send_changes_design_docs(Db, StartSeq, Dir, Fun, Acc0, {design_docs, _Style}) ->
- FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
+ FoldFun = fun(FDI, Acc) ->
+ case FDI of
+ {row, Row} ->
+ DocId = proplists:get_value(id, Row),
+ {ok, [fabric2_db:get_full_doc_info(Db, DocId) | Acc]};
+ _ -> {ok, Acc}
+ end
+ end,
Opts = [
include_deleted,
{start_key, <<"_design/">>},
@@ -523,9 +530,7 @@ send_lookup_changes(FullDocInfos, StartSeq, Dir, Db, Fun, Acc0) ->
fwd ->
FinalAcc0 = case element(1, FinalAcc) of
changes_acc -> % we came here via couch_http or internal call
- FinalAcc#changes_acc{seq = fabric2_db:get_update_seq(Db)};
- fabric_changes_acc -> % we came here via chttpd / fabric / rexi
- FinalAcc#fabric_changes_acc{seq = couch_db:get_update_seq(Db)}
+ FinalAcc#changes_acc{seq = fabric2_db:get_update_seq(Db)}
end,
{ok, FinalAcc0};
rev -> {ok, FinalAcc}
diff --git a/src/chttpd/src/chttpd_cors.erl b/src/chttpd/src/chttpd_cors.erl
index a8dd348f8..ba1323387 100644
--- a/src/chttpd/src/chttpd_cors.erl
+++ b/src/chttpd/src/chttpd_cors.erl
@@ -27,6 +27,7 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("chttpd/include/chttpd_cors.hrl").
+-include_lib("kernel/include/logger.hrl").
%% http://www.w3.org/TR/cors/#resource-preflight-requests
@@ -51,6 +52,12 @@ maybe_handle_preflight_request(#httpd{}=Req, Config) ->
not_preflight ->
not_preflight;
UnknownError ->
+ ?LOG_ERROR(#{
+ what => preflight_request_error,
+ origin => get_origin(Req),
+ accepted_origins => get_accepted_origins(Req, Config),
+ details => UnknownError
+ }),
couch_log:error(
"Unknown response of chttpd_cors:preflight_request(~p): ~p",
[Req, UnknownError]
diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index bfd8f9fc2..4a7b631f9 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -15,16 +15,14 @@
-compile(tuple_calls).
-include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
+-include_lib("kernel/include/logger.hrl").
-export([handle_request/1, handle_compact_req/2, handle_design_req/2,
db_req/2, couch_doc_open/4,handle_changes_req/2,
update_doc_result_to_json/1, update_doc_result_to_json/2,
handle_design_info_req/3, handle_view_cleanup_req/2,
- update_doc/4, http_code_from_status/1,
- handle_partition_req/2]).
+ update_doc/4, http_code_from_status/1]).
-import(chttpd,
[send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
@@ -222,9 +220,13 @@ changes_callback(waiting_for_updates, Acc) ->
mochi = Resp1,
chunks_sent = ChunksSent + 1
}};
-changes_callback({timeout, _ResponseType}, Acc) ->
+changes_callback({timeout, ResponseType}, Acc) ->
#cacc{mochi = Resp, chunks_sent = ChunksSent} = Acc,
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp, "\n"),
+ Chunk = case ResponseType of
+ "eventsource" -> "event: heartbeat\ndata: \n\n";
+ _ -> "\n"
+ end,
+ {ok, Resp1} = chttpd:send_delayed_chunk(Resp, Chunk),
{ok, Acc#cacc{mochi = Resp1, chunks_sent = ChunksSent + 1}};
changes_callback({error, Reason}, #cacc{mochi = #httpd{}} = Acc) ->
#cacc{mochi = Req} = Acc,
@@ -271,80 +273,6 @@ handle_view_cleanup_req(Req, Db) ->
ok = fabric2_index:cleanup(Db),
send_json(Req, 202, {[{ok, true}]}).
-
-handle_partition_req(#httpd{path_parts=[_,_]}=_Req, _Db) ->
- throw({bad_request, invalid_partition_req});
-
-handle_partition_req(#httpd{method='GET', path_parts=[_,_,PartId]}=Req, Db) ->
- couch_partition:validate_partition(PartId),
- case couch_db:is_partitioned(Db) of
- true ->
- {ok, PartitionInfo} = fabric:get_partition_info(Db, PartId),
- send_json(Req, {PartitionInfo});
- false ->
- throw({bad_request, <<"database is not partitioned">>})
- end;
-
-handle_partition_req(#httpd{method='POST',
- path_parts=[_, <<"_partition">>, <<"_", _/binary>>]}, _Db) ->
- Msg = <<"Partition must not start with an underscore">>,
- throw({illegal_partition, Msg});
-
-handle_partition_req(#httpd{path_parts = [_, _, _]}=Req, _Db) ->
- send_method_not_allowed(Req, "GET");
-
-handle_partition_req(#httpd{path_parts=[DbName, _, PartId | Rest]}=Req, Db) ->
- case couch_db:is_partitioned(Db) of
- true ->
- couch_partition:validate_partition(PartId),
- QS = chttpd:qs(Req),
- PartIdStr = ?b2l(PartId),
- QSPartIdStr = couch_util:get_value("partition", QS, PartIdStr),
- if QSPartIdStr == PartIdStr -> ok; true ->
- Msg = <<"Conflicting value for `partition` in query string">>,
- throw({bad_request, Msg})
- end,
- NewQS = lists:ukeysort(1, [{"partition", PartIdStr} | QS]),
- NewReq = Req#httpd{
- path_parts = [DbName | Rest],
- qs = NewQS
- },
- update_partition_stats(Rest),
- case Rest of
- [OP | _] when OP == <<"_all_docs">> orelse ?IS_MANGO(OP) ->
- case chttpd_handlers:db_handler(OP, fun db_req/2) of
- Handler when is_function(Handler, 2) ->
- Handler(NewReq, Db);
- _ ->
- chttpd:send_error(Req, not_found)
- end;
- [<<"_design">>, _Name, <<"_", _/binary>> | _] ->
- handle_design_req(NewReq, Db);
- _ ->
- chttpd:send_error(Req, not_found)
- end;
- false ->
- throw({bad_request, <<"database is not partitioned">>})
- end;
-
-handle_partition_req(Req, _Db) ->
- chttpd:send_error(Req, not_found).
-
-update_partition_stats(PathParts) ->
- case PathParts of
- [<<"_design">> | _] ->
- couch_stats:increment_counter([couchdb, httpd, partition_view_requests]);
- [<<"_all_docs">> | _] ->
- couch_stats:increment_counter([couchdb, httpd, partition_all_docs_requests]);
- [<<"_find">> | _] ->
- couch_stats:increment_counter([couchdb, httpd, partition_find_requests]);
- [<<"_explain">> | _] ->
- couch_stats:increment_counter([couchdb, httpd, partition_explain_requests]);
- _ ->
- ok % ignore path that do not match
- end.
-
-
handle_design_req(#httpd{
path_parts=[_DbName, _Design, Name, <<"_",_/binary>> = Action | _Rest]
}=Req, Db) ->
@@ -412,6 +340,7 @@ db_req(#httpd{method='POST', path_parts=[DbName]}=Req, Db) ->
Doc0 = chttpd:json_body(Req),
Doc1 = couch_doc:from_json_obj_validate(Doc0, fabric2_db:name(Db)),
+ validate_attachment_names(Doc1),
Doc2 = case Doc1#doc.id of
<<"">> ->
Doc1#doc{id=couch_uuids:new(), revs={0, []}};
@@ -432,6 +361,12 @@ db_req(#httpd{method='POST', path_parts=[DbName]}=Req, Db) ->
chttpd_stats:incr_writes(),
ok;
Error ->
+ ?LOG_DEBUG(#{
+ what => async_update_error,
+ db => DbName,
+ docid => DocId,
+ details => Error
+ }),
couch_log:debug("Batch doc error (~s): ~p",[DocId, Error])
end
end),
@@ -630,41 +565,6 @@ db_req(#httpd{method='POST', path_parts=[_, <<"_bulk_get">>],
db_req(#httpd{path_parts=[_, <<"_bulk_get">>]}=Req, _Db) ->
send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_purge">>]}=Req, Db) ->
- couch_stats:increment_counter([couchdb, httpd, purge_requests]),
- chttpd:validate_ctype(Req, "application/json"),
- {IdsRevs} = chttpd:json_body_obj(Req),
- IdsRevs2 = [{Id, couch_doc:parse_revs(Revs)} || {Id, Revs} <- IdsRevs],
- MaxIds = config:get_integer("purge", "max_document_id_number", 100),
- case length(IdsRevs2) =< MaxIds of
- false -> throw({bad_request, "Exceeded maximum number of documents."});
- true -> ok
- end,
- RevsLen = lists:foldl(fun({_Id, Revs}, Acc) ->
- length(Revs) + Acc
- end, 0, IdsRevs2),
- MaxRevs = config:get_integer("purge", "max_revisions_number", 1000),
- case RevsLen =< MaxRevs of
- false -> throw({bad_request, "Exceeded maximum number of revisions."});
- true -> ok
- end,
- couch_stats:increment_counter([couchdb, document_purges, total], length(IdsRevs2)),
- Results2 = case fabric:purge_docs(Db, IdsRevs2, []) of
- {ok, Results} ->
- chttpd_stats:incr_writes(length(Results)),
- Results;
- {accepted, Results} ->
- chttpd_stats:incr_writes(length(Results)),
- Results
- end,
- {Code, Json} = purge_results_to_json(IdsRevs2, Results2),
- send_json(Req, Code, {[{<<"purge_seq">>, null}, {<<"purged">>, {Json}}]});
-
-db_req(#httpd{path_parts=[_,<<"_purge">>]}=Req, _Db) ->
- send_method_not_allowed(Req, "POST");
-
-
db_req(#httpd{method='GET',path_parts=[_,OP]}=Req, Db) when ?IS_ALL_DOCS(OP) ->
case chttpd:qs_json_value(Req, "keys", nil) of
Keys when is_list(Keys) ->
@@ -678,7 +578,7 @@ db_req(#httpd{method='GET',path_parts=[_,OP]}=Req, Db) when ?IS_ALL_DOCS(OP) ->
db_req(#httpd{method='POST',
path_parts=[_, OP, <<"queries">>]}=Req, Db) when ?IS_ALL_DOCS(OP) ->
Props = chttpd:json_body_obj(Req),
- case couch_mrview_util:get_view_queries(Props) of
+ case couch_views_util:get_view_queries(Props) of
undefined ->
throw({bad_request,
<<"POST body must include `queries` parameter.">>});
@@ -773,22 +673,6 @@ db_req(#httpd{method='GET',path_parts=[_,<<"_revs_limit">>]}=Req, Db) ->
db_req(#httpd{path_parts=[_,<<"_revs_limit">>]}=Req, _Db) ->
send_method_not_allowed(Req, "PUT,GET");
-db_req(#httpd{method='PUT',path_parts=[_,<<"_purged_infos_limit">>]}=Req, Db) ->
- case chttpd:json_body(Req) of
- Limit when is_integer(Limit), Limit > 0 ->
- case fabric:set_purge_infos_limit(Db, Limit, []) of
- ok ->
- send_json(Req, {[{<<"ok">>, true}]});
- Error ->
- throw(Error)
- end;
- _->
- throw({bad_request, "`purge_infos_limit` must be positive integer"})
- end;
-
-db_req(#httpd{method='GET',path_parts=[_,<<"_purged_infos_limit">>]}=Req, Db) ->
- send_json(Req, fabric:get_purge_infos_limit(Db));
-
% Special case to enable using an unencoded slash in the URL of design docs,
% as slashes in document IDs must otherwise be URL encoded.
db_req(#httpd{method='GET', mochi_req=MochiReq, path_parts=[_DbName, <<"_design/", _/binary>> | _]}=Req, _Db) ->
@@ -1011,7 +895,7 @@ send_all_docs_keys(Db, #mrargs{} = Args, VAcc0) ->
doc = DocValue
}
end,
- Row1 = fabric_view:transform_row(Row0),
+ Row1 = couch_views_http:transform_row(Row0),
view_cb(Row1, Acc)
end,
{ok, VAcc2} = fabric2_db:fold_docs(Db, Keys, CB, VAcc1, OpenOpts),
@@ -1231,6 +1115,12 @@ db_doc_req(#httpd{method='PUT'}=Req, Db, DocId) ->
chttpd_stats:incr_writes(),
ok;
Error ->
+ ?LOG_NOTICE(#{
+ what => async_update_error,
+ db => DbName,
+ docid => DocId,
+ details => Error
+ }),
couch_log:notice("Batch doc error (~s): ~p",[DocId, Error])
end
end),
@@ -1252,7 +1142,7 @@ db_doc_req(#httpd{method='COPY'}=Req, Db, SourceDocId) ->
missing_rev -> nil;
Rev -> Rev
end,
- {TargetDocId0, TargetRevs} = couch_httpd_db:parse_copy_destination_header(Req),
+ {TargetDocId0, TargetRevs} = chttpd_util:parse_copy_destination_header(Req),
TargetDocId = list_to_binary(mochiweb_util:unquote(TargetDocId0)),
% open old doc
Doc = couch_doc_open(Db, SourceDocId, SourceRev, []),
@@ -1406,6 +1296,15 @@ bulk_get_multipart_boundary() ->
receive_request_data(Req) ->
receive_request_data(Req, chttpd:body_length(Req)).
+receive_request_data(Req, Len) when Len == chunked ->
+ Ref = make_ref(),
+ ChunkFun = fun({_Length, Binary}, _State) ->
+ self() ! {chunk, Ref, Binary}
+ end,
+ couch_httpd:recv_chunked(Req, 4096, ChunkFun, ok),
+ GetChunk = fun GC() -> receive {chunk, Ref, Binary} -> {Binary, GC} end end,
+ {receive {chunk, Ref, Binary} -> Binary end, GetChunk};
+
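
The new chunked-transfer clause above returns the request body incrementally as a {Chunk, NextFun} pair: the caller gets the first chunk together with a zero-arity continuation that blocks until the next chunk arrives. A minimal sketch of a consumer, assuming (by analogy with the fixed-length clauses) that an empty binary marks the end of the body; drain_body/2 is hypothetical and not part of this patch:

    %% Hypothetical helper: fold a {Chunk, Next} continuation into one binary.
    drain_body({<<>>, _Next}, Acc) ->
        iolist_to_binary(lists:reverse(Acc));
    drain_body({Chunk, Next}, Acc) ->
        drain_body(Next(), [Chunk | Acc]).

    %% Usage: Body = drain_body(receive_request_data(Req), []).
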
receive_request_data(Req, LenLeft) when LenLeft > 0 ->
Len = erlang:min(4096, LenLeft),
Data = chttpd:recv(Req, Len),
@@ -1430,24 +1329,6 @@ update_doc_result_to_json(DocId, Error) ->
{_Code, ErrorStr, Reason} = chttpd:error_info(Error),
{[{id, DocId}, {error, ErrorStr}, {reason, Reason}]}.
-purge_results_to_json([], []) ->
- {201, []};
-purge_results_to_json([{DocId, _Revs} | RIn], [{ok, PRevs} | ROut]) ->
- {Code, Results} = purge_results_to_json(RIn, ROut),
- couch_stats:increment_counter([couchdb, document_purges, success]),
- {Code, [{DocId, couch_doc:revs_to_strs(PRevs)} | Results]};
-purge_results_to_json([{DocId, _Revs} | RIn], [{accepted, PRevs} | ROut]) ->
- {Code, Results} = purge_results_to_json(RIn, ROut),
- couch_stats:increment_counter([couchdb, document_purges, success]),
- NewResults = [{DocId, couch_doc:revs_to_strs(PRevs)} | Results],
- {erlang:max(Code, 202), NewResults};
-purge_results_to_json([{DocId, _Revs} | RIn], [Error | ROut]) ->
- {Code, Results} = purge_results_to_json(RIn, ROut),
- {NewCode, ErrorStr, Reason} = chttpd:error_info(Error),
- couch_stats:increment_counter([couchdb, document_purges, failure]),
- NewResults = [{DocId, {[{error, ErrorStr}, {reason, Reason}]}} | Results],
- {erlang:max(NewCode, Code), NewResults}.
-
send_updated_doc(Req, Db, DocId, Json) ->
send_updated_doc(Req, Db, DocId, Json, []).
@@ -2076,6 +1957,10 @@ monitor_attachments(Atts) when is_list(Atts) ->
stub ->
Monitors;
Else ->
+ ?LOG_ERROR(#{
+ what => malformed_attachment_data,
+ attachment => Att
+ }),
couch_log:error("~p from couch_att:fetch(data, ~p)", [Else, Att]),
Monitors
end
@@ -2092,7 +1977,7 @@ set_namespace(<<"_local_docs">>, Args) ->
set_namespace(<<"_design_docs">>, Args) ->
set_namespace(<<"_design">>, Args);
set_namespace(NS, #mrargs{} = Args) ->
- couch_mrview_util:set_extra(Args, namespace, NS).
+ couch_views_util:set_extra(Args, namespace, NS).
%% /db/_bulk_get stuff
diff --git a/src/chttpd/src/chttpd_handlers.erl b/src/chttpd/src/chttpd_handlers.erl
index 17d2952b3..d46875d75 100644
--- a/src/chttpd/src/chttpd_handlers.erl
+++ b/src/chttpd/src/chttpd_handlers.erl
@@ -22,6 +22,7 @@
-define(SERVICE_ID, chttpd_handlers).
-include_lib("couch/include/couch_db.hrl").
+-include_lib("kernel/include/logger.hrl").
%% ------------------------------------------------------------------
%% API Function Definitions
@@ -44,8 +45,13 @@ handler_info(HttpReq) ->
Default = {'unknown.unknown', #{}},
try
select(collect(handler_info, [Method, PathParts, HttpReq]), Default)
- catch Type:Reason ->
- Stack = erlang:get_stacktrace(),
+ catch Type:Reason:Stack ->
+ ?LOG_ERROR(#{
+ what => handler_info_failure,
+ result => Type,
+ details => Reason,
+ stack => Stack
+ }),
couch_log:error("~s :: handler_info failure for ~p : ~p:~p :: ~p", [
?MODULE,
get(nonce),
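
The catch clause above is updated for OTP 21+: the stacktrace is bound directly in the Type:Reason:Stack pattern (erlang:get_stacktrace/0 was deprecated in OTP 21 and later removed), and a structured ?LOG_ERROR report is emitted alongside the existing couch_log call. A generic sketch of the pattern; risky/0 and the report fields are placeholders:

    try risky() of
        Result -> Result
    catch
        Type:Reason:Stack ->
            ?LOG_ERROR(#{what => example_failure, result => Type,
                         details => Reason, stack => Stack}),
            couch_log:error("example failure ~p:~p :: ~p", [Type, Reason, Stack]),
            {error, Reason}
    end.
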
diff --git a/src/chttpd/src/chttpd_httpd_handlers.erl b/src/chttpd/src/chttpd_httpd_handlers.erl
index d50115917..e5374b1b6 100644
--- a/src/chttpd/src/chttpd_httpd_handlers.erl
+++ b/src/chttpd/src/chttpd_httpd_handlers.erl
@@ -15,9 +15,11 @@
-export([url_handler/1, db_handler/1, design_handler/1, handler_info/3]).
-export([
- not_supported/2,
not_supported/3,
- not_implemented/2
+ not_supported/2,
+ not_supported/1,
+ not_implemented/2,
+ not_implemented/1
]).
@@ -38,16 +40,22 @@ url_handler(<<"_replicate">>) -> fun chttpd_misc:handle_replicate_req/1;
url_handler(<<"_uuids">>) -> fun chttpd_misc:handle_uuids_req/1;
url_handler(<<"_session">>) -> fun chttpd_auth:handle_session_req/1;
url_handler(<<"_up">>) -> fun chttpd_misc:handle_up_req/1;
+url_handler(<<"_membership">>) -> fun ?MODULE:not_supported/1;
+url_handler(<<"_reshard">>) -> fun ?MODULE:not_supported/1;
+url_handler(<<"_db_updates">>) -> fun ?MODULE:not_implemented/1;
+url_handler(<<"_cluster_setup">>) -> fun ?MODULE:not_implemented/1;
url_handler(_) -> no_match.
db_handler(<<"_view_cleanup">>) -> fun chttpd_db:handle_view_cleanup_req/2;
db_handler(<<"_compact">>) -> fun chttpd_db:handle_compact_req/2;
db_handler(<<"_design">>) -> fun chttpd_db:handle_design_req/2;
-db_handler(<<"_partition">>) -> fun chttpd_db:handle_partition_req/2;
+db_handler(<<"_partition">>) -> fun ?MODULE:not_implemented/2;
db_handler(<<"_temp_view">>) -> fun ?MODULE:not_supported/2;
db_handler(<<"_changes">>) -> fun chttpd_db:handle_changes_req/2;
db_handler(<<"_purge">>) -> fun ?MODULE:not_implemented/2;
db_handler(<<"_purged_infos_limit">>) -> fun ?MODULE:not_implemented/2;
+db_handler(<<"_shards">>) -> fun ?MODULE:not_supported/2;
+db_handler(<<"_sync_shards">>) -> fun ?MODULE:not_supported/2;
db_handler(_) -> no_match.
design_handler(<<"_view">>) -> fun chttpd_view:handle_view_req/3;
@@ -186,7 +194,6 @@ handler_info(Method, [<<"_", _/binary>> = Part| Rest], Req) ->
% on for known system databases.
DbName = case Part of
<<"_dbs">> -> '_dbs';
- <<"_global_changes">> -> '_global_changes';
<<"_metadata">> -> '_metadata';
<<"_nodes">> -> '_nodes';
<<"_replicator">> -> '_replicator';
@@ -497,7 +504,7 @@ handler_info(_, _, _) ->
get_copy_destination(Req) ->
try
- {DocIdStr, _} = couch_httpd_db:parse_copy_destination_header(Req),
+ {DocIdStr, _} = chttpd_util:parse_copy_destination_header(Req),
list_to_binary(mochiweb_util:unquote(DocIdStr))
catch _:_ ->
unknown
@@ -509,10 +516,18 @@ not_supported(#httpd{} = Req, Db, _DDoc) ->
not_supported(#httpd{} = Req, _Db) ->
+ not_supported(Req).
+
+
+not_supported(#httpd{} = Req) ->
Msg = <<"resource is not supported in CouchDB >= 4.x">>,
chttpd:send_error(Req, 410, gone, Msg).
not_implemented(#httpd{} = Req, _Db) ->
+ not_implemented(Req).
+
+
+not_implemented(#httpd{} = Req) ->
Msg = <<"resource is not implemented">>,
chttpd:send_error(Req, 501, not_implemented, Msg).
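
With these clauses, endpoints that no longer exist in this architecture are answered directly by the handler module: _membership and _reshard resolve to not_supported/1 (410 Gone), while _db_updates and _cluster_setup resolve to not_implemented/1 (501). A minimal sketch of the dispatch; Req is a #httpd{} record built by chttpd and is not constructed here:

    removed_endpoint_sketch(Req) ->
        Fun = chttpd_httpd_handlers:url_handler(<<"_reshard">>),
        %% Fun is fun chttpd_httpd_handlers:not_supported/1, which replies
        %% 410 Gone with "resource is not supported in CouchDB >= 4.x".
        Fun(Req).
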
diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl
index 5cfd0f7cb..3f81c1b0c 100644
--- a/src/chttpd/src/chttpd_misc.erl
+++ b/src/chttpd/src/chttpd_misc.erl
@@ -30,10 +30,10 @@
]).
-include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
-import(chttpd,
- [send_json/2,send_json/3,send_method_not_allowed/2,
+ [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
send_chunk/2,start_chunked_response/3]).
-define(MAX_DB_NUM_FOR_DBS_INFO, 100).
@@ -61,12 +61,7 @@ handle_welcome_req(Req, _) ->
send_method_not_allowed(Req, "GET,HEAD").
get_features() ->
- case clouseau_rpc:connected() of
- true ->
- [search | config:features()];
- false ->
- config:features()
- end.
+ config:features().
handle_favicon_req(Req) ->
handle_favicon_req(Req, get_docroot()).
@@ -120,7 +115,7 @@ handle_all_dbs_req(#httpd{method='GET'}=Req) ->
direction = Dir,
limit = Limit,
skip = Skip
- } = couch_mrview_http:parse_params(Req, undefined),
+ } = couch_views_http_util:parse_params(Req, undefined),
Options = [
{start_key, StartKey},
@@ -142,7 +137,7 @@ all_dbs_callback({meta, _Meta}, #vacc{resp=Resp0}=Acc) ->
{ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "["),
{ok, Acc#vacc{resp=Resp1}};
all_dbs_callback({row, Row}, #vacc{resp=Resp0}=Acc) ->
- Prepend = couch_mrview_http:prepend_val(Acc),
+ Prepend = couch_views_http_util:prepend_val(Acc),
DbName = couch_util:get_value(id, Row),
{ok, Resp1} = chttpd:send_delayed_chunk(Resp0, [Prepend, ?JSON_ENCODE(DbName)]),
{ok, Acc#vacc{prepend=",", resp=Resp1}};
@@ -160,7 +155,7 @@ handle_dbs_info_req(#httpd{method = 'GET'} = Req) ->
handle_dbs_info_req(#httpd{method='POST', user_ctx=UserCtx}=Req) ->
chttpd:validate_ctype(Req, "application/json"),
Props = chttpd:json_body_obj(Req),
- Keys = couch_mrview_util:get_view_keys(Props),
+ Keys = couch_views_util:get_view_keys(Props),
case Keys of
undefined -> throw({bad_request, "`keys` member must exist."});
_ -> ok
@@ -253,7 +248,7 @@ send_db_infos(Req, ListFunctionName) ->
direction = Dir,
limit = Limit,
skip = Skip
- } = couch_mrview_http:parse_params(Req, undefined),
+ } = couch_views_http_util:parse_params(Req, undefined),
Options = [
{start_key, StartKey},
@@ -280,7 +275,7 @@ dbs_info_callback({meta, _Meta}, #vacc{resp = Resp0} = Acc) ->
{ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "["),
{ok, Acc#vacc{resp = Resp1}};
dbs_info_callback({row, Props}, #vacc{resp = Resp0} = Acc) ->
- Prepend = couch_mrview_http:prepend_val(Acc),
+ Prepend = couch_views_http_util:prepend_val(Acc),
Chunk = [Prepend, ?JSON_ENCODE({Props})],
{ok, Resp1} = chttpd:send_delayed_chunk(Resp0, Chunk),
{ok, Acc#vacc{prepend = ",", resp = Resp1}};
@@ -334,9 +329,33 @@ handle_reload_query_servers_req(#httpd{method='POST'}=Req) ->
handle_reload_query_servers_req(Req) ->
send_method_not_allowed(Req, "POST").
+handle_uuids_req(#httpd{method='GET'}=Req) ->
+ Max = list_to_integer(config:get("uuids","max_count","1000")),
+ Count = try list_to_integer(couch_httpd:qs_value(Req, "count", "1")) of
+ N when N > Max ->
+ throw({bad_request, <<"count parameter too large">>});
+ N when N < 0 ->
+ throw({bad_request, <<"count must be a positive integer">>});
+ N -> N
+ catch
+ error:badarg ->
+ throw({bad_request, <<"count must be a positive integer">>})
+ end,
+ UUIDs = [couch_uuids:new() || _ <- lists:seq(1, Count)],
+ Etag = couch_httpd:make_etag(UUIDs),
+ couch_httpd:etag_respond(Req, Etag, fun() ->
+ CacheBustingHeaders = [
+ {"Date", couch_util:rfc1123_date()},
+ {"Cache-Control", "no-cache"},
+ % Past date, ON PURPOSE!
+ {"Expires", "Mon, 01 Jan 1990 00:00:00 GMT"},
+ {"Pragma", "no-cache"},
+ {"ETag", Etag}
+ ],
+ send_json(Req, 200, CacheBustingHeaders, {[{<<"uuids">>, UUIDs}]})
+ end);
handle_uuids_req(Req) ->
- couch_httpd_misc_handlers:handle_uuids_req(Req).
-
+ send_method_not_allowed(Req, "GET").
handle_up_req(#httpd{method='GET'} = Req) ->
case config:get("couchdb", "maintenance_mode") of
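
The _uuids handler above no longer delegates to couch_httpd_misc_handlers: the GET clause validates the count parameter against [uuids] max_count and replies with cache-busting headers, and every other method now gets a 405. A rough eunit-style sketch of the boundary behaviour, reusing the test_request helpers seen in the test suites of this patch; Url points at the server root and the exact helper arities are assumed:

    uuids_sketch(Url) ->
        {ok, 200, _, Body} = test_request:get(Url ++ "/_uuids?count=3", []),
        {[{<<"uuids">>, UUIDs}]} = jiffy:decode(Body),
        3 = length(UUIDs),
        %% count above [uuids] max_count (default 1000) is rejected
        {ok, 400, _, _} = test_request:get(Url ++ "/_uuids?count=99999", []),
        %% only GET is allowed
        {ok, 405, _, _} = test_request:request(put, Url ++ "/_uuids", [], []),
        ok.
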
diff --git a/src/chttpd/src/chttpd_node.erl b/src/chttpd/src/chttpd_node.erl
index 0159672f5..54e0e489e 100644
--- a/src/chttpd/src/chttpd_node.erl
+++ b/src/chttpd/src/chttpd_node.erl
@@ -72,7 +72,7 @@ handle_node_req(#httpd{method='PUT', path_parts=[_, Node, <<"_config">>, Section
Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
OldValue = call_node(Node, config, get, [Section, Key, ""]),
IsSensitive = Section == <<"admins">>,
- Opts = #{persisit => Persist, sensitive => IsSensitive},
+ Opts = #{persist => Persist, sensitive => IsSensitive},
case call_node(Node, config, set, [Section, Key, ?b2l(Value), Opts]) of
ok ->
send_json(Req, 200, list_to_binary(OldValue));
@@ -117,6 +117,14 @@ handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_stats">> | Path]}=
chttpd:send_json(Req, EJSON1);
handle_node_req(#httpd{path_parts=[_, _Node, <<"_stats">>]}=Req) ->
send_method_not_allowed(Req, "GET");
+handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_prometheus">>]}=Req) ->
+ Metrics = call_node(Node, couch_prometheus_server, scrape, []),
+ Version = call_node(Node, couch_prometheus_server, version, []),
+ Type = "text/plain; version=" ++ Version,
+ Header = [{<<"Content-Type">>, ?l2b(Type)}],
+ chttpd:send_response(Req, 200, Header, Metrics);
+handle_node_req(#httpd{path_parts=[_, _Node, <<"_prometheus">>]}=Req) ->
+ send_method_not_allowed(Req, "GET");
% GET /_node/$node/_system
handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_system">>]}=Req) ->
Stats = call_node(Node, chttpd_node, get_stats, []),
@@ -130,54 +138,14 @@ handle_node_req(#httpd{method='POST', path_parts=[_, Node, <<"_restart">>]}=Req)
send_json(Req, 200, {[{ok, true}]});
handle_node_req(#httpd{path_parts=[_, _Node, <<"_restart">>]}=Req) ->
send_method_not_allowed(Req, "POST");
-handle_node_req(#httpd{path_parts=[_, Node | PathParts],
- mochi_req=MochiReq0}) ->
- % strip /_node/{node} from Req0 before descending further
- RawUri = MochiReq0:get(raw_path),
- {_, Query, Fragment} = mochiweb_util:urlsplit_path(RawUri),
- NewPath0 = "/" ++ lists:join("/", [couch_util:url_encode(P) || P <- PathParts]),
- NewRawPath = mochiweb_util:urlunsplit_path({NewPath0, Query, Fragment}),
- MaxSize = config:get_integer("httpd", "max_http_request_size", 4294967296),
- NewOpts = [{body, MochiReq0:recv_body(MaxSize)} | MochiReq0:get(opts)],
- Ref = erlang:make_ref(),
- MochiReq = mochiweb_request:new({remote, self(), Ref},
- NewOpts,
- MochiReq0:get(method),
- NewRawPath,
- MochiReq0:get(version),
- MochiReq0:get(headers)),
- call_node(Node, couch_httpd, handle_request, [MochiReq]),
- recv_loop(Ref, MochiReq0);
+handle_node_req(#httpd{path_parts=[_, _Node | _PathParts]}=Req) ->
+ % Local (backend) dbs are not supported any more
+ chttpd_httpd_handlers:not_supported(Req);
handle_node_req(#httpd{path_parts=[_]}=Req) ->
chttpd:send_error(Req, {bad_request, <<"Incomplete path to _node request">>});
handle_node_req(Req) ->
chttpd:send_error(Req, not_found).
-recv_loop(Ref, ReqResp) ->
- receive
- {Ref, Code, Headers, _Args, start_response} ->
- recv_loop(Ref, ReqResp:start({Code, Headers}));
- {Ref, Code, Headers, Len, start_response_length} ->
- recv_loop(Ref, ReqResp:start_response_length({Code, Headers, Len}));
- {Ref, Code, Headers, chunked, respond} ->
- Resp = ReqResp:respond({Code, Headers, chunked}),
- recv_loop(Ref, Resp);
- {Ref, Code, Headers, Args, respond} ->
- Resp = ReqResp:respond({Code, Headers, Args}),
- {ok, Resp};
- {Ref, send, Data} ->
- ReqResp:send(Data),
- {ok, ReqResp};
- {Ref, chunk, <<>>} ->
- ReqResp:write_chunk(<<>>),
- {ok, ReqResp};
- {Ref, chunk, Data} ->
- ReqResp:write_chunk(Data),
- recv_loop(Ref, ReqResp);
- _Else ->
- recv_loop(Ref, ReqResp)
- end.
-
call_node(Node0, Mod, Fun, Args) when is_binary(Node0) ->
Node1 = try
list_to_existing_atom(?b2l(Node0))
@@ -231,7 +199,6 @@ get_stats() ->
{process_count, erlang:system_info(process_count)},
{process_limit, erlang:system_info(process_limit)},
{message_queues, {MessageQueues}},
- {internal_replication_jobs, mem3_sync:get_backlog()},
{distribution, {get_distribution_stats()}}
].
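
Besides fixing the persisit/persist typo in the config-set options map, chttpd_node gains a per-node Prometheus scrape endpoint, and arbitrary paths under /_node/{node} are no longer proxied to the backend node but answered with 410 via chttpd_httpd_handlers:not_supported/1. A rough sketch of the new endpoint, again in eunit style with an assumed Url and test_request helper:

    prometheus_sketch(Url) ->
        {ok, Code, _Headers, Body} = test_request:get(Url ++ "/_node/_local/_prometheus", []),
        200 = Code,
        %% Body is the Prometheus text exposition output of
        %% couch_prometheus_server:scrape/0 on the target node.
        true = is_binary(Body) orelse is_list(Body),
        ok.
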
diff --git a/src/chttpd/src/chttpd_rewrite.erl b/src/chttpd/src/chttpd_rewrite.erl
deleted file mode 100644
index 1c2c1f333..000000000
--- a/src/chttpd/src/chttpd_rewrite.erl
+++ /dev/null
@@ -1,487 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-%
-% bind_path is based on bind method from Webmachine
-
-
-%% @doc Module for URL rewriting by pattern matching.
-
--module(chttpd_rewrite).
-
--compile(tuple_calls).
-
--export([handle_rewrite_req/3]).
--include_lib("couch/include/couch_db.hrl").
-
--define(SEPARATOR, $\/).
--define(MATCH_ALL, {bind, <<"*">>}).
-
-
-handle_rewrite_req(#httpd{}=Req, Db, DDoc) ->
- RewritesSoFar = erlang:get(?REWRITE_COUNT),
- MaxRewrites = config:get_integer("httpd", "rewrite_limit", 100),
- case RewritesSoFar >= MaxRewrites of
- true ->
- throw({bad_request, <<"Exceeded rewrite recursion limit">>});
- false ->
- erlang:put(?REWRITE_COUNT, RewritesSoFar + 1)
- end,
- case get_rules(DDoc) of
- Rules when is_list(Rules) ->
- do_rewrite(Req, Rules);
- Rules when is_binary(Rules) ->
- case couch_query_servers:rewrite(Req, Db, DDoc) of
- undefined ->
- chttpd:send_error(Req, 404, <<"rewrite_error">>,
- <<"Invalid path.">>);
- Rewrite ->
- do_rewrite(Req, Rewrite)
- end;
- undefined ->
- chttpd:send_error(Req, 404, <<"rewrite_error">>,
- <<"Invalid path.">>)
- end.
-
-
-get_rules(#doc{body={Props}}) ->
- couch_util:get_value(<<"rewrites">>, Props).
-
-
-do_rewrite(#httpd{mochi_req=MochiReq}=Req, {Props}=Rewrite) when is_list(Props) ->
- case couch_util:get_value(<<"code">>, Props) of
- undefined ->
- Method = rewrite_method(Req, Rewrite),
- Headers = rewrite_headers(Req, Rewrite),
- Path = ?b2l(rewrite_path(Req, Rewrite)),
- NewMochiReq = mochiweb_request:new(MochiReq:get(socket),
- Method,
- Path,
- MochiReq:get(version),
- Headers),
- Body = case couch_util:get_value(<<"body">>, Props) of
- undefined -> erlang:get(mochiweb_request_body);
- B -> B
- end,
- NewMochiReq:cleanup(),
- case Body of
- undefined -> [];
- _ -> erlang:put(mochiweb_request_body, Body)
- end,
- couch_log:debug("rewrite to ~p", [Path]),
- chttpd:handle_request_int(NewMochiReq);
- Code ->
- chttpd:send_response(
- Req,
- Code,
- case couch_util:get_value(<<"headers">>, Props) of
- undefined -> [];
- {H1} -> H1
- end,
- rewrite_body(Rewrite))
- end;
-do_rewrite(#httpd{method=Method,
- path_parts=[_DbName, <<"_design">>, _DesignName, _Rewrite|PathParts],
- mochi_req=MochiReq}=Req,
- Rules) when is_list(Rules) ->
- % create dispatch list from rules
- Prefix = path_prefix(Req),
- QueryList = lists:map(fun decode_query_value/1, chttpd:qs(Req)),
-
- DispatchList = [make_rule(Rule) || {Rule} <- Rules],
- Method1 = couch_util:to_binary(Method),
-
- %% get raw path by matching url to a rule.
- RawPath = case try_bind_path(DispatchList, Method1,
- PathParts, QueryList) of
- no_dispatch_path ->
- throw(not_found);
- {NewPathParts, Bindings} ->
- Parts = [quote_plus(X) || X <- NewPathParts],
-
- % build new path, reencode query args, eventually convert
- % them to json
- Bindings1 = maybe_encode_bindings(Bindings),
- Path = iolist_to_binary([
- string:join(Parts, [?SEPARATOR]),
- [["?", mochiweb_util:urlencode(Bindings1)] || Bindings1 =/= []]
- ]),
-
- % if path is relative detect it and rewrite path
- safe_relative_path(Prefix, Path)
- end,
-
- % normalize final path (fix levels "." and "..")
- RawPath1 = ?b2l(normalize_path(RawPath)),
-
- couch_log:debug("rewrite to ~p ~n", [RawPath1]),
-
- % build a new mochiweb request
- MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
- MochiReq:get(method),
- RawPath1,
- MochiReq:get(version),
- MochiReq:get(headers)),
-
- % cleanup, It force mochiweb to reparse raw uri.
- MochiReq1:cleanup(),
-
- chttpd:handle_request_int(MochiReq1).
-
-
-rewrite_method(#httpd{method=Method}, {Props}) ->
- DefaultMethod = couch_util:to_binary(Method),
- couch_util:get_value(<<"method">>, Props, DefaultMethod).
-
-rewrite_path(#httpd{}=Req, {Props}=Rewrite) ->
- Prefix = path_prefix(Req),
- RewritePath = case couch_util:get_value(<<"path">>, Props) of
- undefined ->
- throw({<<"rewrite_error">>,
- <<"Rewrite result must produce a new path.">>});
- P -> P
- end,
- SafeRelativePath = safe_relative_path(Prefix, RewritePath),
- NormalizedPath = normalize_path(SafeRelativePath),
- QueryParams = rewrite_query_params(Req, Rewrite),
- case QueryParams of
- <<"">> ->
- NormalizedPath;
- QueryParams ->
- <<NormalizedPath/binary, "?", QueryParams/binary>>
- end.
-
-rewrite_query_params(#httpd{}=Req, {Props}) ->
- RequestQS = chttpd:qs(Req),
- RewriteQS = case couch_util:get_value(<<"query">>, Props) of
- undefined -> RequestQS;
- {V} -> V
- end,
- RewriteQSEsc = [{chttpd:quote(K), chttpd:quote(V)} || {K, V} <- RewriteQS],
- iolist_to_binary(string:join([[K, "=", V] || {K, V} <- RewriteQSEsc], "&")).
-
-rewrite_headers(#httpd{mochi_req=MochiReq}, {Props}) ->
- case couch_util:get_value(<<"headers">>, Props) of
- undefined ->
- MochiReq:get(headers);
- {H} ->
- mochiweb_headers:enter_from_list(
- lists:map(fun({Key, Val}) -> {?b2l(Key), ?b2l(Val)} end, H),
- MochiReq:get(headers))
- end.
-
-rewrite_body({Props}) ->
- Body = case couch_util:get_value(<<"body">>, Props) of
- undefined -> erlang:get(mochiweb_request_body);
- B -> B
- end,
- case Body of
- undefined ->
- [];
- _ ->
- erlang:put(mochiweb_request_body, Body),
- Body
- end.
-
-
-path_prefix(#httpd{path_parts=[DbName, <<"_design">>, DesignName | _]}) ->
- EscapedDesignName = ?l2b(couch_util:url_encode(DesignName)),
- EscapedDbName = ?l2b(couch_util:url_encode(DbName)),
- DesignId = <<"_design/", EscapedDesignName/binary>>,
- <<"/", EscapedDbName/binary, "/", DesignId/binary>>.
-
-safe_relative_path(Prefix, Path) ->
- case mochiweb_util:safe_relative_path(?b2l(Path)) of
- undefined ->
- <<Prefix/binary, "/", Path/binary>>;
- V0 ->
- V1 = ?l2b(V0),
- <<Prefix/binary, "/", V1/binary>>
- end.
-
-
-quote_plus({bind, X}) ->
- mochiweb_util:quote_plus(X);
-quote_plus(X) ->
- mochiweb_util:quote_plus(X).
-
-%% @doc Try to find a rule matching current url. If none is found
-%% 404 error not_found is raised
-try_bind_path([], _Method, _PathParts, _QueryList) ->
- no_dispatch_path;
-try_bind_path([Dispatch|Rest], Method, PathParts, QueryList) ->
- [{PathParts1, Method1}, RedirectPath, QueryArgs, Formats] = Dispatch,
- case bind_method(Method1, Method) of
- true ->
- case bind_path(PathParts1, PathParts, []) of
- {ok, Remaining, Bindings} ->
- Bindings1 = Bindings ++ QueryList,
- % we parse query args from the rule and fill
- % it eventually with bindings vars
- QueryArgs1 = make_query_list(QueryArgs, Bindings1,
- Formats, []),
- % remove params in QueryLists1 that are already in
- % QueryArgs1
- Bindings2 = lists:foldl(fun({K, V}, Acc) ->
- K1 = to_binding(K),
- KV = case couch_util:get_value(K1, QueryArgs1) of
- undefined -> [{K1, V}];
- _V1 -> []
- end,
- Acc ++ KV
- end, [], Bindings1),
-
- FinalBindings = Bindings2 ++ QueryArgs1,
- NewPathParts = make_new_path(RedirectPath, FinalBindings,
- Remaining, []),
- {NewPathParts, FinalBindings};
- fail ->
- try_bind_path(Rest, Method, PathParts, QueryList)
- end;
- false ->
- try_bind_path(Rest, Method, PathParts, QueryList)
- end.
-
-%% rewriting dynamically the quey list given as query member in
-%% rewrites. Each value is replaced by one binding or an argument
-%% passed in url.
-make_query_list([], _Bindings, _Formats, Acc) ->
- Acc;
-make_query_list([{Key, {Value}}|Rest], Bindings, Formats, Acc) ->
- Value1 = {Value},
- make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_binary(Value) ->
- Value1 = replace_var(Value, Bindings, Formats),
- make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_list(Value) ->
- Value1 = replace_var(Value, Bindings, Formats),
- make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) ->
- make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value}|Acc]).
-
-replace_var(<<"*">>=Value, Bindings, Formats) ->
- get_var(Value, Bindings, Value, Formats);
-replace_var(<<":", Var/binary>> = Value, Bindings, Formats) ->
- get_var(Var, Bindings, Value, Formats);
-replace_var(Value, _Bindings, _Formats) when is_binary(Value) ->
- Value;
-replace_var(Value, Bindings, Formats) when is_list(Value) ->
- lists:reverse(lists:foldl(fun
- (<<":", Var/binary>>=Value1, Acc) ->
- [get_var(Var, Bindings, Value1, Formats)|Acc];
- (Value1, Acc) ->
- [Value1|Acc]
- end, [], Value));
-replace_var(Value, _Bindings, _Formats) ->
- Value.
-
-maybe_json(Key, Value) ->
- case lists:member(Key, [<<"key">>, <<"startkey">>, <<"start_key">>,
- <<"endkey">>, <<"end_key">>, <<"keys">>]) of
- true ->
- ?JSON_ENCODE(Value);
- false ->
- Value
- end.
-
-get_var(VarName, Props, Default, Formats) ->
- VarName1 = to_binding(VarName),
- Val = couch_util:get_value(VarName1, Props, Default),
- maybe_format(VarName, Val, Formats).
-
-maybe_format(VarName, Value, Formats) ->
- case couch_util:get_value(VarName, Formats) of
- undefined ->
- Value;
- Format ->
- format(Format, Value)
- end.
-
-format(<<"int">>, Value) when is_integer(Value) ->
- Value;
-format(<<"int">>, Value) when is_binary(Value) ->
- format(<<"int">>, ?b2l(Value));
-format(<<"int">>, Value) when is_list(Value) ->
- case (catch list_to_integer(Value)) of
- IntVal when is_integer(IntVal) ->
- IntVal;
- _ ->
- Value
- end;
-format(<<"bool">>, Value) when is_binary(Value) ->
- format(<<"bool">>, ?b2l(Value));
-format(<<"bool">>, Value) when is_list(Value) ->
- case string:to_lower(Value) of
- "true" -> true;
- "false" -> false;
- _ -> Value
- end;
-format(_Format, Value) ->
- Value.
-
-%% doc: build new patch from bindings. bindings are query args
-%% (+ dynamic query rewritten if needed) and bindings found in
-%% bind_path step.
-make_new_path([], _Bindings, _Remaining, Acc) ->
- lists:reverse(Acc);
-make_new_path([?MATCH_ALL], _Bindings, Remaining, Acc) ->
- Acc1 = lists:reverse(Acc) ++ Remaining,
- Acc1;
-make_new_path([?MATCH_ALL|_Rest], _Bindings, Remaining, Acc) ->
- Acc1 = lists:reverse(Acc) ++ Remaining,
- Acc1;
-make_new_path([{bind, P}|Rest], Bindings, Remaining, Acc) ->
- P2 = case couch_util:get_value({bind, P}, Bindings) of
- undefined -> << "undefined">>;
- P1 ->
- iolist_to_binary(P1)
- end,
- make_new_path(Rest, Bindings, Remaining, [P2|Acc]);
-make_new_path([P|Rest], Bindings, Remaining, Acc) ->
- make_new_path(Rest, Bindings, Remaining, [P|Acc]).
-
-
-%% @doc If method of the query fith the rule method. If the
-%% method rule is '*', which is the default, all
-%% request method will bind. It allows us to make rules
-%% depending on HTTP method.
-bind_method(?MATCH_ALL, _Method) ->
- true;
-bind_method({bind, Method}, Method) ->
- true;
-bind_method(_, _) ->
- false.
-
-
-%% @doc bind path. Using the rule from we try to bind variables given
-%% to the current url by pattern matching
-bind_path([], [], Bindings) ->
- {ok, [], Bindings};
-bind_path([?MATCH_ALL], Rest, Bindings) when is_list(Rest) ->
- {ok, Rest, Bindings};
-bind_path(_, [], _) ->
- fail;
-bind_path([{bind, Token}|RestToken],[Match|RestMatch],Bindings) ->
- bind_path(RestToken, RestMatch, [{{bind, Token}, Match}|Bindings]);
-bind_path([Token|RestToken], [Token|RestMatch], Bindings) ->
- bind_path(RestToken, RestMatch, Bindings);
-bind_path(_, _, _) ->
- fail.
-
-
-%% normalize path.
-normalize_path(Path) when is_binary(Path)->
- normalize_path(?b2l(Path));
-normalize_path(Path) when is_list(Path)->
- Segments = normalize_path1(string:tokens(Path, "/"), []),
- NormalizedPath = string:join(Segments, [?SEPARATOR]),
- iolist_to_binary(["/", NormalizedPath]).
-
-
-normalize_path1([], Acc) ->
- lists:reverse(Acc);
-normalize_path1([".."|Rest], Acc) ->
- Acc1 = case Acc of
- [] -> [".."|Acc];
- [T|_] when T =:= ".." -> [".."|Acc];
- [_|R] -> R
- end,
- normalize_path1(Rest, Acc1);
-normalize_path1(["."|Rest], Acc) ->
- normalize_path1(Rest, Acc);
-normalize_path1([Path|Rest], Acc) ->
- normalize_path1(Rest, [Path|Acc]).
-
-
-%% @doc transform json rule in erlang for pattern matching
-make_rule(Rule) ->
- Method = case couch_util:get_value(<<"method">>, Rule) of
- undefined -> ?MATCH_ALL;
- M -> to_binding(M)
- end,
- QueryArgs = case couch_util:get_value(<<"query">>, Rule) of
- undefined -> [];
- {Args} -> Args
- end,
- FromParts = case couch_util:get_value(<<"from">>, Rule) of
- undefined -> [?MATCH_ALL];
- From ->
- parse_path(From)
- end,
- ToParts = case couch_util:get_value(<<"to">>, Rule) of
- undefined ->
- throw({error, invalid_rewrite_target});
- To ->
- parse_path(To)
- end,
- Formats = case couch_util:get_value(<<"formats">>, Rule) of
- undefined -> [];
- {Fmts} -> Fmts
- end,
- [{FromParts, Method}, ToParts, QueryArgs, Formats].
-
-parse_path(Path) ->
- {ok, SlashRE} = re:compile(<<"\\/">>),
- path_to_list(re:split(Path, SlashRE), [], 0).
-
-%% @doc convert a path rule (from or to) to an erlang list
-%% * and path variable starting by ":" are converted
-%% in erlang atom.
-path_to_list([], Acc, _DotDotCount) ->
- lists:reverse(Acc);
-path_to_list([<<>>|R], Acc, DotDotCount) ->
- path_to_list(R, Acc, DotDotCount);
-path_to_list([<<"*">>|R], Acc, DotDotCount) ->
- path_to_list(R, [?MATCH_ALL|Acc], DotDotCount);
-path_to_list([<<"..">>|R], Acc, DotDotCount) when DotDotCount == 2 ->
- case config:get("httpd", "secure_rewrites", "true") of
- "false" ->
- path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
- _Else ->
- couch_log:notice("insecure_rewrite_rule ~p blocked", [lists:reverse(Acc) ++ [<<"..">>] ++ R]),
- throw({insecure_rewrite_rule, "too many ../.. segments"})
- end;
-path_to_list([<<"..">>|R], Acc, DotDotCount) ->
- path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
-path_to_list([P|R], Acc, DotDotCount) ->
- P1 = case P of
- <<":", Var/binary>> ->
- to_binding(Var);
- _ -> P
- end,
- path_to_list(R, [P1|Acc], DotDotCount).
-
-maybe_encode_bindings([]) ->
- [];
-maybe_encode_bindings(Props) ->
- lists:foldl(fun
- ({{bind, <<"*">>}, _V}, Acc) ->
- Acc;
- ({{bind, K}, V}, Acc) ->
- V1 = iolist_to_binary(maybe_json(K, V)),
- [{K, V1}|Acc]
- end, [], Props).
-
-decode_query_value({K,V}) ->
- case lists:member(K, ["key", "startkey", "start_key",
- "endkey", "end_key", "keys"]) of
- true ->
- {to_binding(K), ?JSON_DECODE(V)};
- false ->
- {to_binding(K), ?l2b(V)}
- end.
-
-to_binding({bind, V}) ->
- {bind, V};
-to_binding(V) when is_list(V) ->
- to_binding(?l2b(V));
-to_binding(V) ->
- {bind, V}.
diff --git a/src/chttpd/src/chttpd_show.erl b/src/chttpd/src/chttpd_show.erl
index 8a15bdcbe..9fda7ff89 100644
--- a/src/chttpd/src/chttpd_show.erl
+++ b/src/chttpd/src/chttpd_show.erl
@@ -12,15 +12,11 @@
-module(chttpd_show).
--export([handle_doc_show_req/3, handle_doc_update_req/3, handle_view_list_req/3]).
+-export([handle_doc_update_req/3]).
-include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
-% /db/_design/foo/_show/bar/docid
-% show converts a json doc to a response of any content-type.
-% it looks up the doc an then passes it to the query server.
-% then it sends the response from the query server to the http client.
maybe_open_doc(Db, DocId, Options) ->
case fabric:open_doc(Db, DocId, Options) of
@@ -31,70 +27,6 @@ maybe_open_doc(Db, DocId, Options) ->
nil
end.
-handle_doc_show_req(#httpd{
- path_parts=[_, _, _, _, ShowName, DocId]
- }=Req, Db, DDoc) ->
-
- % open the doc
- Options = [conflicts, {user_ctx, Req#httpd.user_ctx}],
- Doc = maybe_open_doc(Db, DocId, Options),
-
- % we don't handle revs here b/c they are an internal api
- % returns 404 if there is no doc with DocId
- handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId);
-
-handle_doc_show_req(#httpd{
- path_parts=[_, _, _, _, ShowName, DocId|Rest]
- }=Req, Db, DDoc) ->
-
- DocParts = [DocId|Rest],
- DocId1 = ?l2b(string:join([?b2l(P)|| P <- DocParts], "/")),
-
- % open the doc
- Options = [conflicts, {user_ctx, Req#httpd.user_ctx}],
- Doc = maybe_open_doc(Db, DocId1, Options),
-
- % we don't handle revs here b/c they are an internal api
- % pass 404 docs to the show function
- handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId1);
-
-handle_doc_show_req(#httpd{
- path_parts=[_, _, _, _, ShowName]
- }=Req, Db, DDoc) ->
- % with no docid the doc is nil
- handle_doc_show(Req, Db, DDoc, ShowName, nil);
-
-handle_doc_show_req(Req, _Db, _DDoc) ->
- chttpd:send_error(Req, 404, <<"show_error">>, <<"Invalid path.">>).
-
-handle_doc_show(Req, Db, DDoc, ShowName, Doc) ->
- handle_doc_show(Req, Db, DDoc, ShowName, Doc, null).
-
-handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId) ->
- %% Will throw an exception if the _show handler is missing
- couch_util:get_nested_json_value(DDoc#doc.body, [<<"shows">>, ShowName]),
- % get responder for ddoc/showname
- CurrentEtag = show_etag(Req, Doc, DDoc, []),
- chttpd:etag_respond(Req, CurrentEtag, fun() ->
- JsonReq = chttpd_external:json_req_obj(Req, Db, DocId),
- JsonDoc = couch_query_servers:json_doc(Doc),
- [<<"resp">>, ExternalResp] =
- couch_query_servers:ddoc_prompt(DDoc, [<<"shows">>, ShowName],
- [JsonDoc, JsonReq]),
- JsonResp = apply_etag(ExternalResp, CurrentEtag),
- chttpd_external:send_external_response(Req, JsonResp)
- end).
-
-
-show_etag(#httpd{user_ctx=UserCtx}=Req, Doc, DDoc, More) ->
- Accept = chttpd:header_value(Req, "Accept"),
- DocPart = case Doc of
- nil -> nil;
- Doc -> chttpd:doc_etag(Doc)
- end,
- couch_httpd:make_etag({couch_httpd:doc_etag(DDoc), DocPart, Accept,
- UserCtx#user_ctx.roles, More}).
-
% /db/_design/foo/update/bar/docid
% updates a doc based on a request
% handle_doc_update_req(#httpd{method = 'GET'}=Req, _Db, _DDoc) ->
@@ -133,7 +65,7 @@ send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId) ->
Options = [{user_ctx, Req#httpd.user_ctx}]
end,
NewDoc = couch_db:doc_from_json_obj_validate(Db, {NewJsonDoc}),
- couch_doc:validate_docid(NewDoc#doc.id),
+ fabric2_db:validate_docid(NewDoc#doc.id),
{UpdateResult, NewRev} = fabric:update_doc(Db, NewDoc, Options),
chttpd_stats:incr_writes(),
NewRevStr = couch_doc:rev_to_str(NewRev),
@@ -154,86 +86,6 @@ send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId) ->
% todo set location field
chttpd_external:send_external_response(Req, JsonResp).
-
-% view-list request with view and list from same design doc.
-handle_view_list_req(#httpd{method=Method,
- path_parts=[_, _, DesignName, _, ListName, ViewName]}=Req, Db, DDoc)
- when Method =:= 'GET' orelse Method =:= 'OPTIONS' ->
- Keys = chttpd:qs_json_value(Req, "keys", undefined),
- handle_view_list(Req, Db, DDoc, ListName, {DesignName, ViewName}, Keys);
-
-% view-list request with view and list from different design docs.
-handle_view_list_req(#httpd{method=Method,
- path_parts=[_, _, _, _, ListName, DesignName, ViewName]}=Req, Db, DDoc)
- when Method =:= 'GET' orelse Method =:= 'OPTIONS' ->
- Keys = chttpd:qs_json_value(Req, "keys", undefined),
- handle_view_list(Req, Db, DDoc, ListName, {DesignName, ViewName}, Keys);
-
-handle_view_list_req(#httpd{method=Method}=Req, _Db, _DDoc)
- when Method =:= 'GET' orelse Method =:= 'OPTIONS' ->
- chttpd:send_error(Req, 404, <<"list_error">>, <<"Invalid path.">>);
-
-handle_view_list_req(#httpd{method='POST',
- path_parts=[_, _, DesignName, _, ListName, ViewName]}=Req, Db, DDoc) ->
- chttpd:validate_ctype(Req, "application/json"),
- ReqBody = chttpd:body(Req),
- {Props2} = ?JSON_DECODE(ReqBody),
- Keys = proplists:get_value(<<"keys">>, Props2, undefined),
- handle_view_list(Req#httpd{req_body=ReqBody}, Db, DDoc, ListName,
- {DesignName, ViewName}, Keys);
-
-handle_view_list_req(#httpd{method='POST',
- path_parts=[_, _, _, _, ListName, DesignName, ViewName]}=Req, Db, DDoc) ->
- chttpd:validate_ctype(Req, "application/json"),
- ReqBody = chttpd:body(Req),
- {Props2} = ?JSON_DECODE(ReqBody),
- Keys = proplists:get_value(<<"keys">>, Props2, undefined),
- handle_view_list(Req#httpd{req_body=ReqBody}, Db, DDoc, ListName,
- {DesignName, ViewName}, Keys);
-
-handle_view_list_req(#httpd{method='POST'}=Req, _Db, _DDoc) ->
- chttpd:send_error(Req, 404, <<"list_error">>, <<"Invalid path.">>);
-
-handle_view_list_req(Req, _Db, _DDoc) ->
- chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-
-handle_view_list(Req, Db, DDoc, LName, {ViewDesignName, ViewName}, Keys) ->
- %% Will throw an exception if the _list handler is missing
- couch_util:get_nested_json_value(DDoc#doc.body, [<<"lists">>, LName]),
- DbName = couch_db:name(Db),
- {ok, VDoc} = ddoc_cache:open(DbName, <<"_design/", ViewDesignName/binary>>),
- CB = fun list_cb/2,
- QueryArgs = couch_mrview_http:parse_body_and_query(Req, Keys),
- Options = [{user_ctx, Req#httpd.user_ctx}],
- couch_query_servers:with_ddoc_proc(DDoc, fun(QServer) ->
- Acc = #lacc{
- lname = LName,
- req = Req,
- qserver = QServer,
- db = Db
- },
- case ViewName of
- <<"_all_docs">> ->
- fabric:all_docs(Db, Options, CB, Acc, QueryArgs);
- _ ->
- fabric:query_view(Db, Options, VDoc, ViewName,
- CB, Acc, QueryArgs)
- end
- end).
-
-
-list_cb({row, Row} = Msg, Acc) ->
- case lists:keymember(doc, 1, Row) of
- true -> chttpd_stats:incr_reads();
- false -> ok
- end,
- chttpd_stats:incr_rows(),
- couch_mrview_show:list_cb(Msg, Acc);
-
-list_cb(Msg, Acc) ->
- couch_mrview_show:list_cb(Msg, Acc).
-
-
% Maybe this is in the proplists API
% todo move to couch_util
json_apply_field(H, {L}) ->
diff --git a/src/chttpd/src/chttpd_stats.erl b/src/chttpd/src/chttpd_stats.erl
index 27e9c3180..18622783c 100644
--- a/src/chttpd/src/chttpd_stats.erl
+++ b/src/chttpd/src/chttpd_stats.erl
@@ -12,6 +12,7 @@
-module(chttpd_stats).
+-include_lib("kernel/include/logger.hrl").
-export([
init/1,
@@ -61,14 +62,19 @@ report(HttpResp) ->
_ ->
ok
end
- catch T:R ->
- S = erlang:get_stacktrace(),
+ catch T:R:S ->
+ ?LOG_ERROR(#{
+ what => stats_report_failure,
+ tag => T,
+ details => R,
+ stacktrace => S
+ }),
Fmt = "Failed to report chttpd request stats: ~p:~p ~p",
couch_log:error(Fmt, [T, R, S])
end.
-report(HttpResp, #st{reporter = undefined}) ->
+report(_HttpResp, #st{reporter = undefined}) ->
ok;
report(HttpResp, #st{reporter = Reporter} = St) ->
diff --git a/src/chttpd/src/chttpd_sup.erl b/src/chttpd/src/chttpd_sup.erl
index 8b51e6c40..250690d51 100644
--- a/src/chttpd/src/chttpd_sup.erl
+++ b/src/chttpd/src/chttpd_sup.erl
@@ -22,6 +22,8 @@
-export([handle_config_change/5, handle_config_terminate/3]).
+-include_lib("kernel/include/logger.hrl").
+
%% Helper macro for declaring children of supervisor
-define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 100, Type, [I]}).
@@ -33,6 +35,7 @@ start_link() ->
supervisor:start_link({local,?MODULE}, ?MODULE, Arg).
init(disabled) ->
+ ?LOG_NOTICE(#{what => http_api_disabled}),
couch_log:notice("~p : api_frontend disabled", [?MODULE]),
{ok, {{one_for_one, 3, 10}, []}};
@@ -99,6 +102,13 @@ append_if_set({Key, Value}, Opts) when Value > 0 ->
append_if_set({_Key, 0}, Opts) ->
Opts;
append_if_set({Key, Value}, Opts) ->
+ ?LOG_ERROR(#{
+ what => invalid_config_setting,
+ section => chttpd_auth_cache,
+ key => Key,
+ value => Value,
+ details => "value must be a non-negative integer"
+ }),
couch_log:error(
"The value for `~s` should be string convertable "
"to integer which is >= 0 (got `~p`)", [Key, Value]),
diff --git a/src/chttpd/src/chttpd_util.erl b/src/chttpd/src/chttpd_util.erl
new file mode 100644
index 000000000..fcaa09de0
--- /dev/null
+++ b/src/chttpd/src/chttpd_util.erl
@@ -0,0 +1,41 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(chttpd_util).
+
+
+-export([
+ parse_copy_destination_header/1
+]).
+
+
+parse_copy_destination_header(Req) ->
+ case couch_httpd:header_value(Req, "Destination") of
+ undefined ->
+ throw({bad_request, "Destination header is mandatory for COPY."});
+ Destination ->
+ case re:run(Destination, "^https?://", [{capture, none}]) of
+ match ->
+ throw({bad_request, "Destination URL must be relative."});
+ nomatch ->
+ % see if ?rev=revid got appended to the Destination header
+ case re:run(Destination, "\\?", [{capture, none}]) of
+ nomatch ->
+ {list_to_binary(Destination), {0, []}};
+ match ->
+ [DocId, RevQs] = re:split(Destination, "\\?", [{return, list}]),
+ [_RevQueryKey, Rev] = re:split(RevQs, "=", [{return, list}]),
+ {Pos, RevId} = couch_doc:parse_rev(Rev),
+ {list_to_binary(DocId), {Pos, [RevId]}}
+ end
+ end
+ end.
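
The COPY Destination parsing moves from couch_httpd_db into the new chttpd_util module. A minimal eunit-style sketch of its contract, with the request mocked via meck (the header is read through couch_httpd:header_value/2); the doc id and revision below are made up for illustration:

    parse_copy_destination_sketch() ->
        meck:new(couch_httpd, [passthrough]),
        meck:expect(couch_httpd, header_value, fun(_Req, "Destination") ->
            "target-doc?rev=1-967a00dff5e02add41819138abb3284d"
        end),
        {DocId, {Pos, [RevId]}} = chttpd_util:parse_copy_destination_header(req),
        <<"target-doc">> = DocId,
        1 = Pos,
        true = is_binary(RevId),
        meck:unload(couch_httpd).
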
diff --git a/src/chttpd/src/chttpd_view.erl b/src/chttpd/src/chttpd_view.erl
index 8d401013c..e0001da67 100644
--- a/src/chttpd/src/chttpd_view.erl
+++ b/src/chttpd/src/chttpd_view.erl
@@ -12,7 +12,7 @@
-module(chttpd_view).
-include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
-export([
handle_view_req/3,
@@ -35,10 +35,10 @@ multi_query_view(Req, Db, DDoc, ViewName, Queries) ->
stream_multi_query_view(Req, Db, DDoc, ViewName, Args0, Queries) ->
- {ok, #mrst{views=Views}} = couch_mrview_util:ddoc_to_mrst(Db, DDoc),
- Args1 = couch_mrview_util:set_view_type(Args0, ViewName, Views),
+ {ok, #mrst{views=Views}} = couch_views_util:ddoc_to_mrst(Db, DDoc),
+ Args1 = couch_views_util:set_view_type(Args0, ViewName, Views),
ArgQueries = parse_queries(Req, Args1, Queries, fun(QueryArg) ->
- couch_mrview_util:set_view_type(QueryArg, ViewName, Views)
+ couch_views_util:set_view_type(QueryArg, ViewName, Views)
end),
VAcc0 = #vacc{db=Db, req=Req, prepend="\r\n"},
FirstChunk = "{\"results\":[",
@@ -54,9 +54,9 @@ stream_multi_query_view(Req, Db, DDoc, ViewName, Args0, Queries) ->
paginate_multi_query_view(Req, Db, DDoc, ViewName, Args0, Queries) ->
- {ok, #mrst{views=Views}} = couch_mrview_util:ddoc_to_mrst(Db, DDoc),
+ {ok, #mrst{views=Views}} = couch_views_util:ddoc_to_mrst(Db, DDoc),
ArgQueries = parse_queries(Req, Args0, Queries, fun(QueryArg) ->
- couch_mrview_util:set_view_type(QueryArg, ViewName, Views)
+ couch_views_util:set_view_type(QueryArg, ViewName, Views)
end),
KeyFun = fun({Props}) ->
{couch_util:get_value(id, Props), couch_util:get_value(key, Props)}
@@ -76,7 +76,7 @@ paginate_multi_query_view(Req, Db, DDoc, ViewName, Args0, Queries) ->
design_doc_post_view(Req, Props, Db, DDoc, ViewName, Keys) ->
- Args = couch_mrview_http:parse_body_and_query(Req, Props, Keys),
+ Args = couch_views_http_util:parse_body_and_query(Req, Props, Keys),
fabric_query_view(Db, Req, DDoc, ViewName, Args).
design_doc_view(Req, Db, DDoc, ViewName, Keys) ->
@@ -134,7 +134,7 @@ handle_view_req(#httpd{method='POST',
path_parts=[_, _, _, _, ViewName, <<"queries">>]}=Req, Db, DDoc) ->
chttpd:validate_ctype(Req, "application/json"),
Props = couch_httpd:json_body_obj(Req),
- case couch_mrview_util:get_view_queries(Props) of
+ case couch_views_util:get_view_queries(Props) of
undefined ->
throw({bad_request,
<<"POST body must include `queries` parameter.">>});
@@ -156,8 +156,8 @@ handle_view_req(#httpd{method='POST',
path_parts=[_, _, _, _, ViewName]}=Req, Db, DDoc) ->
chttpd:validate_ctype(Req, "application/json"),
Props = couch_httpd:json_body_obj(Req),
- assert_no_queries_param(couch_mrview_util:get_view_queries(Props)),
- Keys = couch_mrview_util:get_view_keys(Props),
+ assert_no_queries_param(couch_views_util:get_view_queries(Props)),
+ Keys = couch_views_util:get_view_keys(Props),
couch_stats:increment_counter([couchdb, httpd, view_reads]),
design_doc_post_view(Req, Props, Db, DDoc, ViewName, Keys);
@@ -299,7 +299,7 @@ t_check_user_can_override_individual_query_type() ->
setup_all() ->
Views = [#mrview{reduce_funs = [{<<"v">>, <<"_count">>}]}],
- meck:expect(couch_mrview_util, ddoc_to_mrst, 2, {ok, #mrst{views = Views}}),
+ meck:expect(couch_views_util, ddoc_to_mrst, 2, {ok, #mrst{views = Views}}),
meck:expect(chttpd, start_delayed_json_response, 4, {ok, resp}),
meck:expect(couch_views, query, 6, {ok, #vacc{}}),
meck:expect(chttpd, send_delayed_chunk, 2, {ok, resp}),
@@ -314,7 +314,7 @@ setup() ->
meck:reset([
chttpd,
couch_views,
- couch_mrview_util
+ couch_views_util
]).
diff --git a/src/chttpd/test/eunit/chttpd_db_attachment_size_tests.erl b/src/chttpd/test/eunit/chttpd_db_attachment_size_tests.erl
index 0ab08dd80..227b29c5b 100644
--- a/src/chttpd/test/eunit/chttpd_db_attachment_size_tests.erl
+++ b/src/chttpd/test/eunit/chttpd_db_attachment_size_tests.erl
@@ -56,7 +56,8 @@ attachment_size_test_() ->
fun put_inline/1,
fun put_simple/1,
fun put_simple_chunked/1,
- fun put_mp_related/1
+ fun put_mp_related/1,
+ fun put_chunked_mp_related/1
]
}
}
@@ -111,6 +112,15 @@ put_mp_related(Url) ->
end).
+put_chunked_mp_related(Url) ->
+ ?_test(begin
+ Headers = [?CONTENT_MULTI_RELATED],
+ Body = mp_body(50),
+ Status = put_req_chunked(Url ++ "/doc4", Headers, Body),
+ ?assert(Status =:= 201 orelse Status =:= 202)
+ end).
+
+
% Helper functions
create_db(Url) ->
diff --git a/src/chttpd/test/eunit/chttpd_session_tests.erl b/src/chttpd/test/eunit/chttpd_session_tests.erl
index a802d9ec2..1e1fbf5e4 100644
--- a/src/chttpd/test/eunit/chttpd_session_tests.erl
+++ b/src/chttpd/test/eunit/chttpd_session_tests.erl
@@ -44,7 +44,8 @@ session_test_() ->
fun cleanup/1,
[
?TDEF_FE(session_authentication_db_absent),
- ?TDEF_FE(session_authentication_db_present)
+ ?TDEF_FE(session_authentication_db_present),
+ ?TDEF_FE(session_authentication_gzip_request)
]
}
}
@@ -62,6 +63,15 @@ session_authentication_db_present(Url) ->
?assertEqual(list_to_binary(Name), session_authentication_db(Url)).
+session_authentication_gzip_request(Url) ->
+ {ok, 200, _, Body} = test_request:request(
+ post,
+ Url,
+ [{"Content-Type", "application/json"}, {"Content-Encoding", "gzip"}],
+ zlib:gzip(jiffy:encode({[{username, list_to_binary(?USER)}, {password, list_to_binary(?PASS)}]}))),
+ {BodyJson} = jiffy:decode(Body),
+ ?assert(lists:member({<<"name">>, list_to_binary(?USER)}, BodyJson)).
+
session_authentication_db(Url) ->
{ok, 200, _, Body} = test_request:get(Url, [{basic_auth, {?USER, ?PASS}}]),
couch_util:get_nested_json_value(
diff --git a/src/chttpd/test/exunit/pagination_test.exs b/src/chttpd/test/exunit/pagination_test.exs
index 6544017df..4e0a5d6fe 100644
--- a/src/chttpd/test/exunit/pagination_test.exs
+++ b/src/chttpd/test/exunit/pagination_test.exs
@@ -255,7 +255,7 @@ defmodule Couch.Test.Pagination do
@describetag descending: descending
setup [:with_session, :random_db, :with_docs]
- test "total_rows matches the length of rows array", ctx do
+ test "total_rows matches the number of docs", ctx do
resp =
Couch.Session.get(ctx.session, "/#{ctx.db_name}/_all_docs",
query: %{descending: ctx.descending}
@@ -263,7 +263,7 @@ defmodule Couch.Test.Pagination do
assert resp.status_code == 200, "got error #{inspect(resp.body)}"
body = resp.body
- assert body["total_rows"] == length(body["rows"])
+ assert body["total_rows"] == ctx.n_docs
end
test "the rows are correctly sorted", ctx do
@@ -371,7 +371,7 @@ defmodule Couch.Test.Pagination do
@describetag page_size: 4
setup [:with_session, :random_db, :with_view, :with_docs]
- test "total_rows matches the length of rows array", ctx do
+ test "total_rows matches the number of documents in view", ctx do
resp =
Couch.Session.get(
ctx.session,
@@ -381,7 +381,7 @@ defmodule Couch.Test.Pagination do
assert resp.status_code == 200, "got error #{inspect(resp.body)}"
body = resp.body
- assert body["total_rows"] == length(body["rows"])
+ assert body["total_rows"] == ctx.n_docs
end
end
@@ -593,14 +593,9 @@ defmodule Couch.Test.Pagination do
assert Map.has_key?(body, "next")
end
- test "total_rows matches the length of rows array", ctx do
+ test "total_rows matches the number of documents", ctx do
body = ctx.response
- assert body["total_rows"] == length(body["rows"])
- end
-
- test "total_rows matches the requested page_size", ctx do
- body = ctx.response
- assert body["total_rows"] == ctx.page_size
+ assert body["total_rows"] == ctx.n_docs
end
test "can use 'next' bookmark to get remaining results", ctx do
@@ -613,8 +608,8 @@ defmodule Couch.Test.Pagination do
assert resp.status_code == 200, "got error #{inspect(resp.body)}"
body = resp.body
- assert body["total_rows"] == length(body["rows"])
- assert body["total_rows"] <= ctx.page_size
+ assert body["total_rows"] == ctx.n_docs
+ assert length(body["rows"]) <= ctx.page_size
end
end
@@ -721,7 +716,7 @@ defmodule Couch.Test.Pagination do
test "final page doesn't include 'next' bookmark", ctx do
assert not Map.has_key?(ctx.response, "next")
- assert ctx.response["total_rows"] == rem(ctx.n_docs, ctx.page_size)
+ assert length(ctx.response["rows"]) == rem(ctx.n_docs, ctx.page_size)
end
test "each but last page has page_size rows", ctx do
@@ -768,14 +763,9 @@ defmodule Couch.Test.Pagination do
assert not Map.has_key?(body, "next")
end
- test "total_rows matches the length of rows array", ctx do
- body = ctx.response
- assert body["total_rows"] == length(body["rows"])
- end
-
- test "total_rows less than the requested page_size", ctx do
+ test "total_rows matches the number of documents", ctx do
body = ctx.response
- assert body["total_rows"] <= ctx.page_size
+ assert body["total_rows"] == ctx.n_docs
end
end
end
@@ -950,7 +940,7 @@ defmodule Couch.Test.Pagination do
assert not Map.has_key?(resp.body, "previous")
end
- test "total_rows matches the length of rows array", ctx do
+ test "total_rows matches the number of documents in view", ctx do
resp =
Couch.Session.get(
ctx.session,
@@ -960,19 +950,7 @@ defmodule Couch.Test.Pagination do
assert resp.status_code == 200, "got error #{inspect(resp.body)}"
body = resp.body
- assert body["total_rows"] == length(body["rows"])
- end
-
- test "total_rows matches the requested page_size", ctx do
- resp =
- Couch.Session.get(
- ctx.session,
- "/#{ctx.db_name}/_design/#{ctx.ddoc_id}/_view/#{ctx.view_name}",
- query: %{page_size: ctx.page_size, descending: ctx.descending}
- )
-
- assert resp.status_code == 200, "got error #{inspect(resp.body)}"
- assert resp.body["total_rows"] == ctx.page_size
+ assert body["total_rows"] == ctx.n_docs
end
test "can use 'next' bookmark to get remaining results", ctx do
@@ -994,8 +972,8 @@ defmodule Couch.Test.Pagination do
assert resp.status_code == 200, "got error #{inspect(resp.body)}"
body = resp.body
- assert body["total_rows"] == length(body["rows"])
- assert body["total_rows"] <= ctx.page_size
+ assert body["total_rows"] == ctx.n_docs
+ assert length(body["rows"]) <= ctx.page_size
end
test "can use 'previous' bookmark", ctx do
@@ -1065,7 +1043,7 @@ defmodule Couch.Test.Pagination do
assert resp.status_code == 200, "got error #{inspect(resp.body)}"
body = resp.body
- assert body["total_rows"] == length(body["rows"])
+ assert body["total_rows"] == ctx.n_docs
end
test "total_rows less than the requested page_size", ctx do
@@ -1077,7 +1055,7 @@ defmodule Couch.Test.Pagination do
)
assert resp.status_code == 200, "got error #{inspect(resp.body)}"
- assert resp.body["total_rows"] <= ctx.page_size
+ assert length(resp.body["rows"]) <= ctx.page_size
end
end
end
diff --git a/src/couch/include/couch_db.hrl b/src/couch/include/couch_db.hrl
index cc1fb5def..22890895a 100644
--- a/src/couch/include/couch_db.hrl
+++ b/src/couch/include/couch_db.hrl
@@ -13,18 +13,13 @@
-define(LOCAL_DOC_PREFIX, "_local/").
-define(DESIGN_DOC_PREFIX0, "_design").
-define(DESIGN_DOC_PREFIX, "_design/").
--define(DEFAULT_COMPRESSION, snappy).
-define(MIN_STR, <<"">>).
-define(MAX_STR, <<255>>). % illegal utf string
--define(REWRITE_COUNT, couch_rewrite_count).
-
-define(JSON_ENCODE(V), couch_util:json_encode(V)).
-define(JSON_DECODE(V), couch_util:json_decode(V)).
--define(IS_OLD_RECORD(V, R), (tuple_size(V) /= tuple_size(R))).
-
-define(b2l(V), binary_to_list(V)).
-define(l2b(V), list_to_binary(V)).
-define(i2b(V), couch_util:integer_to_boolean(V)).
@@ -39,7 +34,6 @@
-define(SYSTEM_DATABASES, [
<<"_dbs">>,
- <<"_global_changes">>,
<<"_metadata">>,
<<"_nodes">>,
<<"_replicator">>,
@@ -128,18 +122,6 @@
handler
}).
--record(view_fold_helper_funs, {
- reduce_count,
- passed_end,
- start_response,
- send_row
-}).
-
--record(reduce_fold_helper_funs, {
- start_response,
- send_row
-}).
-
-record(extern_resp_args, {
code = 200,
stop = false,
@@ -149,13 +131,6 @@
json = nil
}).
--record(index_header, {
- seq=0,
- purge_seq=0,
- id_btree_state=nil,
- view_states=nil
-}).
-
% small value used in revision trees to indicate the revision isn't stored
-define(REV_MISSING, []).
@@ -176,16 +151,6 @@
db_open_options = []
}).
--record(btree, {
- fd,
- root,
- extract_kv,
- assemble_kv,
- less,
- reduce = nil,
- compression = ?DEFAULT_COMPRESSION
-}).
-
-record(proc, {
pid,
lang,
@@ -204,15 +169,6 @@
atts = []
}).
--record (fabric_changes_acc, {
- db,
- seq,
- args,
- options,
- pending,
- epochs
-}).
-
-type doc() :: #doc{}.
-type ddoc() :: #doc{}.
-type user_ctx() :: #user_ctx{}.
diff --git a/src/couch/priv/couch_ejson_compare/couch_ejson_compare.c b/src/couch/priv/couch_ejson_compare/couch_ejson_compare.c
index ad3d0cdd6..49d6cd812 100644
--- a/src/couch/priv/couch_ejson_compare/couch_ejson_compare.c
+++ b/src/couch/priv/couch_ejson_compare/couch_ejson_compare.c
@@ -13,6 +13,7 @@
*/
#include <stdio.h>
+#include <string.h>
#include <assert.h>
#include "erl_nif.h"
#include "unicode/ucol.h"
@@ -65,6 +66,11 @@ static __inline int compare_lists(int, ctx_t*, ERL_NIF_TERM, ERL_NIF_TERM);
static __inline int compare_props(int, ctx_t*, ERL_NIF_TERM, ERL_NIF_TERM);
static __inline UCollator* get_collator();
+/* Should match the <<255,255,255,255>> in:
+ * - src/mango/src/mango_idx_view.hrl#L13
+ * - src/couch_mrview/src/couch_mrview_util.erl#L40 */
+static const unsigned char max_utf8_marker[] = {255, 255, 255, 255};
+
UCollator*
get_collator()
@@ -357,12 +363,46 @@ compare_props(int depth, ctx_t* ctx, ERL_NIF_TERM a, ERL_NIF_TERM b)
int
+is_max_utf8_marker(ErlNifBinary bin)
+{
+ if (bin.size == sizeof(max_utf8_marker)) {
+ if(memcmp(bin.data, max_utf8_marker, sizeof(max_utf8_marker)) == 0) {
+ return 1;
+ }
+ return 0;
+ }
+ return 0;
+}
+
+
+int
compare_strings(ctx_t* ctx, ErlNifBinary a, ErlNifBinary b)
{
UErrorCode status = U_ZERO_ERROR;
UCharIterator iterA, iterB;
int result;
+ /* libicu versions earlier than 59 (at least) do not treat
+ * {255,255,255,255} as the highest sortable string, which CouchDB
+ * expects. While we still ship CentOS 7 packages with libicu 50 we
+ * check for the marker explicitly; once that is no longer the case,
+ * this special-case logic can be removed. */
+
+ int a_is_max = is_max_utf8_marker(a);
+ int b_is_max = is_max_utf8_marker(b);
+
+ if(a_is_max && b_is_max) {
+ return 0;
+ }
+
+ if(a_is_max) {
+ return 1;
+ }
+
+ if(b_is_max) {
+ return -1;
+ }
+
uiter_setUTF8(&iterA, (const char *) a.data, (uint32_t) a.size);
uiter_setUTF8(&iterB, (const char *) b.data, (uint32_t) b.size);
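
The NIF now special-cases the <<255,255,255,255>> marker so that it collates above every other string (and equal to itself) even on older libicu builds. Roughly, the expectation from the Erlang side is the following, assuming couch_ejson_compare:less/2 is the wrapper that returns a negative, zero, or positive integer:

    max_marker_sketch() ->
        Max = <<255,255,255,255>>,
        true = couch_ejson_compare:less(<<"zzz">>, Max) < 0,
        true = couch_ejson_compare:less(Max, <<"zzz">>) > 0,
        0 = couch_ejson_compare:less(Max, Max),
        ok.
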
diff --git a/src/couch/priv/couch_js/86/help.h b/src/couch/priv/couch_js/86/help.h
new file mode 100644
index 000000000..6a23172af
--- /dev/null
+++ b/src/couch/priv/couch_js/86/help.h
@@ -0,0 +1,79 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#ifndef COUCHJS_HELP_H
+#define COUCHJS_HELP_H
+
+#include "config.h"
+
+static const char VERSION_TEMPLATE[] =
+ "%s - %s (SpiderMonkey 86)\n"
+ "\n"
+ "Licensed under the Apache License, Version 2.0 (the \"License\"); you may "
+ "not use\n"
+ "this file except in compliance with the License. You may obtain a copy of"
+ "the\n"
+ "License at\n"
+ "\n"
+ " http://www.apache.org/licenses/LICENSE-2.0\n"
+ "\n"
+ "Unless required by applicable law or agreed to in writing, software "
+ "distributed\n"
+ "under the License is distributed on an \"AS IS\" BASIS, WITHOUT "
+ "WARRANTIES OR\n"
+ "CONDITIONS OF ANY KIND, either express or implied. See the License "
+ "for the\n"
+ "specific language governing permissions and limitations under the "
+ "License.\n";
+
+static const char USAGE_TEMPLATE[] =
+ "Usage: %s [FILE]\n"
+ "\n"
+ "The %s command runs the %s JavaScript interpreter.\n"
+ "\n"
+ "The exit status is 0 for success or 1 for failure.\n"
+ "\n"
+ "Options:\n"
+ "\n"
+ " -h display a short help message and exit\n"
+ " -V display version information and exit\n"
+ " -S SIZE specify that the runtime should allow at\n"
+ " most SIZE bytes of memory to be allocated\n"
+ " default is 64 MiB\n"
+ " --eval Enable runtime code evaluation (dangerous!)\n"
+ "\n"
+ "Report bugs at <%s>.\n";
+
+#define BASENAME COUCHJS_NAME
+
+#define couch_version(basename) \
+ fprintf( \
+ stdout, \
+ VERSION_TEMPLATE, \
+ basename, \
+ PACKAGE_STRING)
+
+#define DISPLAY_VERSION couch_version(BASENAME)
+
+
+#define couch_usage(basename) \
+ fprintf( \
+ stdout, \
+ USAGE_TEMPLATE, \
+ basename, \
+ basename, \
+ PACKAGE_NAME, \
+ PACKAGE_BUGREPORT)
+
+#define DISPLAY_USAGE couch_usage(BASENAME)
+
+#endif // Included help.h
diff --git a/src/couch/priv/couch_js/86/main.cpp b/src/couch/priv/couch_js/86/main.cpp
new file mode 100644
index 000000000..291409944
--- /dev/null
+++ b/src/couch/priv/couch_js/86/main.cpp
@@ -0,0 +1,341 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#ifdef XP_WIN
+#define NOMINMAX
+#include <windows.h>
+#else
+#include <unistd.h>
+#endif
+
+#include <jsapi.h>
+#include <js/CompilationAndEvaluation.h>
+#include <js/Conversions.h>
+#include <js/Initialization.h>
+#include <js/SourceText.h>
+#include <js/StableStringChars.h>
+#include <js/Warnings.h>
+#include <js/Wrapper.h>
+
+#include "config.h"
+#include "util.h"
+
+static bool enableSharedMemory = true;
+static bool enableToSource = true;
+
+static JSClassOps global_ops = {
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ JS_GlobalObjectTraceHook
+};
+
+/* The class of the global object. */
+static JSClass global_class = {
+ "global",
+ JSCLASS_GLOBAL_FLAGS,
+ &global_ops
+};
+
+static JSObject*
+NewSandbox(JSContext* cx, bool lazy)
+{
+ JS::RealmOptions options;
+ options.creationOptions().setSharedMemoryAndAtomicsEnabled(enableSharedMemory);
+ options.creationOptions().setNewCompartmentAndZone();
+ // we need this in the query server error handling
+ options.creationOptions().setToSourceEnabled(enableToSource);
+ JS::RootedObject obj(cx, JS_NewGlobalObject(cx, &global_class, nullptr,
+ JS::DontFireOnNewGlobalHook, options));
+ if (!obj)
+ return nullptr;
+
+ {
+ JSAutoRealm ac(cx, obj);
+ if (!lazy && !JS::InitRealmStandardClasses(cx))
+ return nullptr;
+
+ JS::RootedValue value(cx, JS::BooleanValue(lazy));
+ if (!JS_DefineProperty(cx, obj, "lazy", value, JSPROP_PERMANENT | JSPROP_READONLY))
+ return nullptr;
+
+ JS_FireOnNewGlobalObject(cx, obj);
+ }
+
+ if (!JS_WrapObject(cx, &obj))
+ return nullptr;
+ return obj;
+}
+
+static bool
+evalcx(JSContext *cx, unsigned int argc, JS::Value* vp)
+{
+ JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
+ bool ret = false;
+
+ JS::RootedString str(cx, args[0].toString());
+ if (!str)
+ return false;
+
+ JS::RootedObject sandbox(cx);
+ if (args.hasDefined(1)) {
+ sandbox = JS::ToObject(cx, args[1]);
+ if (!sandbox)
+ return false;
+ }
+
+ if (!sandbox) {
+ sandbox = NewSandbox(cx, false);
+ if (!sandbox)
+ return false;
+ }
+
+ JS::AutoStableStringChars strChars(cx);
+ if (!strChars.initTwoByte(cx, str))
+ return false;
+
+ mozilla::Range<const char16_t> chars = strChars.twoByteRange();
+ JS::SourceText<char16_t> srcBuf;
+ if (!srcBuf.init(cx, chars.begin().get(), chars.length(),
+ JS::SourceOwnership::Borrowed)) {
+ return false;
+ }
+
+ if(srcBuf.length() == 0) {
+ args.rval().setObject(*sandbox);
+ } else {
+ mozilla::Maybe<JSAutoRealm> ar;
+ unsigned flags;
+ JSObject* unwrapped = UncheckedUnwrap(sandbox, true, &flags);
+ if (flags & js::Wrapper::CROSS_COMPARTMENT) {
+ sandbox = unwrapped;
+ ar.emplace(cx, sandbox);
+ }
+
+ JS::CompileOptions opts(cx);
+ JS::RootedValue rval(cx);
+ opts.setFileAndLine("<unknown>", 1);
+
+ if (!JS::Evaluate(cx, opts, srcBuf, args.rval())) {
+ return false;
+ }
+ }
+ ret = true;
+ if (!JS_WrapValue(cx, args.rval()))
+ return false;
+
+ return ret;
+}
+
+
+static bool
+gc(JSContext* cx, unsigned int argc, JS::Value* vp)
+{
+ JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
+ JS_GC(cx);
+ args.rval().setUndefined();
+ return true;
+}
+
+
+static bool
+print(JSContext* cx, unsigned int argc, JS::Value* vp)
+{
+ JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
+
+ bool use_stderr = false;
+ if(argc > 1 && args[1].isTrue()) {
+ use_stderr = true;
+ }
+
+ if(!args[0].isString()) {
+ JS_ReportErrorUTF8(cx, "Unable to print non-string value.");
+ return false;
+ }
+
+ couch_print(cx, args[0], use_stderr);
+
+ args.rval().setUndefined();
+ return true;
+}
+
+
+static bool
+quit(JSContext* cx, unsigned int argc, JS::Value* vp)
+{
+ JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
+
+    int exit_code = args[0].toInt32();
+ exit(exit_code);
+}
+
+
+static bool
+readline(JSContext* cx, unsigned int argc, JS::Value* vp)
+{
+ JSString* line;
+ JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
+
+ /* GC Occasionally */
+ JS_MaybeGC(cx);
+
+ line = couch_readline(cx, stdin);
+ if(line == NULL) return false;
+
+    // Return a JSString* directly; in the past this returned a JS::Value
+ args.rval().setString(line);
+ return true;
+}
+
+
+static bool
+seal(JSContext* cx, unsigned int argc, JS::Value* vp)
+{
+ JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
+ JS::RootedObject target(cx);
+ target = JS::ToObject(cx, args[0]);
+ if (!target) {
+ args.rval().setUndefined();
+ return true;
+ }
+ bool deep = false;
+ deep = args[1].toBoolean();
+ bool ret = deep ? JS_DeepFreezeObject(cx, target) : JS_FreezeObject(cx, target);
+ args.rval().setUndefined();
+ return ret;
+}
+
+
+static JSFunctionSpec global_functions[] = {
+ JS_FN("evalcx", evalcx, 0, 0),
+ JS_FN("gc", gc, 0, 0),
+ JS_FN("print", print, 0, 0),
+ JS_FN("quit", quit, 0, 0),
+ JS_FN("readline", readline, 0, 0),
+ JS_FN("seal", seal, 0, 0),
+ JS_FS_END
+};
+
+
+static bool
+csp_allows(JSContext* cx, JS::HandleString code)
+{
+ couch_args* args = static_cast<couch_args*>(JS_GetContextPrivate(cx));
+ if(args->eval) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+
+static JSSecurityCallbacks security_callbacks = {
+ csp_allows,
+ nullptr
+};
+
+
+int
+main(int argc, const char* argv[])
+{
+ JSContext* cx = NULL;
+ int i;
+
+ couch_args* args = couch_parse_args(argc, argv);
+
+ JS_Init();
+ cx = JS_NewContext(args->stack_size);
+ if(cx == NULL)
+ return 1;
+
+ JS_SetGlobalJitCompilerOption(cx, JSJITCOMPILER_BASELINE_ENABLE, 0);
+ JS_SetGlobalJitCompilerOption(cx, JSJITCOMPILER_ION_ENABLE, 0);
+
+ if (!JS::InitSelfHostedCode(cx))
+ return 1;
+
+ JS::SetWarningReporter(cx, couch_error);
+ JS::SetOutOfMemoryCallback(cx, couch_oom, NULL);
+ JS_SetContextPrivate(cx, args);
+ JS_SetSecurityCallbacks(cx, &security_callbacks);
+
+ JS::RealmOptions options;
+ JS::RootedObject global(cx, JS_NewGlobalObject(cx, &global_class, nullptr,
+ JS::FireOnNewGlobalHook, options));
+ if (!global)
+ return 1;
+
+ JSAutoRealm ar(cx, global);
+
+ if(!JS::InitRealmStandardClasses(cx))
+ return 1;
+
+ if(couch_load_funcs(cx, global, global_functions) != true)
+ return 1;
+
+ for(i = 0 ; args->scripts[i] ; i++) {
+ const char* filename = args->scripts[i];
+
+ // Compile and run
+ JS::CompileOptions options(cx);
+ options.setFileAndLine(filename, 1);
+ JS::RootedScript script(cx);
+ FILE* fp;
+
+ fp = fopen(args->scripts[i], "r");
+ if(fp == NULL) {
+ fprintf(stderr, "Failed to read file: %s\n", filename);
+ return 3;
+ }
+ script = JS::CompileUtf8File(cx, options, fp);
+ fclose(fp);
+ if (!script) {
+ JS::RootedValue exc(cx);
+ if(!JS_GetPendingException(cx, &exc)) {
+ fprintf(stderr, "Failed to compile file: %s\n", filename);
+ } else {
+ JS::RootedObject exc_obj(cx, &exc.toObject());
+ JSErrorReport* report = JS_ErrorFromException(cx, exc_obj);
+ couch_error(cx, report);
+ }
+ return 1;
+ }
+
+ JS::RootedValue result(cx);
+ if(JS_ExecuteScript(cx, script, &result) != true) {
+ JS::RootedValue exc(cx);
+ if(!JS_GetPendingException(cx, &exc)) {
+ fprintf(stderr, "Failed to execute script.\n");
+ } else {
+ JS::RootedObject exc_obj(cx, &exc.toObject());
+ JSErrorReport* report = JS_ErrorFromException(cx, exc_obj);
+ couch_error(cx, report);
+ }
+ }
+
+ // Give the GC a chance to run.
+ JS_MaybeGC(cx);
+ }
+
+ return 0;
+}
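As a sketch of the registration pattern shown above (a JSFunctionSpec table that couch_load_funcs walks and defines on the global object), a hypothetical extra builtin could be wired in as follows. This is not part of the patch; it assumes the same mozjs-86 headers used by main.cpp plus the declarations from util.h, and the names argcount and extra_functions are invented for the example.

    #include <jsapi.h>
    #include "util.h"

    // Hypothetical builtin: returns the number of arguments it was called with.
    static bool argcount(JSContext* cx, unsigned int argc, JS::Value* vp)
    {
        JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
        args.rval().setInt32(static_cast<int32_t>(args.length()));
        return true;
    }

    // Same shape as the global_functions table above.
    static JSFunctionSpec extra_functions[] = {
        JS_FN("argcount", argcount, 0, 0),
        JS_FS_END
    };

    // Registered the same way main() registers global_functions:
    //
    //     if (couch_load_funcs(cx, global, extra_functions) != true)
    //         return 1;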
diff --git a/src/couch/priv/couch_js/86/util.cpp b/src/couch/priv/couch_js/86/util.cpp
new file mode 100644
index 000000000..cd120a03f
--- /dev/null
+++ b/src/couch/priv/couch_js/86/util.cpp
@@ -0,0 +1,348 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <sstream>
+
+#include <jsapi.h>
+#include <jsfriendapi.h>
+#include <js/CharacterEncoding.h>
+#include <js/Conversions.h>
+#include <js/Initialization.h>
+#include <js/MemoryFunctions.h>
+#include <js/RegExp.h>
+
+#include "help.h"
+#include "util.h"
+
+std::string
+js_to_string(JSContext* cx, JS::HandleValue val)
+{
+ JS::AutoSaveExceptionState exc_state(cx);
+ JS::RootedString sval(cx);
+ sval = val.toString();
+
+ JS::UniqueChars chars(JS_EncodeStringToUTF8(cx, sval));
+ if(!chars) {
+ JS_ClearPendingException(cx);
+ return std::string();
+ }
+
+ return chars.get();
+}
+
+bool
+js_to_string(JSContext* cx, JS::HandleValue val, std::string& str)
+{
+ if(!val.isString()) {
+ return false;
+ }
+
+ if(JS_GetStringLength(val.toString()) == 0) {
+ str = "";
+ return true;
+ }
+
+ std::string conv = js_to_string(cx, val);
+ if(!conv.size()) {
+ return false;
+ }
+
+ str = conv;
+ return true;
+}
+
+JSString*
+string_to_js(JSContext* cx, const std::string& raw)
+{
+ JS::UTF8Chars utf8(raw.c_str(), raw.size());
+ JS::UniqueTwoByteChars utf16;
+ size_t len;
+
+ utf16.reset(JS::UTF8CharsToNewTwoByteCharsZ(cx, utf8, &len, js::MallocArena).get());
+ if(!utf16) {
+ return nullptr;
+ }
+
+ return JS_NewUCString(cx, std::move(utf16), len);
+}
+
+size_t
+couch_readfile(const char* file, char** outbuf_p)
+{
+ FILE* fp;
+ char fbuf[16384];
+ char *buf = NULL;
+ char* tmp;
+ size_t nread = 0;
+ size_t buflen = 0;
+
+ if(strcmp(file, "-") == 0) {
+ fp = stdin;
+ } else {
+ fp = fopen(file, "r");
+ if(fp == NULL) {
+ fprintf(stderr, "Failed to read file: %s\n", file);
+ exit(3);
+ }
+ }
+
+ while((nread = fread(fbuf, 1, 16384, fp)) > 0) {
+ if(buf == NULL) {
+ buf = new char[nread + 1];
+ if(buf == NULL) {
+ fprintf(stderr, "Out of memory.\n");
+ exit(3);
+ }
+ memcpy(buf, fbuf, nread);
+ } else {
+ tmp = new char[buflen + nread + 1];
+ if(tmp == NULL) {
+ fprintf(stderr, "Out of memory.\n");
+ exit(3);
+ }
+ memcpy(tmp, buf, buflen);
+ memcpy(tmp+buflen, fbuf, nread);
+            delete [] buf;
+ buf = tmp;
+ }
+ buflen += nread;
+ buf[buflen] = '\0';
+ }
+ *outbuf_p = buf;
+    return buflen;
+}
+
+couch_args*
+couch_parse_args(int argc, const char* argv[])
+{
+ couch_args* args;
+ int i = 1;
+
+ args = new couch_args();
+ if(args == NULL)
+ return NULL;
+
+ args->eval = 0;
+ args->stack_size = 64L * 1024L * 1024L;
+ args->scripts = nullptr;
+
+ while(i < argc) {
+ if(strcmp("-h", argv[i]) == 0) {
+ DISPLAY_USAGE;
+ exit(0);
+ } else if(strcmp("-V", argv[i]) == 0) {
+ DISPLAY_VERSION;
+ exit(0);
+ } else if(strcmp("-S", argv[i]) == 0) {
+ args->stack_size = atoi(argv[++i]);
+ if(args->stack_size <= 0) {
+ fprintf(stderr, "Invalid stack size.\n");
+ exit(2);
+ }
+ } else if(strcmp("--eval", argv[i]) == 0) {
+ args->eval = 1;
+ } else if(strcmp("--", argv[i]) == 0) {
+ i++;
+ break;
+ } else {
+ break;
+ }
+ i++;
+ }
+
+ if(i >= argc) {
+ DISPLAY_USAGE;
+ exit(3);
+ }
+ args->scripts = argv + i;
+
+ return args;
+}
+
+
+int
+couch_fgets(char* buf, int size, FILE* fp)
+{
+ int n, i, c;
+
+ if(size <= 0) return -1;
+ n = size - 1;
+
+ for(i = 0; i < n && (c = getc(fp)) != EOF; i++) {
+ buf[i] = c;
+ if(c == '\n') {
+ i++;
+ break;
+ }
+ }
+
+ buf[i] = '\0';
+ return i;
+}
+
+
+JSString*
+couch_readline(JSContext* cx, FILE* fp)
+{
+ JSString* str;
+ char* bytes = NULL;
+ char* tmp = NULL;
+ size_t used = 0;
+ size_t byteslen = 256;
+ size_t oldbyteslen = 256;
+ size_t readlen = 0;
+
+ bytes = static_cast<char*>(JS_malloc(cx, byteslen));
+ if(bytes == NULL) return NULL;
+
+ while((readlen = couch_fgets(bytes+used, byteslen-used, fp)) > 0) {
+ used += readlen;
+
+ if(bytes[used-1] == '\n') {
+ bytes[used-1] = '\0';
+ break;
+ }
+
+ // Double our buffer and read more.
+ oldbyteslen = byteslen;
+ byteslen *= 2;
+ tmp = static_cast<char*>(JS_realloc(cx, bytes, oldbyteslen, byteslen));
+ if(!tmp) {
+ JS_free(cx, bytes);
+ return NULL;
+ }
+
+ bytes = tmp;
+ }
+
+ // Treat empty strings specially
+ if(used == 0) {
+ JS_free(cx, bytes);
+ return JS_NewStringCopyZ(cx, nullptr);
+ }
+
+ // Shrink the buffer to the actual data size
+ tmp = static_cast<char*>(JS_realloc(cx, bytes, byteslen, used));
+ if(!tmp) {
+ JS_free(cx, bytes);
+ return NULL;
+ }
+ bytes = tmp;
+ byteslen = used;
+
+ str = string_to_js(cx, std::string(tmp));
+ JS_free(cx, bytes);
+ return str;
+}
+
+
+void
+couch_print(JSContext* cx, JS::HandleValue obj, bool use_stderr)
+{
+ FILE *stream = stdout;
+
+ if (use_stderr) {
+ stream = stderr;
+ }
+ std::string val = js_to_string(cx, obj);
+ fprintf(stream, "%s\n", val.c_str());
+ fflush(stream);
+}
+
+
+void
+couch_error(JSContext* cx, JSErrorReport* report)
+{
+ if(!report) {
+ return;
+ }
+
+ if(report->isWarning()) {
+ return;
+ }
+
+ std::ostringstream msg;
+ msg << "error: " << report->message().c_str();
+
+ mozilla::Maybe<JSAutoRealm> ar;
+ JS::RootedValue exc(cx);
+ JS::RootedObject exc_obj(cx);
+ JS::RootedObject stack_obj(cx);
+ JS::RootedString stack_str(cx);
+ JS::RootedValue stack_val(cx);
+ JSPrincipals* principals = GetRealmPrincipals(js::GetContextRealm(cx));
+
+ if(!JS_GetPendingException(cx, &exc)) {
+ goto done;
+ }
+
+    // Clear the exception before any JS method calls, otherwise the result
+    // is infinite, recursive error report generation.
+ JS_ClearPendingException(cx);
+
+ exc_obj.set(exc.toObjectOrNull());
+ stack_obj.set(JS::ExceptionStackOrNull(exc_obj));
+
+ if(!stack_obj) {
+ // Compilation errors don't have a stack
+
+ msg << " at ";
+
+ if(report->filename) {
+ msg << report->filename;
+ } else {
+ msg << "<unknown>";
+ }
+
+ if(report->lineno) {
+ msg << ':' << report->lineno << ':' << report->column;
+ }
+
+ goto done;
+ }
+
+ if(!JS::BuildStackString(cx, principals, stack_obj, &stack_str, 2)) {
+ goto done;
+ }
+
+ stack_val.set(JS::StringValue(stack_str));
+ msg << std::endl << std::endl << js_to_string(cx, stack_val).c_str();
+
+done:
+ msg << std::endl;
+ fprintf(stderr, "%s", msg.str().c_str());
+}
+
+
+void
+couch_oom(JSContext* cx, void* data)
+{
+ fprintf(stderr, "out of memory\n");
+ exit(1);
+}
+
+
+bool
+couch_load_funcs(JSContext* cx, JS::HandleObject obj, JSFunctionSpec* funcs)
+{
+ JSFunctionSpec* f;
+ for(f = funcs; f->name; f++) {
+ if(!JS_DefineFunction(cx, obj, f->name.string(), f->call.op, f->nargs, f->flags)) {
+ fprintf(stderr, "Failed to create function: %s\n", f->name.string());
+ return false;
+ }
+ }
+ return true;
+}
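The couch_readline/couch_fgets pair above grows its buffer by doubling until a newline (or EOF) arrives, then shrinks it to the bytes actually read. Below is a minimal stand-alone sketch of that pattern, not part of the patch, using plain malloc/realloc in place of JS_malloc/JS_realloc; the function name read_line_doubling is hypothetical.

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    // Read one line from fp into a heap buffer, doubling the buffer as needed.
    // Returns a NUL-terminated string the caller must free(), or NULL on EOF/error.
    static char* read_line_doubling(FILE* fp)
    {
        size_t cap = 256;
        size_t used = 0;
        char* buf = static_cast<char*>(malloc(cap));
        if (buf == NULL) return NULL;

        while (fgets(buf + used, static_cast<int>(cap - used), fp) != NULL) {
            used += strlen(buf + used);
            if (used > 0 && buf[used - 1] == '\n') {
                buf[used - 1] = '\0';   // strip the newline, as couch_readline does
                return buf;
            }
            // No newline yet: double the buffer and keep reading.
            char* tmp = static_cast<char*>(realloc(buf, cap * 2));
            if (tmp == NULL) { free(buf); return NULL; }
            buf = tmp;
            cap *= 2;
        }
        if (used == 0) { free(buf); return NULL; }  // nothing read before EOF
        return buf;                                 // last line had no newline
    }

    int main()
    {
        char* line = read_line_doubling(stdin);
        if (line != NULL) { printf("%s\n", line); free(line); }
        return 0;
    }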
diff --git a/src/couch/priv/couch_js/86/util.h b/src/couch/priv/couch_js/86/util.h
new file mode 100644
index 000000000..bd7843eb9
--- /dev/null
+++ b/src/couch/priv/couch_js/86/util.h
@@ -0,0 +1,41 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#ifndef COUCHJS_UTIL_H
+#define COUCHJS_UTIL_H
+
+#include <jsapi.h>
+
+typedef struct {
+ int eval;
+ int use_http;
+ int use_test_funs;
+ int stack_size;
+ const char** scripts;
+ const char* uri_file;
+ JSString* uri;
+} couch_args;
+
+std::string js_to_string(JSContext* cx, JS::HandleValue val);
+bool js_to_string(JSContext* cx, JS::HandleValue val, std::string& str);
+JSString* string_to_js(JSContext* cx, const std::string& s);
+
+couch_args* couch_parse_args(int argc, const char* argv[]);
+int couch_fgets(char* buf, int size, FILE* fp);
+JSString* couch_readline(JSContext* cx, FILE* fp);
+size_t couch_readfile(const char* file, char** outbuf_p);
+void couch_print(JSContext* cx, JS::HandleValue str, bool use_stderr);
+void couch_error(JSContext* cx, JSErrorReport* report);
+void couch_oom(JSContext* cx, void* data);
+bool couch_load_funcs(JSContext* cx, JS::HandleObject obj, JSFunctionSpec* funcs);
+
+#endif // Included util.h
diff --git a/src/couch/rebar.config.script b/src/couch/rebar.config.script
index 59bd40fbb..0ed2d4bc6 100644
--- a/src/couch/rebar.config.script
+++ b/src/couch/rebar.config.script
@@ -41,7 +41,7 @@ end.
GitSha = case os:getenv("COUCHDB_GIT_SHA") of
false ->
- ""; % release builds won't get a fallback
+ ""; % release builds won\'t get a fallback
GitSha0 ->
string:strip(GitSha0, right)
end.
@@ -61,6 +61,10 @@ SMVsn = case lists:keyfind(spidermonkey_version, 1, CouchConfig) of
"60";
{_, "68"} ->
"68";
+ {_, "78"} ->
+ "78";
+ {_, "86"} ->
+ "86";
undefined ->
"1.8.5";
{_, Unsupported} ->
@@ -82,7 +86,12 @@ ConfigH = [
{"PACKAGE_VERSION", "\"" ++ Version ++ "\""}
].
-CouchJSConfig = "priv/couch_js/" ++ SMVsn ++ "/config.h".
+CouchJSConfig = case SMVsn of
+ "78" ->
+ "priv/couch_js/86/config.h";
+ _ ->
+ "priv/couch_js/" ++ SMVsn ++ "/config.h"
+end.
ConfigSrc = [["#define ", K, " ", V, $\n] || {K, V} <- ConfigH].
ConfigBin = iolist_to_binary(ConfigSrc).
ok = CopyIfDifferent(CouchJSConfig, ConfigBin).
@@ -129,13 +138,25 @@ end.
{
"-DXP_UNIX -I/usr/include/mozjs-68 -I/usr/local/include/mozjs-68 -std=c++14 -Wno-invalid-offsetof",
"-L/usr/local/lib -std=c++14 -lmozjs-68 -lm -lstdc++"
+ };
+ {unix, _} when SMVsn == "78" ->
+ {
+ "-DXP_UNIX -I/usr/include/mozjs-78 -I/usr/local/include/mozjs-78 -std=c++20 -Wno-invalid-offsetof",
+ "-L/usr/local/lib -std=c++20 -lmozjs-78 -lm"
+ };
+ {unix, _} when SMVsn == "86" ->
+ {
+ "-DXP_UNIX -I/usr/include/mozjs-86 -I/usr/local/include/mozjs-86 -I/opt/homebrew/include/mozjs-86/ -std=c++17 -Wno-invalid-offsetof",
+ "-L/usr/local/lib -L /opt/homebrew/lib/ -std=c++17 -lmozjs-86 -lm"
}
end.
CouchJSSrc = case SMVsn of
"1.8.5" -> ["priv/couch_js/1.8.5/*.c"];
"60" -> ["priv/couch_js/60/*.cpp"];
- "68" -> ["priv/couch_js/68/*.cpp"]
+ "68" -> ["priv/couch_js/68/*.cpp"];
+ "78" -> ["priv/couch_js/86/*.cpp"];
+ "86" -> ["priv/couch_js/86/*.cpp"]
end.
CouchJSEnv = case SMVsn of
@@ -155,8 +176,8 @@ IcuPath = "priv/couch_icu_driver.so".
IcuSrc = ["priv/icu_driver/*.c"].
IcuEnv = [{"DRV_CFLAGS", "$DRV_CFLAGS -DPIC -O2 -fno-common"},
{"DRV_LDFLAGS", "$DRV_LDFLAGS -lm -licuuc -licudata -licui18n -lpthread"}].
-IcuDarwinEnv = [{"CFLAGS", "-DXP_UNIX -I/usr/local/opt/icu4c/include"},
- {"LDFLAGS", "-L/usr/local/opt/icu4c/lib"}].
+IcuDarwinEnv = [{"CFLAGS", "-DXP_UNIX -I/usr/local/opt/icu4c/include -I/opt/homebrew/opt/icu4c/include"},
+ {"LDFLAGS", "-L/usr/local/opt/icu4c/lib -L/opt/homebrew/opt/icu4c/lib"}].
IcuBsdEnv = [{"CFLAGS", "-DXP_UNIX -I/usr/local/include"},
{"LDFLAGS", "-L/usr/local/lib"}].
IcuWinEnv = [{"CFLAGS", "$DRV_CFLAGS /DXP_WIN"},
diff --git a/src/couch/src/couch.app.src b/src/couch/src/couch.app.src
index 6116c79ba..af277c161 100644
--- a/src/couch/src/couch.app.src
+++ b/src/couch/src/couch.app.src
@@ -14,15 +14,12 @@
{description, "Apache CouchDB"},
{vsn, git},
{registered, [
- couch_db_update,
- couch_db_update_notifier_sup,
couch_httpd,
couch_primary_services,
couch_proc_manager,
couch_secondary_services,
couch_server,
- couch_sup,
- couch_task_status
+ couch_sup
]},
{mod, {couch_app, []}},
{applications, [
@@ -42,43 +39,8 @@
couch_epi,
b64url,
couch_log,
- couch_event,
- ioq,
couch_stats,
- hyper
- ]},
- {env, [
- { httpd_global_handlers, [
- {"/", "{couch_httpd_misc_handlers, handle_welcome_req, <<\"Welcome\">>}"},
- {"favicon.ico", "{couch_httpd_misc_handlers, handle_favicon_req, \"{{prefix}}/share/www\"}"},
- {"_utils", "{couch_httpd_misc_handlers, handle_utils_dir_req, \"{{prefix}}/share/www\"}"},
- {"_all_dbs", "{couch_httpd_misc_handlers, handle_all_dbs_req}"},
- {"_active_tasks", "{couch_httpd_misc_handlers, handle_task_status_req}"},
- {"_config", "{couch_httpd_misc_handlers, handle_config_req}"},
- {"_replicate", "{couch_replicator_httpd, handle_req}"},
- {"_uuids", "{couch_httpd_misc_handlers, handle_uuids_req}"},
- {"_stats", "{couch_stats_httpd, handle_stats_req}"},
- {"_session", "{couch_httpd_auth, handle_session_req}"},
- {"_plugins", "{couch_plugins_httpd, handle_req}"}
- ]},
- { httpd_db_handlers, [
- {"_all_docs", "{couch_mrview_http, handle_all_docs_req}"},
- {"_local_docs", "{couch_mrview_http, handle_local_docs_req}"},
- {"_design_docs", "{couch_mrview_http, handle_design_docs_req}"},
- {"_changes", "{couch_httpd_db, handle_db_changes_req}"},
- {"_compact", "{couch_httpd_db, handle_compact_req}"},
- {"_design", "{couch_httpd_db, handle_design_req}"},
- {"_temp_view", "{couch_mrview_http, handle_temp_view_req}"},
- {"_view_cleanup", "{couch_mrview_http, handle_cleanup_req}"}
- ]},
- { httpd_design_handlers, [
- {"_compact", "{couch_mrview_http, handle_compact_req}"},
- {"_info", "{couch_mrview_http, handle_info_req}"},
- {"_list", "{couch_mrview_show, handle_view_list_req}"},
- {"_rewrite", "{couch_httpd_rewrite, handle_rewrite_req}"},
- {"_show", "{couch_mrview_show, handle_doc_show_req}"},
- {"_update", "{couch_mrview_show, handle_doc_update_req}"},
- {"_view", "{couch_mrview_http, handle_view_req}"}
- ]}
+ hyper,
+ couch_prometheus
]}
]}.
diff --git a/src/couch/src/couch_att.erl b/src/couch/src/couch_att.erl
index b4c95e933..9009b5226 100644
--- a/src/couch/src/couch_att.erl
+++ b/src/couch/src/couch_att.erl
@@ -690,192 +690,3 @@ validate_attachment_size(AttName, AttSize, MaxAttSize)
throw({request_entity_too_large, {attachment, AttName}});
validate_attachment_size(_AttName, _AttSize, _MAxAttSize) ->
ok.
-
-
-%% -ifdef(TEST).
-%% -include_lib("eunit/include/eunit.hrl").
-%%
-%% % Eww...
-%% -include("couch_bt_engine.hrl").
-%%
-%% %% Test utilities
-%%
-%%
-%% empty_att() -> new().
-%%
-%%
-%% upgraded_empty_att() ->
-%% new([{headers, undefined}]).
-%%
-%%
-%% %% Test groups
-%%
-%%
-%% attachment_upgrade_test_() ->
-%% {"Lazy record upgrade tests", [
-%% {"Existing record fields don't upgrade",
-%% {with, empty_att(), [fun test_non_upgrading_fields/1]}
-%% },
-%% {"New fields upgrade",
-%% {with, empty_att(), [fun test_upgrading_fields/1]}
-%% }
-%% ]}.
-%%
-%%
-%% attachment_defaults_test_() ->
-%% {"Attachment defaults tests", [
-%% {"Records retain old default values", [
-%% {with, empty_att(), [fun test_legacy_defaults/1]}
-%% ]},
-%% {"Upgraded records inherit defaults", [
-%% {with, upgraded_empty_att(), [fun test_legacy_defaults/1]}
-%% ]},
-%% {"Undefined entries are elided on upgrade", [
-%% {with, upgraded_empty_att(), [fun test_elided_entries/1]}
-%% ]}
-%% ]}.
-%%
-%% attachment_field_api_test_() ->
-%% {"Basic attachment field api", [
-%% fun test_construction/0,
-%% fun test_store_and_fetch/0,
-%% fun test_transform/0
-%% ]}.
-%%
-%%
-%% attachment_disk_term_test_() ->
-%% BaseAttachment = new([
-%% {name, <<"empty">>},
-%% {type, <<"application/octet-stream">>},
-%% {att_len, 0},
-%% {disk_len, 0},
-%% {md5, <<212,29,140,217,143,0,178,4,233,128,9,152,236,248,66,126>>},
-%% {revpos, 4},
-%% {data, {stream, {couch_bt_engine_stream, {fake_fd, fake_sp}}}},
-%% {encoding, identity}
-%% ]),
-%% BaseDiskTerm = {
-%% <<"empty">>,
-%% <<"application/octet-stream">>,
-%% fake_sp,
-%% 0, 0, 4,
-%% <<212,29,140,217,143,0,178,4,233,128,9,152,236,248,66,126>>,
-%% identity
-%% },
-%% Headers = [{<<"X-Foo">>, <<"bar">>}],
-%% ExtendedAttachment = store(headers, Headers, BaseAttachment),
-%% ExtendedDiskTerm = {BaseDiskTerm, [{headers, Headers}]},
-%% FakeDb = test_util:fake_db([{engine, {couch_bt_engine, #st{fd=fake_fd}}}]),
-%% {"Disk term tests", [
-%% ?_assertEqual(BaseDiskTerm, to_disk_term(BaseAttachment)),
-%% ?_assertEqual(BaseAttachment, from_disk_term(FakeDb, BaseDiskTerm)),
-%% ?_assertEqual(ExtendedDiskTerm, to_disk_term(ExtendedAttachment)),
-%% ?_assertEqual(ExtendedAttachment, from_disk_term(FakeDb, ExtendedDiskTerm))
-%% ]}.
-%%
-%%
-%% attachment_json_term_test_() ->
-%% Props = [
-%% {<<"content_type">>, <<"application/json">>},
-%% {<<"digest">>, <<"md5-QCNtWUNXV0UzJnEjMk92YUk1JA==">>},
-%% {<<"length">>, 14},
-%% {<<"revpos">>, 1}
-%% ],
-%% PropsInline = [{<<"data">>, <<"eyJhbnN3ZXIiOiA0Mn0=">>}] ++ Props,
-%% InvalidProps = [{<<"data">>, <<"!Base64Encoded$">>}] ++ Props,
-%% Att = couch_att:new([
-%% {name, <<"attachment.json">>},
-%% {type, <<"application/json">>}
-%% ]),
-%% ResultStub = couch_att:new([
-%% {name, <<"attachment.json">>},
-%% {type, <<"application/json">>},
-%% {att_len, 14},
-%% {disk_len, 14},
-%% {md5, <<"@#mYCWWE3&q#2OvaI5$">>},
-%% {revpos, 1},
-%% {data, stub},
-%% {encoding, identity}
-%% ]),
-%% ResultFollows = ResultStub#att{data = follows},
-%% ResultInline = ResultStub#att{md5 = <<>>, data = <<"{\"answer\": 42}">>},
-%% {"JSON term tests", [
-%% ?_assertEqual(ResultStub, stub_from_json(Att, Props)),
-%% ?_assertEqual(ResultFollows, follow_from_json(Att, Props)),
-%% ?_assertEqual(ResultInline, inline_from_json(Att, PropsInline)),
-%% ?_assertThrow({bad_request, _}, inline_from_json(Att, Props)),
-%% ?_assertThrow({bad_request, _}, inline_from_json(Att, InvalidProps))
-%% ]}.
-%%
-%%
-%% attachment_stub_merge_test_() ->
-%% %% Stub merging needs to demonstrate revpos matching, skipping, and missing
-%% %% attachment errors.
-%% {"Attachment stub merging tests", []}.
-%%
-%%
-%% %% Test generators
-%%
-%%
-%% test_non_upgrading_fields(Attachment) ->
-%% Pairs = [
-%% {name, "cat.gif"},
-%% {type, "text/very-very-plain"},
-%% {att_len, 1024},
-%% {disk_len, 42},
-%% {md5, <<"md5-hashhashhash">>},
-%% {revpos, 4},
-%% {data, stub},
-%% {encoding, gzip}
-%% ],
-%% lists:foreach(
-%% fun({Field, Value}) ->
-%% ?assertMatch(#att{}, Attachment),
-%% Updated = store(Field, Value, Attachment),
-%% ?assertMatch(#att{}, Updated)
-%% end,
-%% Pairs).
-%%
-%%
-%% test_upgrading_fields(Attachment) ->
-%% ?assertMatch(#att{}, Attachment),
-%% UpdatedHeaders = store(headers, [{<<"Ans">>, <<"42">>}], Attachment),
-%% ?assertMatch(X when is_list(X), UpdatedHeaders),
-%% UpdatedHeadersUndefined = store(headers, undefined, Attachment),
-%% ?assertMatch(X when is_list(X), UpdatedHeadersUndefined).
-%%
-%%
-%% test_legacy_defaults(Attachment) ->
-%% ?assertEqual(<<>>, fetch(md5, Attachment)),
-%% ?assertEqual(0, fetch(revpos, Attachment)),
-%% ?assertEqual(identity, fetch(encoding, Attachment)).
-%%
-%%
-%% test_elided_entries(Attachment) ->
-%% ?assertNot(lists:keymember(name, 1, Attachment)),
-%% ?assertNot(lists:keymember(type, 1, Attachment)),
-%% ?assertNot(lists:keymember(att_len, 1, Attachment)),
-%% ?assertNot(lists:keymember(disk_len, 1, Attachment)),
-%% ?assertNot(lists:keymember(data, 1, Attachment)).
-%%
-%%
-%% test_construction() ->
-%% ?assert(new() == new()),
-%% Initialized = new([{name, <<"foo.bar">>}, {type, <<"application/qux">>}]),
-%% ?assertEqual(<<"foo.bar">>, fetch(name, Initialized)),
-%% ?assertEqual(<<"application/qux">>, fetch(type, Initialized)).
-%%
-%%
-%% test_store_and_fetch() ->
-%% Attachment = empty_att(),
-%% ?assertEqual(<<"abc">>, fetch(name, store(name, <<"abc">>, Attachment))),
-%% ?assertEqual(42, fetch(ans, store(ans, 42, Attachment))).
-%%
-%%
-%% test_transform() ->
-%% Attachment = new([{counter, 0}]),
-%% Transformed = transform(counter, fun(Count) -> Count + 1 end, Attachment),
-%% ?assertEqual(1, fetch(counter, Transformed)).
-%%
-%%
-%% -endif.
diff --git a/src/couch/src/couch_auth_cache.erl b/src/couch/src/couch_auth_cache.erl
index c564cee00..919d5614f 100644
--- a/src/couch/src/couch_auth_cache.erl
+++ b/src/couch/src/couch_auth_cache.erl
@@ -16,11 +16,9 @@
-export([
get_user_creds/1,
get_user_creds/2,
- update_user_creds/3,
get_admin/1,
add_roles/2,
- auth_design_doc/1,
- ensure_users_db_exists/0
+ auth_design_doc/1
]).
@@ -41,25 +39,7 @@ get_user_creds(Req, UserName) when is_list(UserName) ->
get_user_creds(Req, ?l2b(UserName));
get_user_creds(_Req, UserName) ->
- UserCreds = case get_admin(UserName) of
- nil ->
- get_from_db(UserName);
- Props ->
- case get_from_db(UserName) of
- nil ->
- Props;
- UserProps when is_list(UserProps) ->
- add_roles(Props, couch_util:get_value(<<"roles">>, UserProps))
- end
- end,
- validate_user_creds(UserCreds).
-
-update_user_creds(_Req, UserDoc, _AuthCtx) ->
- ok = ensure_users_db_exists(),
- couch_util:with_db(users_db(), fun(UserDb) ->
- {ok, _NewRev} = couch_db:update_doc(UserDb, UserDoc, []),
- ok
- end).
+ get_admin(UserName).
add_roles(Props, ExtraRoles) ->
CurrentRoles = couch_util:get_value(<<"roles">>, Props),
@@ -94,75 +74,6 @@ make_admin_doc(DerivedKey, Salt, Iterations) ->
{<<"password_scheme">>, <<"pbkdf2">>},
{<<"derived_key">>, ?l2b(DerivedKey)}].
-
-get_from_db(UserName) ->
- ok = ensure_users_db_exists(),
- couch_util:with_db(users_db(), fun(Db) ->
- DocId = <<"org.couchdb.user:", UserName/binary>>,
- try
- {ok, Doc} = couch_db:open_doc(Db, DocId, [conflicts]),
- {DocProps} = couch_doc:to_json_obj(Doc, []),
- DocProps
- catch
- _:_Error ->
- nil
- end
- end).
-
-
-validate_user_creds(nil) ->
- nil;
-validate_user_creds(UserCreds) ->
- case couch_util:get_value(<<"_conflicts">>, UserCreds) of
- undefined ->
- ok;
- _ConflictList ->
- throw({unauthorized,
- <<"User document conflicts must be resolved before the document",
- " is used for authentication purposes.">>
- })
- end,
- {ok, UserCreds, nil}.
-
-
-users_db() ->
- DbNameList = config:get("couch_httpd_auth", "authentication_db", "_users"),
- ?l2b(DbNameList).
-
-
-ensure_users_db_exists() ->
- Options = [?ADMIN_CTX, nologifmissing],
- case couch_db:open(users_db(), Options) of
- {ok, Db} ->
- ensure_auth_ddoc_exists(Db, <<"_design/_auth">>),
- couch_db:close(Db);
- _Error ->
- {ok, Db} = couch_db:create(users_db(), Options),
- ok = ensure_auth_ddoc_exists(Db, <<"_design/_auth">>),
- couch_db:close(Db)
- end,
- ok.
-
-
-ensure_auth_ddoc_exists(Db, DDocId) ->
- case couch_db:open_doc(Db, DDocId) of
- {not_found, _Reason} ->
- {ok, AuthDesign} = auth_design_doc(DDocId),
- {ok, _Rev} = couch_db:update_doc(Db, AuthDesign, []);
- {ok, Doc} ->
- {Props} = couch_doc:to_json_obj(Doc, []),
- case couch_util:get_value(<<"validate_doc_update">>, Props, []) of
- ?AUTH_DB_DOC_VALIDATE_FUNCTION ->
- ok;
- _ ->
- Props1 = lists:keyreplace(<<"validate_doc_update">>, 1, Props,
- {<<"validate_doc_update">>,
- ?AUTH_DB_DOC_VALIDATE_FUNCTION}),
- couch_db:update_doc(Db, couch_doc:from_json_obj({Props1}), [])
- end
- end,
- ok.
-
auth_design_doc(DocId) ->
DocProps = [
{<<"_id">>, DocId},
diff --git a/src/couch/src/couch_bt_engine.erl b/src/couch/src/couch_bt_engine.erl
deleted file mode 100644
index 48e751a82..000000000
--- a/src/couch/src/couch_bt_engine.erl
+++ /dev/null
@@ -1,1246 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine).
--behavior(couch_db_engine).
-
--export([
- exists/1,
-
- delete/3,
- delete_compaction_files/3,
-
- init/2,
- terminate/2,
- handle_db_updater_call/2,
- handle_db_updater_info/2,
-
- incref/1,
- decref/1,
- monitored_by/1,
-
- last_activity/1,
-
- get_compacted_seq/1,
- get_del_doc_count/1,
- get_disk_version/1,
- get_doc_count/1,
- get_epochs/1,
- get_purge_seq/1,
- get_oldest_purge_seq/1,
- get_purge_infos_limit/1,
- get_revs_limit/1,
- get_security/1,
- get_props/1,
- get_size_info/1,
- get_partition_info/2,
- get_update_seq/1,
- get_uuid/1,
-
- set_revs_limit/2,
- set_purge_infos_limit/2,
- set_security/2,
- set_props/2,
-
- set_update_seq/2,
-
- open_docs/2,
- open_local_docs/2,
- read_doc_body/2,
- load_purge_infos/2,
-
- serialize_doc/2,
- write_doc_body/2,
- write_doc_infos/3,
- purge_docs/3,
- copy_purge_infos/2,
-
- commit_data/1,
-
- open_write_stream/2,
- open_read_stream/2,
- is_active_stream/2,
-
- fold_docs/4,
- fold_local_docs/4,
- fold_changes/5,
- fold_purge_infos/5,
- count_changes_since/2,
-
- start_compaction/4,
- finish_compaction/4
-]).
-
-
--export([
- init_state/4
-]).
-
-
--export([
- id_tree_split/1,
- id_tree_join/2,
- id_tree_reduce/2,
-
- seq_tree_split/1,
- seq_tree_join/2,
- seq_tree_reduce/2,
-
- local_tree_split/1,
- local_tree_join/2,
-
- purge_tree_split/1,
- purge_tree_join/2,
- purge_tree_reduce/2,
- purge_seq_tree_split/1,
- purge_seq_tree_join/2
-]).
-
-
-% Used by the compactor
--export([
- update_header/2,
- copy_security/2,
- copy_props/2
-]).
-
-
--include_lib("kernel/include/file.hrl").
--include_lib("couch/include/couch_db.hrl").
--include("couch_bt_engine.hrl").
-
-
-exists(FilePath) ->
- case is_file(FilePath) of
- true ->
- true;
- false ->
- is_file(FilePath ++ ".compact")
- end.
-
-
-delete(RootDir, FilePath, Async) ->
- %% Delete any leftover compaction files. If we don't do this a
- %% subsequent request for this DB will try to open them to use
- %% as a recovery.
- delete_compaction_files(RootDir, FilePath, [{context, compaction}]),
-
- % Delete the actual database file
- couch_file:delete(RootDir, FilePath, Async).
-
-
-delete_compaction_files(RootDir, FilePath, DelOpts) ->
- lists:foreach(fun(Ext) ->
- couch_file:delete(RootDir, FilePath ++ Ext, DelOpts)
- end, [".compact", ".compact.data", ".compact.meta"]).
-
-
-init(FilePath, Options) ->
- {ok, Fd} = open_db_file(FilePath, Options),
- Header = case lists:member(create, Options) of
- true ->
- delete_compaction_files(FilePath),
- Header0 = couch_bt_engine_header:new(),
- Header1 = init_set_props(Fd, Header0, Options),
- ok = couch_file:write_header(Fd, Header1),
- Header1;
- false ->
- case couch_file:read_header(Fd) of
- {ok, Header0} ->
- Header0;
- no_valid_header ->
- delete_compaction_files(FilePath),
- Header0 = couch_bt_engine_header:new(),
- ok = couch_file:write_header(Fd, Header0),
- Header0
- end
- end,
- {ok, init_state(FilePath, Fd, Header, Options)}.
-
-
-terminate(_Reason, St) ->
- % If the reason we died is because our fd disappeared
- % then we don't need to try closing it again.
- Ref = St#st.fd_monitor,
- if Ref == closed -> ok; true ->
- ok = couch_file:close(St#st.fd),
- receive
- {'DOWN', Ref, _, _, _} ->
- ok
- after 500 ->
- ok
- end
- end,
- couch_util:shutdown_sync(St#st.fd),
- ok.
-
-
-handle_db_updater_call(Msg, St) ->
- {stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
-
-
-handle_db_updater_info({'DOWN', Ref, _, _, _}, #st{fd_monitor=Ref} = St) ->
- {stop, normal, St#st{fd=undefined, fd_monitor=closed}}.
-
-
-incref(St) ->
- {ok, St#st{fd_monitor = erlang:monitor(process, St#st.fd)}}.
-
-
-decref(St) ->
- true = erlang:demonitor(St#st.fd_monitor, [flush]),
- ok.
-
-
-monitored_by(St) ->
- case erlang:process_info(St#st.fd, monitored_by) of
- {monitored_by, Pids} ->
- lists:filter(fun is_pid/1, Pids);
- _ ->
- []
- end.
-
-
-last_activity(#st{fd = Fd}) ->
- couch_file:last_read(Fd).
-
-
-get_compacted_seq(#st{header = Header}) ->
- couch_bt_engine_header:get(Header, compacted_seq).
-
-
-get_del_doc_count(#st{} = St) ->
- {ok, Reds} = couch_btree:full_reduce(St#st.id_tree),
- element(2, Reds).
-
-
-get_disk_version(#st{header = Header}) ->
- couch_bt_engine_header:get(Header, disk_version).
-
-
-get_doc_count(#st{} = St) ->
- {ok, Reds} = couch_btree:full_reduce(St#st.id_tree),
- element(1, Reds).
-
-
-get_epochs(#st{header = Header}) ->
- couch_bt_engine_header:get(Header, epochs).
-
-
-get_purge_seq(#st{purge_seq_tree = PurgeSeqTree}) ->
- Fun = fun({PurgeSeq, _, _, _}, _Reds, _Acc) ->
- {stop, PurgeSeq}
- end,
- {ok, _, PurgeSeq} = couch_btree:fold(PurgeSeqTree, Fun, 0, [{dir, rev}]),
- PurgeSeq.
-
-
-get_oldest_purge_seq(#st{purge_seq_tree = PurgeSeqTree}) ->
- Fun = fun({PurgeSeq, _, _, _}, _Reds, _Acc) ->
- {stop, PurgeSeq}
- end,
- {ok, _, PurgeSeq} = couch_btree:fold(PurgeSeqTree, Fun, 0, []),
- PurgeSeq.
-
-
-get_purge_infos_limit(#st{header = Header}) ->
- couch_bt_engine_header:get(Header, purge_infos_limit).
-
-
-get_revs_limit(#st{header = Header}) ->
- couch_bt_engine_header:get(Header, revs_limit).
-
-
-get_size_info(#st{} = St) ->
- {ok, FileSize} = couch_file:bytes(St#st.fd),
- {ok, DbReduction} = couch_btree:full_reduce(St#st.id_tree),
- SizeInfo0 = element(3, DbReduction),
- SizeInfo = case SizeInfo0 of
- SI when is_record(SI, size_info) ->
- SI;
- {AS, ES} ->
- #size_info{active=AS, external=ES};
- AS ->
- #size_info{active=AS}
- end,
- ActiveSize = active_size(St, SizeInfo),
- ExternalSize = SizeInfo#size_info.external,
- [
- {active, ActiveSize},
- {external, ExternalSize},
- {file, FileSize}
- ].
-
-
-partition_size_cb(traverse, Key, {DC, DDC, Sizes}, {Partition, DCAcc, DDCAcc, SizesAcc}) ->
- case couch_partition:is_member(Key, Partition) of
- true ->
- {skip, {Partition, DC + DCAcc, DDC + DDCAcc, reduce_sizes(Sizes, SizesAcc)}};
- false ->
- {ok, {Partition, DCAcc, DDCAcc, SizesAcc}}
- end;
-
-partition_size_cb(visit, FDI, _PrevReds, {Partition, DCAcc, DDCAcc, Acc}) ->
- InPartition = couch_partition:is_member(FDI#full_doc_info.id, Partition),
- Deleted = FDI#full_doc_info.deleted,
- case {InPartition, Deleted} of
- {true, true} ->
- {ok, {Partition, DCAcc, DDCAcc + 1,
- reduce_sizes(FDI#full_doc_info.sizes, Acc)}};
- {true, false} ->
- {ok, {Partition, DCAcc + 1, DDCAcc,
- reduce_sizes(FDI#full_doc_info.sizes, Acc)}};
- {false, _} ->
- {ok, {Partition, DCAcc, DDCAcc, Acc}}
- end.
-
-
-get_partition_info(#st{} = St, Partition) ->
- StartKey = couch_partition:start_key(Partition),
- EndKey = couch_partition:end_key(Partition),
- Fun = fun partition_size_cb/4,
- InitAcc = {Partition, 0, 0, #size_info{}},
- Options = [{start_key, StartKey}, {end_key, EndKey}],
- {ok, _, OutAcc} = couch_btree:fold(St#st.id_tree, Fun, InitAcc, Options),
- {Partition, DocCount, DocDelCount, SizeInfo} = OutAcc,
- [
- {partition, Partition},
- {doc_count, DocCount},
- {doc_del_count, DocDelCount},
- {sizes, [
- {active, SizeInfo#size_info.active},
- {external, SizeInfo#size_info.external}
- ]}
- ].
-
-
-get_security(#st{header = Header} = St) ->
- case couch_bt_engine_header:get(Header, security_ptr) of
- undefined ->
- [];
- Pointer ->
- {ok, SecProps} = couch_file:pread_term(St#st.fd, Pointer),
- SecProps
- end.
-
-
-get_props(#st{header = Header} = St) ->
- case couch_bt_engine_header:get(Header, props_ptr) of
- undefined ->
- [];
- Pointer ->
- {ok, Props} = couch_file:pread_term(St#st.fd, Pointer),
- Props
- end.
-
-
-get_update_seq(#st{header = Header}) ->
- couch_bt_engine_header:get(Header, update_seq).
-
-
-get_uuid(#st{header = Header}) ->
- couch_bt_engine_header:get(Header, uuid).
-
-
-set_revs_limit(#st{header = Header} = St, RevsLimit) ->
- NewSt = St#st{
- header = couch_bt_engine_header:set(Header, [
- {revs_limit, RevsLimit}
- ]),
- needs_commit = true
- },
- {ok, increment_update_seq(NewSt)}.
-
-
-set_purge_infos_limit(#st{header = Header} = St, PurgeInfosLimit) ->
- NewSt = St#st{
- header = couch_bt_engine_header:set(Header, [
- {purge_infos_limit, PurgeInfosLimit}
- ]),
- needs_commit = true
- },
- {ok, increment_update_seq(NewSt)}.
-
-
-set_security(#st{header = Header} = St, NewSecurity) ->
- Options = [{compression, St#st.compression}],
- {ok, Ptr, _} = couch_file:append_term(St#st.fd, NewSecurity, Options),
- NewSt = St#st{
- header = couch_bt_engine_header:set(Header, [
- {security_ptr, Ptr}
- ]),
- needs_commit = true
- },
- {ok, increment_update_seq(NewSt)}.
-
-
-set_props(#st{header = Header} = St, Props) ->
- Options = [{compression, St#st.compression}],
- {ok, Ptr, _} = couch_file:append_term(St#st.fd, Props, Options),
- NewSt = St#st{
- header = couch_bt_engine_header:set(Header, [
- {props_ptr, Ptr}
- ]),
- needs_commit = true
- },
- {ok, increment_update_seq(NewSt)}.
-
-
-open_docs(#st{} = St, DocIds) ->
- Results = couch_btree:lookup(St#st.id_tree, DocIds),
- lists:map(fun
- ({ok, FDI}) -> FDI;
- (not_found) -> not_found
- end, Results).
-
-
-open_local_docs(#st{} = St, DocIds) ->
- Results = couch_btree:lookup(St#st.local_tree, DocIds),
- lists:map(fun
- ({ok, Doc}) -> Doc;
- (not_found) -> not_found
- end, Results).
-
-
-read_doc_body(#st{} = St, #doc{} = Doc) ->
- {ok, {Body, Atts}} = couch_file:pread_term(St#st.fd, Doc#doc.body),
- Doc#doc{
- body = Body,
- atts = Atts
- }.
-
-
-load_purge_infos(St, UUIDs) ->
- Results = couch_btree:lookup(St#st.purge_tree, UUIDs),
- lists:map(fun
- ({ok, Info}) -> Info;
- (not_found) -> not_found
- end, Results).
-
-
-serialize_doc(#st{} = St, #doc{} = Doc) ->
- Compress = fun(Term) ->
- case couch_compress:is_compressed(Term, St#st.compression) of
- true -> Term;
- false -> couch_compress:compress(Term, St#st.compression)
- end
- end,
- Body = Compress(Doc#doc.body),
- Atts = Compress(Doc#doc.atts),
- SummaryBin = ?term_to_bin({Body, Atts}),
- Md5 = couch_hash:md5_hash(SummaryBin),
- Data = couch_file:assemble_file_chunk(SummaryBin, Md5),
- % TODO: This is a terrible hack to get around the issues
- % in COUCHDB-3255. We'll need to come back and figure
- % out a better approach to handling the case when we
- % need to generate a new revision id after the doc
- % has been serialized.
- Doc#doc{
- body = Data,
- meta = [{comp_body, Body} | Doc#doc.meta]
- }.
-
-
-write_doc_body(St, #doc{} = Doc) ->
- #st{
- fd = Fd
- } = St,
- {ok, Ptr, Written} = couch_file:append_raw_chunk(Fd, Doc#doc.body),
- {ok, Doc#doc{body = Ptr}, Written}.
-
-
-write_doc_infos(#st{} = St, Pairs, LocalDocs) ->
- #st{
- id_tree = IdTree,
- seq_tree = SeqTree,
- local_tree = LocalTree
- } = St,
- FinalAcc = lists:foldl(fun({OldFDI, NewFDI}, Acc) ->
- {AddAcc, RemIdsAcc, RemSeqsAcc} = Acc,
- case {OldFDI, NewFDI} of
- {not_found, #full_doc_info{}} ->
- {[NewFDI | AddAcc], RemIdsAcc, RemSeqsAcc};
- {#full_doc_info{id = Id}, #full_doc_info{id = Id}} ->
- NewAddAcc = [NewFDI | AddAcc],
- NewRemSeqsAcc = [OldFDI#full_doc_info.update_seq | RemSeqsAcc],
- {NewAddAcc, RemIdsAcc, NewRemSeqsAcc};
- {#full_doc_info{id = Id}, not_found} ->
- NewRemIdsAcc = [Id | RemIdsAcc],
- NewRemSeqsAcc = [OldFDI#full_doc_info.update_seq | RemSeqsAcc],
- {AddAcc, NewRemIdsAcc, NewRemSeqsAcc}
- end
- end, {[], [], []}, Pairs),
-
- {Add, RemIds, RemSeqs} = FinalAcc,
- {ok, IdTree2} = couch_btree:add_remove(IdTree, Add, RemIds),
- {ok, SeqTree2} = couch_btree:add_remove(SeqTree, Add, RemSeqs),
-
- {AddLDocs, RemLDocIds} = lists:foldl(fun(Doc, {AddAcc, RemAcc}) ->
- case Doc#doc.deleted of
- true ->
- {AddAcc, [Doc#doc.id | RemAcc]};
- false ->
- {[Doc | AddAcc], RemAcc}
- end
- end, {[], []}, LocalDocs),
- {ok, LocalTree2} = couch_btree:add_remove(LocalTree, AddLDocs, RemLDocIds),
-
- NewUpdateSeq = lists:foldl(fun(#full_doc_info{update_seq=Seq}, Acc) ->
- erlang:max(Seq, Acc)
- end, get_update_seq(St), Add),
-
- NewHeader = couch_bt_engine_header:set(St#st.header, [
- {update_seq, NewUpdateSeq}
- ]),
-
- {ok, St#st{
- header = NewHeader,
- id_tree = IdTree2,
- seq_tree = SeqTree2,
- local_tree = LocalTree2,
- needs_commit = true
- }}.
-
-
-purge_docs(#st{} = St, Pairs, PurgeInfos) ->
- #st{
- id_tree = IdTree,
- seq_tree = SeqTree,
- purge_tree = PurgeTree,
- purge_seq_tree = PurgeSeqTree
- } = St,
-
- RemDocIds = [Old#full_doc_info.id || {Old, not_found} <- Pairs],
- RemSeqs = [Old#full_doc_info.update_seq || {Old, _} <- Pairs],
- DocsToAdd = [New || {_, New} <- Pairs, New /= not_found],
- CurrSeq = couch_bt_engine_header:get(St#st.header, update_seq),
- Seqs = [FDI#full_doc_info.update_seq || FDI <- DocsToAdd],
- NewSeq = lists:max([CurrSeq | Seqs]),
-
- % We bump NewUpdateSeq because we have to ensure that
- % indexers see that they need to process the new purge
- % information.
- UpdateSeq = case NewSeq == CurrSeq of
- true -> CurrSeq + 1;
- false -> NewSeq
- end,
- Header = couch_bt_engine_header:set(St#st.header, [
- {update_seq, UpdateSeq}
- ]),
-
- {ok, IdTree2} = couch_btree:add_remove(IdTree, DocsToAdd, RemDocIds),
- {ok, SeqTree2} = couch_btree:add_remove(SeqTree, DocsToAdd, RemSeqs),
- {ok, PurgeTree2} = couch_btree:add(PurgeTree, PurgeInfos),
- {ok, PurgeSeqTree2} = couch_btree:add(PurgeSeqTree, PurgeInfos),
- {ok, St#st{
- header = Header,
- id_tree = IdTree2,
- seq_tree = SeqTree2,
- purge_tree = PurgeTree2,
- purge_seq_tree = PurgeSeqTree2,
- needs_commit = true
- }}.
-
-
-copy_purge_infos(#st{} = St, PurgeInfos) ->
- #st{
- purge_tree = PurgeTree,
- purge_seq_tree = PurgeSeqTree
- } = St,
- {ok, PurgeTree2} = couch_btree:add(PurgeTree, PurgeInfos),
- {ok, PurgeSeqTree2} = couch_btree:add(PurgeSeqTree, PurgeInfos),
- {ok, St#st{
- purge_tree = PurgeTree2,
- purge_seq_tree = PurgeSeqTree2,
- needs_commit = true
- }}.
-
-
-commit_data(St) ->
- #st{
- fd = Fd,
- header = OldHeader,
- needs_commit = NeedsCommit
- } = St,
-
- NewHeader = update_header(St, OldHeader),
-
- case NewHeader /= OldHeader orelse NeedsCommit of
- true ->
- couch_file:sync(Fd),
- ok = couch_file:write_header(Fd, NewHeader),
- couch_file:sync(Fd),
- {ok, St#st{
- header = NewHeader,
- needs_commit = false
- }};
- false ->
- {ok, St}
- end.
-
-
-open_write_stream(#st{} = St, Options) ->
- couch_stream:open({couch_bt_engine_stream, {St#st.fd, []}}, Options).
-
-
-open_read_stream(#st{} = St, StreamSt) ->
- {ok, {couch_bt_engine_stream, {St#st.fd, StreamSt}}}.
-
-
-is_active_stream(#st{} = St, {couch_bt_engine_stream, {Fd, _}}) ->
- St#st.fd == Fd;
-is_active_stream(_, _) ->
- false.
-
-
-fold_docs(St, UserFun, UserAcc, Options) ->
- fold_docs_int(St, St#st.id_tree, UserFun, UserAcc, Options).
-
-
-fold_local_docs(St, UserFun, UserAcc, Options) ->
- case fold_docs_int(St, St#st.local_tree, UserFun, UserAcc, Options) of
- {ok, _Reds, FinalAcc} -> {ok, null, FinalAcc};
- {ok, FinalAcc} -> {ok, FinalAcc}
- end.
-
-
-fold_changes(St, SinceSeq, UserFun, UserAcc, Options) ->
- Fun = fun drop_reductions/4,
- InAcc = {UserFun, UserAcc},
- Opts = [{start_key, SinceSeq + 1}] ++ Options,
- {ok, _, OutAcc} = couch_btree:fold(St#st.seq_tree, Fun, InAcc, Opts),
- {_, FinalUserAcc} = OutAcc,
- {ok, FinalUserAcc}.
-
-
-fold_purge_infos(St, StartSeq0, UserFun, UserAcc, Options) ->
- PurgeSeqTree = St#st.purge_seq_tree,
- StartSeq = StartSeq0 + 1,
- MinSeq = get_oldest_purge_seq(St),
- if MinSeq =< StartSeq -> ok; true ->
- erlang:error({invalid_start_purge_seq, StartSeq0})
- end,
- Wrapper = fun(Info, _Reds, UAcc) ->
- UserFun(Info, UAcc)
- end,
- Opts = [{start_key, StartSeq}] ++ Options,
- {ok, _, OutAcc} = couch_btree:fold(PurgeSeqTree, Wrapper, UserAcc, Opts),
- {ok, OutAcc}.
-
-
-count_changes_since(St, SinceSeq) ->
- BTree = St#st.seq_tree,
- FoldFun = fun(_SeqStart, PartialReds, 0) ->
- {ok, couch_btree:final_reduce(BTree, PartialReds)}
- end,
- Opts = [{start_key, SinceSeq + 1}],
- {ok, Changes} = couch_btree:fold_reduce(BTree, FoldFun, 0, Opts),
- Changes.
-
-
-start_compaction(St, DbName, Options, Parent) ->
- Args = [St, DbName, Options, Parent],
- Pid = spawn_link(couch_bt_engine_compactor, start, Args),
- {ok, St, Pid}.
-
-
-finish_compaction(OldState, DbName, Options, CompactFilePath) ->
- {ok, NewState1} = ?MODULE:init(CompactFilePath, Options),
- OldSeq = get_update_seq(OldState),
- NewSeq = get_update_seq(NewState1),
- case OldSeq == NewSeq of
- true ->
- finish_compaction_int(OldState, NewState1);
- false ->
- couch_log:info("Compaction file still behind main file "
- "(update seq=~p. compact update seq=~p). Retrying.",
- [OldSeq, NewSeq]),
- ok = decref(NewState1),
- start_compaction(OldState, DbName, Options, self())
- end.
-
-
-id_tree_split(#full_doc_info{}=Info) ->
- #full_doc_info{
- id = Id,
- update_seq = Seq,
- deleted = Deleted,
- sizes = SizeInfo,
- rev_tree = Tree
- } = Info,
- {Id, {Seq, ?b2i(Deleted), split_sizes(SizeInfo), disk_tree(Tree)}}.
-
-
-id_tree_join(Id, {HighSeq, Deleted, DiskTree}) ->
- % Handle old formats before data_size was added
- id_tree_join(Id, {HighSeq, Deleted, #size_info{}, DiskTree});
-
-id_tree_join(Id, {HighSeq, Deleted, Sizes, DiskTree}) ->
- #full_doc_info{
- id = Id,
- update_seq = HighSeq,
- deleted = ?i2b(Deleted),
- sizes = couch_db_updater:upgrade_sizes(Sizes),
- rev_tree = rev_tree(DiskTree)
- }.
-
-
-id_tree_reduce(reduce, FullDocInfos) ->
- lists:foldl(fun(Info, {NotDeleted, Deleted, Sizes}) ->
- Sizes2 = reduce_sizes(Sizes, Info#full_doc_info.sizes),
- case Info#full_doc_info.deleted of
- true ->
- {NotDeleted, Deleted + 1, Sizes2};
- false ->
- {NotDeleted + 1, Deleted, Sizes2}
- end
- end, {0, 0, #size_info{}}, FullDocInfos);
-id_tree_reduce(rereduce, Reds) ->
- lists:foldl(fun
- ({NotDeleted, Deleted}, {AccNotDeleted, AccDeleted, _AccSizes}) ->
- % pre 1.2 format, will be upgraded on compaction
- {AccNotDeleted + NotDeleted, AccDeleted + Deleted, nil};
- ({NotDeleted, Deleted, Sizes}, {AccNotDeleted, AccDeleted, AccSizes}) ->
- AccSizes2 = reduce_sizes(AccSizes, Sizes),
- {AccNotDeleted + NotDeleted, AccDeleted + Deleted, AccSizes2}
- end, {0, 0, #size_info{}}, Reds).
-
-
-seq_tree_split(#full_doc_info{}=Info) ->
- #full_doc_info{
- id = Id,
- update_seq = Seq,
- deleted = Del,
- sizes = SizeInfo,
- rev_tree = Tree
- } = Info,
- {Seq, {Id, ?b2i(Del), split_sizes(SizeInfo), disk_tree(Tree)}}.
-
-
-seq_tree_join(Seq, {Id, Del, DiskTree}) when is_integer(Del) ->
- seq_tree_join(Seq, {Id, Del, {0, 0}, DiskTree});
-
-seq_tree_join(Seq, {Id, Del, Sizes, DiskTree}) when is_integer(Del) ->
- #full_doc_info{
- id = Id,
- update_seq = Seq,
- deleted = ?i2b(Del),
- sizes = join_sizes(Sizes),
- rev_tree = rev_tree(DiskTree)
- };
-
-seq_tree_join(KeySeq, {Id, RevInfos, DeletedRevInfos}) ->
- % Older versions stored #doc_info records in the seq_tree.
- % Compact to upgrade.
- Revs = lists:map(fun({Rev, Seq, Bp}) ->
- #rev_info{rev = Rev, seq = Seq, deleted = false, body_sp = Bp}
- end, RevInfos),
- DeletedRevs = lists:map(fun({Rev, Seq, Bp}) ->
- #rev_info{rev = Rev, seq = Seq, deleted = true, body_sp = Bp}
- end, DeletedRevInfos),
- #doc_info{
- id = Id,
- high_seq = KeySeq,
- revs = Revs ++ DeletedRevs
- }.
-
-
-seq_tree_reduce(reduce, DocInfos) ->
- % count the number of documents
- length(DocInfos);
-seq_tree_reduce(rereduce, Reds) ->
- lists:sum(Reds).
-
-
-local_tree_split(#doc{revs = {0, [Rev]}} = Doc) when is_binary(Rev) ->
- #doc{
- id = Id,
- body = BodyData
- } = Doc,
- {Id, {binary_to_integer(Rev), BodyData}};
-
-local_tree_split(#doc{revs = {0, [Rev]}} = Doc) when is_integer(Rev) ->
- #doc{
- id = Id,
- body = BodyData
- } = Doc,
- {Id, {Rev, BodyData}}.
-
-
-local_tree_join(Id, {Rev, BodyData}) when is_binary(Rev) ->
- #doc{
- id = Id,
- revs = {0, [Rev]},
- body = BodyData
- };
-
-local_tree_join(Id, {Rev, BodyData}) when is_integer(Rev) ->
- #doc{
- id = Id,
- revs = {0, [integer_to_binary(Rev)]},
- body = BodyData
- }.
-
-
-purge_tree_split({PurgeSeq, UUID, DocId, Revs}) ->
- {UUID, {PurgeSeq, DocId, Revs}}.
-
-
-purge_tree_join(UUID, {PurgeSeq, DocId, Revs}) ->
- {PurgeSeq, UUID, DocId, Revs}.
-
-
-purge_seq_tree_split({PurgeSeq, UUID, DocId, Revs}) ->
- {PurgeSeq, {UUID, DocId, Revs}}.
-
-
-purge_seq_tree_join(PurgeSeq, {UUID, DocId, Revs}) ->
- {PurgeSeq, UUID, DocId, Revs}.
-
-
-purge_tree_reduce(reduce, IdRevs) ->
- % count the number of purge requests
- length(IdRevs);
-purge_tree_reduce(rereduce, Reds) ->
- lists:sum(Reds).
-
-
-set_update_seq(#st{header = Header} = St, UpdateSeq) ->
- {ok, St#st{
- header = couch_bt_engine_header:set(Header, [
- {update_seq, UpdateSeq}
- ]),
- needs_commit = true
- }}.
-
-
-copy_security(#st{header = Header} = St, SecProps) ->
- Options = [{compression, St#st.compression}],
- {ok, Ptr, _} = couch_file:append_term(St#st.fd, SecProps, Options),
- {ok, St#st{
- header = couch_bt_engine_header:set(Header, [
- {security_ptr, Ptr}
- ]),
- needs_commit = true
- }}.
-
-
-copy_props(#st{header = Header} = St, Props) ->
- Options = [{compression, St#st.compression}],
- {ok, Ptr, _} = couch_file:append_term(St#st.fd, Props, Options),
- {ok, St#st{
- header = couch_bt_engine_header:set(Header, [
- {props_ptr, Ptr}
- ]),
- needs_commit = true
- }}.
-
-
-open_db_file(FilePath, Options) ->
- case couch_file:open(FilePath, Options) of
- {ok, Fd} ->
- {ok, Fd};
- {error, enoent} ->
-            % Couldn't find file. Is there a compact version? This can
-            % happen (rarely) if we crashed during the file switch.
- case couch_file:open(FilePath ++ ".compact", [nologifmissing]) of
- {ok, Fd} ->
- Fmt = "Recovering from compaction file: ~s~s",
- couch_log:info(Fmt, [FilePath, ".compact"]),
- ok = file:rename(FilePath ++ ".compact", FilePath),
- ok = couch_file:sync(Fd),
- {ok, Fd};
- {error, enoent} ->
- throw({not_found, no_db_file})
- end;
- Error ->
- throw(Error)
- end.
-
-
-init_state(FilePath, Fd, Header0, Options) ->
- ok = couch_file:sync(Fd),
-
- Compression = couch_compress:get_compression_method(),
-
- Header1 = couch_bt_engine_header:upgrade(Header0),
- Header2 = set_default_security_object(Fd, Header1, Compression, Options),
- Header = upgrade_purge_info(Fd, Header2),
-
- IdTreeState = couch_bt_engine_header:id_tree_state(Header),
- {ok, IdTree} = couch_btree:open(IdTreeState, Fd, [
- {split, fun ?MODULE:id_tree_split/1},
- {join, fun ?MODULE:id_tree_join/2},
- {reduce, fun ?MODULE:id_tree_reduce/2},
- {compression, Compression}
- ]),
-
- SeqTreeState = couch_bt_engine_header:seq_tree_state(Header),
- {ok, SeqTree} = couch_btree:open(SeqTreeState, Fd, [
- {split, fun ?MODULE:seq_tree_split/1},
- {join, fun ?MODULE:seq_tree_join/2},
- {reduce, fun ?MODULE:seq_tree_reduce/2},
- {compression, Compression}
- ]),
-
- LocalTreeState = couch_bt_engine_header:local_tree_state(Header),
- {ok, LocalTree} = couch_btree:open(LocalTreeState, Fd, [
- {split, fun ?MODULE:local_tree_split/1},
- {join, fun ?MODULE:local_tree_join/2},
- {compression, Compression}
- ]),
-
- PurgeTreeState = couch_bt_engine_header:purge_tree_state(Header),
- {ok, PurgeTree} = couch_btree:open(PurgeTreeState, Fd, [
- {split, fun ?MODULE:purge_tree_split/1},
- {join, fun ?MODULE:purge_tree_join/2},
- {reduce, fun ?MODULE:purge_tree_reduce/2}
- ]),
-
- PurgeSeqTreeState = couch_bt_engine_header:purge_seq_tree_state(Header),
- {ok, PurgeSeqTree} = couch_btree:open(PurgeSeqTreeState, Fd, [
- {split, fun ?MODULE:purge_seq_tree_split/1},
- {join, fun ?MODULE:purge_seq_tree_join/2},
- {reduce, fun ?MODULE:purge_tree_reduce/2}
- ]),
-
- ok = couch_file:set_db_pid(Fd, self()),
-
- St = #st{
- filepath = FilePath,
- fd = Fd,
- fd_monitor = erlang:monitor(process, Fd),
- header = Header,
- needs_commit = false,
- id_tree = IdTree,
- seq_tree = SeqTree,
- local_tree = LocalTree,
- compression = Compression,
- purge_tree = PurgeTree,
- purge_seq_tree = PurgeSeqTree
- },
-
-    % If this is a new database, we've just created a
-    % new UUID and default security object, which need
-    % to be written to disk.
- case Header /= Header0 of
- true ->
- {ok, NewSt} = commit_data(St#st{needs_commit = true}),
- NewSt;
- false ->
- St
- end.
-
-
-update_header(St, Header) ->
- couch_bt_engine_header:set(Header, [
- {seq_tree_state, couch_btree:get_state(St#st.seq_tree)},
- {id_tree_state, couch_btree:get_state(St#st.id_tree)},
- {local_tree_state, couch_btree:get_state(St#st.local_tree)},
- {purge_tree_state, couch_btree:get_state(St#st.purge_tree)},
- {purge_seq_tree_state, couch_btree:get_state(St#st.purge_seq_tree)}
- ]).
-
-
-increment_update_seq(#st{header = Header} = St) ->
- UpdateSeq = couch_bt_engine_header:get(Header, update_seq),
- St#st{
- header = couch_bt_engine_header:set(Header, [
- {update_seq, UpdateSeq + 1}
- ])
- }.
-
-
-set_default_security_object(Fd, Header, Compression, Options) ->
- case couch_bt_engine_header:get(Header, security_ptr) of
- Pointer when is_integer(Pointer) ->
- Header;
- _ ->
- Default = couch_util:get_value(default_security_object, Options),
- AppendOpts = [{compression, Compression}],
- {ok, Ptr, _} = couch_file:append_term(Fd, Default, AppendOpts),
- couch_bt_engine_header:set(Header, security_ptr, Ptr)
- end.
-
-
-% This function is here, and not in couch_bt_engine_header
-% because it requires modifying file contents
-upgrade_purge_info(Fd, Header) ->
- case couch_bt_engine_header:get(Header, purge_tree_state) of
- nil ->
- Header;
- Ptr when is_tuple(Ptr) ->
- Header;
-    PurgeSeq when is_integer(PurgeSeq) ->
- % Pointer to old purged ids/revs is in purge_seq_tree_state
- Ptr = couch_bt_engine_header:get(Header, purge_seq_tree_state),
-
- case Ptr of
- nil ->
- PTS = couch_bt_engine_header:purge_tree_state(Header),
- PurgeTreeSt = case PTS of 0 -> nil; Else -> Else end,
- couch_bt_engine_header:set(Header, [
- {purge_tree_state, PurgeTreeSt}
- ]);
- _ ->
- {ok, PurgedIdsRevs} = couch_file:pread_term(Fd, Ptr),
-
- {Infos, _} = lists:foldl(fun({Id, Revs}, {InfoAcc, PSeq}) ->
- Info = {PSeq, couch_uuids:random(), Id, Revs},
- {[Info | InfoAcc], PSeq + 1}
- end, {[], PurgeSeq}, PurgedIdsRevs),
-
- {ok, PurgeTree} = couch_btree:open(nil, Fd, [
- {split, fun ?MODULE:purge_tree_split/1},
- {join, fun ?MODULE:purge_tree_join/2},
- {reduce, fun ?MODULE:purge_tree_reduce/2}
- ]),
- {ok, PurgeTree2} = couch_btree:add(PurgeTree, Infos),
- PurgeTreeSt = couch_btree:get_state(PurgeTree2),
-
- {ok, PurgeSeqTree} = couch_btree:open(nil, Fd, [
- {split, fun ?MODULE:purge_seq_tree_split/1},
- {join, fun ?MODULE:purge_seq_tree_join/2},
- {reduce, fun ?MODULE:purge_tree_reduce/2}
- ]),
- {ok, PurgeSeqTree2} = couch_btree:add(PurgeSeqTree, Infos),
- PurgeSeqTreeSt = couch_btree:get_state(PurgeSeqTree2),
-
- couch_bt_engine_header:set(Header, [
- {purge_tree_state, PurgeTreeSt},
- {purge_seq_tree_state, PurgeSeqTreeSt}
- ])
- end
- end.
-
-
-init_set_props(Fd, Header, Options) ->
- case couch_util:get_value(props, Options) of
- undefined ->
- Header;
- InitialProps ->
- Compression = couch_compress:get_compression_method(),
- AppendOpts = [{compression, Compression}],
- {ok, Ptr, _} = couch_file:append_term(Fd, InitialProps, AppendOpts),
- couch_bt_engine_header:set(Header, props_ptr, Ptr)
- end.
-
-
-delete_compaction_files(FilePath) ->
- RootDir = config:get("couchdb", "database_dir", "."),
- DelOpts = [{context, compaction}],
- delete_compaction_files(RootDir, FilePath, DelOpts).
-
-
-rev_tree(DiskTree) ->
- couch_key_tree:map(fun
- (_RevId, {Del, Ptr, Seq}) ->
- #leaf{
- deleted = ?i2b(Del),
- ptr = Ptr,
- seq = Seq
- };
- (_RevId, {Del, Ptr, Seq, Size}) ->
- #leaf{
- deleted = ?i2b(Del),
- ptr = Ptr,
- seq = Seq,
- sizes = couch_db_updater:upgrade_sizes(Size)
- };
- (_RevId, {Del, Ptr, Seq, Sizes, Atts}) ->
- #leaf{
- deleted = ?i2b(Del),
- ptr = Ptr,
- seq = Seq,
- sizes = couch_db_updater:upgrade_sizes(Sizes),
- atts = Atts
- };
- (_RevId, ?REV_MISSING) ->
- ?REV_MISSING
- end, DiskTree).
-
-
-disk_tree(RevTree) ->
- couch_key_tree:map(fun
- (_RevId, ?REV_MISSING) ->
- ?REV_MISSING;
- (_RevId, #leaf{} = Leaf) ->
- #leaf{
- deleted = Del,
- ptr = Ptr,
- seq = Seq,
- sizes = Sizes,
- atts = Atts
- } = Leaf,
- {?b2i(Del), Ptr, Seq, split_sizes(Sizes), Atts}
- end, RevTree).
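-
-% Illustrative round trip (hypothetical sizes): a live #leaf{deleted = false,
-% ptr = Ptr, seq = Seq, sizes = #size_info{active = 400, external = 350},
-% atts = Atts} is flattened by disk_tree/1 to {0, Ptr, Seq, {400, 350}, Atts}
-% and rebuilt by rev_tree/1, with ?b2i/?i2b converting the deleted flag.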
-
-
-split_sizes(#size_info{}=SI) ->
- {SI#size_info.active, SI#size_info.external}.
-
-
-join_sizes({Active, External}) when is_integer(Active), is_integer(External) ->
- #size_info{active=Active, external=External}.
-
-
-reduce_sizes(nil, _) ->
- nil;
-reduce_sizes(_, nil) ->
- nil;
-reduce_sizes(#size_info{}=S1, #size_info{}=S2) ->
- #size_info{
- active = S1#size_info.active + S2#size_info.active,
- external = S1#size_info.external + S2#size_info.external
- };
-reduce_sizes(S1, S2) ->
- US1 = couch_db_updater:upgrade_sizes(S1),
- US2 = couch_db_updater:upgrade_sizes(S2),
- reduce_sizes(US1, US2).
-
-
-active_size(#st{} = St, #size_info{} = SI) ->
- Trees = [
- St#st.id_tree,
- St#st.seq_tree,
- St#st.local_tree,
- St#st.purge_tree,
- St#st.purge_seq_tree
- ],
- lists:foldl(fun(T, Acc) ->
- case couch_btree:size(T) of
- _ when Acc == null ->
- null;
- nil ->
- null;
- Size ->
- Acc + Size
- end
- end, SI#size_info.active, Trees).
-
-
-fold_docs_int(St, Tree, UserFun, UserAcc, Options) ->
- Fun = case lists:member(include_deleted, Options) of
- true -> fun include_deleted/4;
- false -> fun skip_deleted/4
- end,
- RedFun = case lists:member(include_reductions, Options) of
- true -> fun include_reductions/4;
- false -> fun drop_reductions/4
- end,
- InAcc = {RedFun, {UserFun, UserAcc}},
- {ok, Reds, OutAcc} = couch_btree:fold(Tree, Fun, InAcc, Options),
- {_, {_, FinalUserAcc}} = OutAcc,
- case lists:member(include_reductions, Options) of
- true when Tree == St#st.id_tree ->
- {ok, fold_docs_reduce_to_count(Reds), FinalUserAcc};
- true when Tree == St#st.local_tree ->
- {ok, 0, FinalUserAcc};
- false ->
- {ok, FinalUserAcc}
- end.
-
-
-include_deleted(Case, Entry, Reds, {UserFun, UserAcc}) ->
- {Go, NewUserAcc} = UserFun(Case, Entry, Reds, UserAcc),
- {Go, {UserFun, NewUserAcc}}.
-
-
-% First element of the reductions is the total
-% number of undeleted documents.
-skip_deleted(traverse, _Entry, {0, _, _} = _Reds, Acc) ->
- {skip, Acc};
-skip_deleted(visit, #full_doc_info{deleted = true}, _, Acc) ->
- {ok, Acc};
-skip_deleted(Case, Entry, Reds, {UserFun, UserAcc}) ->
- {Go, NewUserAcc} = UserFun(Case, Entry, Reds, UserAcc),
- {Go, {UserFun, NewUserAcc}}.
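-
-% For example, the id_tree reduction is a tuple whose first element is the
-% count of not-deleted documents (see fold_docs_reduce_to_count/1), so a
-% subtree whose reduction matches {0, _, _} holds only deleted docs and can
-% be skipped wholesale by the traverse clause above.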
-
-
-include_reductions(visit, FDI, Reds, {UserFun, UserAcc}) ->
- {Go, NewUserAcc} = UserFun(FDI, Reds, UserAcc),
- {Go, {UserFun, NewUserAcc}};
-include_reductions(_, _, _, Acc) ->
- {ok, Acc}.
-
-
-drop_reductions(visit, FDI, _Reds, {UserFun, UserAcc}) ->
- {Go, NewUserAcc} = UserFun(FDI, UserAcc),
- {Go, {UserFun, NewUserAcc}};
-drop_reductions(_, _, _, Acc) ->
- {ok, Acc}.
-
-
-fold_docs_reduce_to_count(Reds) ->
- RedFun = fun id_tree_reduce/2,
- FinalRed = couch_btree:final_reduce(RedFun, Reds),
- element(1, FinalRed).
-
-
-finish_compaction_int(#st{} = OldSt, #st{} = NewSt1) ->
- #st{
- filepath = FilePath,
- local_tree = OldLocal
- } = OldSt,
- #st{
- filepath = CompactDataPath,
- header = Header,
- local_tree = NewLocal1
- } = NewSt1,
-
- % suck up all the local docs into memory and write them to the new db
- LoadFun = fun(Value, _Offset, Acc) ->
- {ok, [Value | Acc]}
- end,
- {ok, _, LocalDocs} = couch_btree:foldl(OldLocal, LoadFun, []),
- {ok, NewLocal2} = couch_btree:add(NewLocal1, LocalDocs),
-
- {ok, NewSt2} = commit_data(NewSt1#st{
- header = couch_bt_engine_header:set(Header, [
- {compacted_seq, get_update_seq(OldSt)},
- {revs_limit, get_revs_limit(OldSt)},
- {purge_infos_limit, get_purge_infos_limit(OldSt)}
- ]),
- local_tree = NewLocal2
- }),
-
- % Rename our *.compact.data file to *.compact so that if we
- % die between deleting the old file and renaming *.compact
- % we can recover correctly.
- ok = file:rename(CompactDataPath, FilePath ++ ".compact"),
-
- % Remove the uncompacted database file
- RootDir = config:get("couchdb", "database_dir", "."),
- couch_file:delete(RootDir, FilePath),
-
- % Move our compacted file into its final location
- ok = file:rename(FilePath ++ ".compact", FilePath),
-
- % Delete the old meta compaction file after promoting
- % the compaction file.
- couch_file:delete(RootDir, FilePath ++ ".compact.meta"),
-
- % We're finished with our old state
- decref(OldSt),
-
- % And return our finished new state
- {ok, NewSt2#st{
- filepath = FilePath
- }, undefined}.
-
-
-is_file(Path) ->
- case file:read_file_info(Path, [raw]) of
- {ok, #file_info{type = regular}} -> true;
- {ok, #file_info{type = directory}} -> true;
- _ -> false
- end.
diff --git a/src/couch/src/couch_bt_engine.hrl b/src/couch/src/couch_bt_engine.hrl
deleted file mode 100644
index e3c1d4983..000000000
--- a/src/couch/src/couch_bt_engine.hrl
+++ /dev/null
@@ -1,27 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(st, {
- filepath,
- fd,
- fd_monitor,
- % deprecated but keeping it here to avoid altering the record size
- fsync_options_deprecated,
- header,
- needs_commit,
- id_tree,
- seq_tree,
- local_tree,
- compression,
- purge_tree,
- purge_seq_tree
-}).
diff --git a/src/couch/src/couch_bt_engine_compactor.erl b/src/couch/src/couch_bt_engine_compactor.erl
deleted file mode 100644
index 0b3fb22ef..000000000
--- a/src/couch/src/couch_bt_engine_compactor.erl
+++ /dev/null
@@ -1,590 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine_compactor).
-
-
--export([
- start/4
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
--include("couch_bt_engine.hrl").
-
-
--record(comp_header, {
- db_header,
- meta_state
-}).
-
--record(merge_st, {
- id_tree,
- seq_tree,
- curr,
- rem_seqs,
- infos
-}).
-
-
-start(#st{} = St, DbName, Options, Parent) ->
- erlang:put(io_priority, {db_compact, DbName}),
- #st{
- filepath = FilePath,
- header = Header
- } = St,
- couch_log:debug("Compaction process spawned for db \"~s\"", [DbName]),
-
- couch_db_engine:trigger_on_compact(DbName),
-
- {ok, NewSt, DName, DFd, MFd, Retry} =
- open_compaction_files(Header, FilePath, Options),
- erlang:monitor(process, MFd),
-
- % This is a bit worrisome. init_db/4 will monitor the data fd
- % but it doesn't know about the meta fd. For now I'll maintain
- % that the data fd is the old normal fd and meta fd is special
- % and hope everything works out for the best.
- unlink(DFd),
-
- NewSt1 = copy_purge_info(DbName, St, NewSt, Retry),
- NewSt2 = copy_compact(DbName, St, NewSt1, Retry),
- NewSt3 = sort_meta_data(NewSt2),
- NewSt4 = commit_compaction_data(NewSt3),
- NewSt5 = copy_meta_data(NewSt4),
- {ok, NewSt6} = couch_bt_engine:commit_data(NewSt5),
- ok = couch_bt_engine:decref(NewSt6),
- ok = couch_file:close(MFd),
-
- % Done
- gen_server:cast(Parent, {compact_done, couch_bt_engine, DName}).
-
-
-open_compaction_files(SrcHdr, DbFilePath, Options) ->
- DataFile = DbFilePath ++ ".compact.data",
- MetaFile = DbFilePath ++ ".compact.meta",
- {ok, DataFd, DataHdr} = open_compaction_file(DataFile),
- {ok, MetaFd, MetaHdr} = open_compaction_file(MetaFile),
- DataHdrIsDbHdr = couch_bt_engine_header:is_header(DataHdr),
- case {DataHdr, MetaHdr} of
- {#comp_header{}=A, #comp_header{}=A} ->
- DbHeader = A#comp_header.db_header,
- St0 = couch_bt_engine:init_state(
- DataFile, DataFd, DbHeader, Options),
- St1 = bind_emsort(St0, MetaFd, A#comp_header.meta_state),
- {ok, St1, DataFile, DataFd, MetaFd, St0#st.id_tree};
- _ when DataHdrIsDbHdr ->
- Header = couch_bt_engine_header:from(SrcHdr),
- ok = reset_compaction_file(MetaFd, Header),
- St0 = couch_bt_engine:init_state(
- DataFile, DataFd, DataHdr, Options),
- St1 = bind_emsort(St0, MetaFd, nil),
- {ok, St1, DataFile, DataFd, MetaFd, St0#st.id_tree};
- _ ->
- Header = couch_bt_engine_header:from(SrcHdr),
- ok = reset_compaction_file(DataFd, Header),
- ok = reset_compaction_file(MetaFd, Header),
- St0 = couch_bt_engine:init_state(DataFile, DataFd, Header, Options),
- St1 = bind_emsort(St0, MetaFd, nil),
- {ok, St1, DataFile, DataFd, MetaFd, nil}
- end.
-
-
-copy_purge_info(DbName, OldSt, NewSt, Retry) ->
- MinPurgeSeq = couch_util:with_db(DbName, fun(Db) ->
- couch_db:get_minimum_purge_seq(Db)
- end),
- OldPSTree = OldSt#st.purge_seq_tree,
- StartSeq = couch_bt_engine:get_purge_seq(NewSt) + 1,
- BufferSize = config:get_integer(
- "database_compaction", "doc_buffer_size", 524288),
- CheckpointAfter = config:get(
- "database_compaction", "checkpoint_after", BufferSize * 10),
-
- EnumFun = fun(Info, _Reds, {StAcc0, InfosAcc, InfosSize, CopiedSize}) ->
- NewInfosSize = InfosSize + ?term_size(Info),
- if NewInfosSize >= BufferSize ->
- StAcc1 = copy_purge_infos(
- OldSt, StAcc0, [Info | InfosAcc], MinPurgeSeq, Retry),
- NewCopiedSize = CopiedSize + NewInfosSize,
- if NewCopiedSize >= CheckpointAfter ->
- StAcc2 = commit_compaction_data(StAcc1),
- {ok, {StAcc2, [], 0, 0}};
- true ->
- {ok, {StAcc1, [], 0, NewCopiedSize}}
- end;
- true ->
- NewInfosAcc = [Info | InfosAcc],
- {ok, {StAcc0, NewInfosAcc, NewInfosSize, CopiedSize}}
- end
- end,
-
- InitAcc = {NewSt, [], 0, 0},
- Opts = [{start_key, StartSeq}],
- {ok, _, FinalAcc} = couch_btree:fold(OldPSTree, EnumFun, InitAcc, Opts),
- {NewStAcc, Infos, _, _} = FinalAcc,
- copy_purge_infos(OldSt, NewStAcc, Infos, MinPurgeSeq, Retry).
-
-
-copy_purge_infos(OldSt, NewSt0, Infos, MinPurgeSeq, Retry) ->
- #st{
- id_tree = OldIdTree
- } = OldSt,
-
- % Re-bind our id_tree to the backing btree
- NewIdTreeState = couch_bt_engine_header:id_tree_state(NewSt0#st.header),
- MetaFd = couch_emsort:get_fd(NewSt0#st.id_tree),
- MetaState = couch_emsort:get_state(NewSt0#st.id_tree),
- NewSt1 = bind_id_tree(NewSt0, NewSt0#st.fd, NewIdTreeState),
-
- #st{
- id_tree = NewIdTree0,
- seq_tree = NewSeqTree0,
- purge_tree = NewPurgeTree0,
- purge_seq_tree = NewPurgeSeqTree0
- } = NewSt1,
-
- % Copy over the purge infos
- InfosToAdd = lists:filter(fun({PSeq, _, _, _}) ->
- PSeq > MinPurgeSeq
- end, Infos),
- {ok, NewPurgeTree1} = couch_btree:add(NewPurgeTree0, InfosToAdd),
- {ok, NewPurgeSeqTree1} = couch_btree:add(NewPurgeSeqTree0, InfosToAdd),
-
- NewSt2 = NewSt1#st{
- purge_tree = NewPurgeTree1,
- purge_seq_tree = NewPurgeSeqTree1
- },
-
-    % If we're performing a retry compaction, we have to check whether
-    % any of the referenced docs have been completely purged
-    % from the database. Any doc that has been completely purged
-    % must then be removed from our partially compacted database.
- NewSt3 = if Retry == nil -> NewSt2; true ->
- AllDocIds = [DocId || {_PurgeSeq, _UUID, DocId, _Revs} <- Infos],
- UniqDocIds = lists:usort(AllDocIds),
- OldIdResults = couch_btree:lookup(OldIdTree, UniqDocIds),
- OldZipped = lists:zip(UniqDocIds, OldIdResults),
-
-        % The list of non-existent docs in the database being compacted
- MaybeRemDocIds = [DocId || {DocId, not_found} <- OldZipped],
-
-        % Remove anything that still exists in the partially compacted database
- NewIdResults = couch_btree:lookup(NewIdTree0, MaybeRemDocIds),
- ToRemove = [Doc || {ok, Doc} <- NewIdResults, Doc /= {ok, not_found}],
-
- {RemIds, RemSeqs} = lists:unzip(lists:map(fun(FDI) ->
- #full_doc_info{
- id = Id,
- update_seq = Seq
- } = FDI,
- {Id, Seq}
- end, ToRemove)),
-
- {ok, NewIdTree1} = couch_btree:add_remove(NewIdTree0, [], RemIds),
- {ok, NewSeqTree1} = couch_btree:add_remove(NewSeqTree0, [], RemSeqs),
-
- NewSt2#st{
- id_tree = NewIdTree1,
- seq_tree = NewSeqTree1
- }
- end,
-
- Header = couch_bt_engine:update_header(NewSt3, NewSt3#st.header),
- NewSt4 = NewSt3#st{
- header = Header
- },
- bind_emsort(NewSt4, MetaFd, MetaState).
-
-
-copy_compact(DbName, St, NewSt0, Retry) ->
- Compression = couch_compress:get_compression_method(),
- NewSt = NewSt0#st{compression = Compression},
- NewUpdateSeq = couch_bt_engine:get_update_seq(NewSt0),
- TotalChanges = couch_bt_engine:count_changes_since(St, NewUpdateSeq),
- BufferSize = list_to_integer(
- config:get("database_compaction", "doc_buffer_size", "524288")),
- CheckpointAfter = couch_util:to_integer(
- config:get("database_compaction", "checkpoint_after",
- BufferSize * 10)),
-
- EnumBySeqFun =
- fun(DocInfo, _Offset,
- {AccNewSt, AccUncopied, AccUncopiedSize, AccCopiedSize}) ->
-
- Seq = case DocInfo of
- #full_doc_info{} -> DocInfo#full_doc_info.update_seq;
- #doc_info{} -> DocInfo#doc_info.high_seq
- end,
-
- AccUncopiedSize2 = AccUncopiedSize + ?term_size(DocInfo),
- if AccUncopiedSize2 >= BufferSize ->
- NewSt2 = copy_docs(
- St, AccNewSt, lists:reverse([DocInfo | AccUncopied]), Retry),
- AccCopiedSize2 = AccCopiedSize + AccUncopiedSize2,
- if AccCopiedSize2 >= CheckpointAfter ->
- {ok, NewSt3} = couch_bt_engine:set_update_seq(NewSt2, Seq),
- CommNewSt3 = commit_compaction_data(NewSt3),
- {ok, {CommNewSt3, [], 0, 0}};
- true ->
- {ok, NewSt3} = couch_bt_engine:set_update_seq(NewSt2, Seq),
- {ok, {NewSt3, [], 0, AccCopiedSize2}}
- end;
- true ->
- {ok, {AccNewSt, [DocInfo | AccUncopied], AccUncopiedSize2,
- AccCopiedSize}}
- end
- end,
-
- TaskProps0 = [
- {type, database_compaction},
- {database, DbName},
- {progress, 0},
- {changes_done, 0},
- {total_changes, TotalChanges}
- ],
- case (Retry =/= nil) and couch_task_status:is_task_added() of
- true ->
- couch_task_status:update([
- {retry, true},
- {progress, 0},
- {changes_done, 0},
- {total_changes, TotalChanges}
- ]);
- false ->
- couch_task_status:add_task(TaskProps0),
- couch_task_status:set_update_frequency(500)
- end,
-
- {ok, _, {NewSt2, Uncopied, _, _}} =
- couch_btree:foldl(St#st.seq_tree, EnumBySeqFun,
- {NewSt, [], 0, 0},
- [{start_key, NewUpdateSeq + 1}]),
-
- NewSt3 = copy_docs(St, NewSt2, lists:reverse(Uncopied), Retry),
-
- % Copy the security information over
- SecProps = couch_bt_engine:get_security(St),
- {ok, NewSt4} = couch_bt_engine:copy_security(NewSt3, SecProps),
-
- % Copy general properties over
- Props = couch_bt_engine:get_props(St),
- {ok, NewSt5} = couch_bt_engine:set_props(NewSt4, Props),
-
- FinalUpdateSeq = couch_bt_engine:get_update_seq(St),
- {ok, NewSt6} = couch_bt_engine:set_update_seq(NewSt5, FinalUpdateSeq),
- commit_compaction_data(NewSt6).
-
-
-copy_docs(St, #st{} = NewSt, MixedInfos, Retry) ->
- DocInfoIds = [Id || #doc_info{id=Id} <- MixedInfos],
- LookupResults = couch_btree:lookup(St#st.id_tree, DocInfoIds),
- % COUCHDB-968, make sure we prune duplicates during compaction
- NewInfos0 = lists:usort(fun(#full_doc_info{id=A}, #full_doc_info{id=B}) ->
- A =< B
- end, merge_lookups(MixedInfos, LookupResults)),
-
- NewInfos1 = lists:map(fun(Info) ->
- {NewRevTree, FinalAcc} = couch_key_tree:mapfold(fun
- ({RevPos, RevId}, #leaf{ptr=Sp}=Leaf, leaf, SizesAcc) ->
- {Body, AttInfos} = copy_doc_attachments(St, Sp, NewSt),
- #size_info{external = OldExternalSize} = Leaf#leaf.sizes,
- ExternalSize = case OldExternalSize of
- 0 when is_binary(Body) ->
- couch_compress:uncompressed_size(Body);
- 0 ->
- couch_ejson_size:encoded_size(Body);
- N -> N
- end,
- Doc0 = #doc{
- id = Info#full_doc_info.id,
- revs = {RevPos, [RevId]},
- deleted = Leaf#leaf.deleted,
- body = Body,
- atts = AttInfos
- },
- Doc1 = couch_bt_engine:serialize_doc(NewSt, Doc0),
- {ok, Doc2, ActiveSize} =
- couch_bt_engine:write_doc_body(NewSt, Doc1),
- AttSizes = [{element(3,A), element(4,A)} || A <- AttInfos],
- NewLeaf = Leaf#leaf{
- ptr = Doc2#doc.body,
- sizes = #size_info{
- active = ActiveSize,
- external = ExternalSize
- },
- atts = AttSizes
- },
- {NewLeaf, couch_db_updater:add_sizes(leaf, NewLeaf, SizesAcc)};
- (_Rev, _Leaf, branch, SizesAcc) ->
- {?REV_MISSING, SizesAcc}
- end, {0, 0, []}, Info#full_doc_info.rev_tree),
- {FinalAS, FinalES, FinalAtts} = FinalAcc,
- TotalAttSize = lists:foldl(fun({_, S}, A) -> S + A end, 0, FinalAtts),
- NewActiveSize = FinalAS + TotalAttSize,
- NewExternalSize = FinalES + TotalAttSize,
- Info#full_doc_info{
- rev_tree = NewRevTree,
- sizes = #size_info{
- active = NewActiveSize,
- external = NewExternalSize
- }
- }
- end, NewInfos0),
-
- Limit = couch_bt_engine:get_revs_limit(St),
- NewInfos = lists:map(fun(FDI) ->
- FDI#full_doc_info{
- rev_tree = couch_key_tree:stem(FDI#full_doc_info.rev_tree, Limit)
- }
- end, NewInfos1),
-
- RemoveSeqs =
- case Retry of
- nil ->
- [];
- OldDocIdTree ->
-            % Compaction is being rerun to catch up to writes during the
-            % first pass. This means we may have docs that already exist
-            % in the seq_tree in the .data file. Here we look up any old
-            % update_seqs so that they can be removed.
- Ids = [Id || #full_doc_info{id=Id} <- NewInfos],
- Existing = couch_btree:lookup(OldDocIdTree, Ids),
- [Seq || {ok, #full_doc_info{update_seq=Seq}} <- Existing]
- end,
-
- {ok, SeqTree} = couch_btree:add_remove(
- NewSt#st.seq_tree, NewInfos, RemoveSeqs),
-
- FDIKVs = lists:map(fun(#full_doc_info{id=Id, update_seq=Seq}=FDI) ->
- {{Id, Seq}, FDI}
- end, NewInfos),
- {ok, IdEms} = couch_emsort:add(NewSt#st.id_tree, FDIKVs),
- update_compact_task(length(NewInfos)),
- NewSt#st{id_tree=IdEms, seq_tree=SeqTree}.
-
-
-copy_doc_attachments(#st{} = SrcSt, SrcSp, DstSt) ->
- {ok, {BodyData, BinInfos0}} = couch_file:pread_term(SrcSt#st.fd, SrcSp),
- BinInfos = case BinInfos0 of
- _ when is_binary(BinInfos0) ->
- couch_compress:decompress(BinInfos0);
- _ when is_list(BinInfos0) ->
- % pre 1.2 file format
- BinInfos0
- end,
- % copy the bin values
- NewBinInfos = lists:map(
- fun({Name, Type, BinSp, AttLen, RevPos, ExpectedMd5}) ->
- % 010 UPGRADE CODE
- {ok, SrcStream} = couch_bt_engine:open_read_stream(SrcSt, BinSp),
- {ok, DstStream} = couch_bt_engine:open_write_stream(DstSt, []),
- ok = couch_stream:copy(SrcStream, DstStream),
- {NewStream, AttLen, AttLen, ActualMd5, _IdentityMd5} =
- couch_stream:close(DstStream),
- {ok, NewBinSp} = couch_stream:to_disk_term(NewStream),
- couch_util:check_md5(ExpectedMd5, ActualMd5),
- {Name, Type, NewBinSp, AttLen, AttLen, RevPos, ExpectedMd5, identity};
- ({Name, Type, BinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc1}) ->
- {ok, SrcStream} = couch_bt_engine:open_read_stream(SrcSt, BinSp),
- {ok, DstStream} = couch_bt_engine:open_write_stream(DstSt, []),
- ok = couch_stream:copy(SrcStream, DstStream),
- {NewStream, AttLen, _, ActualMd5, _IdentityMd5} =
- couch_stream:close(DstStream),
- {ok, NewBinSp} = couch_stream:to_disk_term(NewStream),
- couch_util:check_md5(ExpectedMd5, ActualMd5),
- Enc = case Enc1 of
- true ->
- % 0110 UPGRADE CODE
- gzip;
- false ->
- % 0110 UPGRADE CODE
- identity;
- _ ->
- Enc1
- end,
- {Name, Type, NewBinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc}
- end, BinInfos),
- {BodyData, NewBinInfos}.
-
-
-sort_meta_data(St0) ->
- {ok, Ems} = couch_emsort:merge(St0#st.id_tree),
- St0#st{id_tree=Ems}.
-
-
-copy_meta_data(#st{} = St) ->
- #st{
- fd = Fd,
- header = Header,
- id_tree = Src
- } = St,
- DstState = couch_bt_engine_header:id_tree_state(Header),
- {ok, IdTree0} = couch_btree:open(DstState, Fd, [
- {split, fun couch_bt_engine:id_tree_split/1},
- {join, fun couch_bt_engine:id_tree_join/2},
- {reduce, fun couch_bt_engine:id_tree_reduce/2}
- ]),
- {ok, Iter} = couch_emsort:iter(Src),
- Acc0 = #merge_st{
- id_tree=IdTree0,
- seq_tree=St#st.seq_tree,
- rem_seqs=[],
- infos=[]
- },
- Acc = merge_docids(Iter, Acc0),
- {ok, IdTree} = couch_btree:add(Acc#merge_st.id_tree, Acc#merge_st.infos),
- {ok, SeqTree} = couch_btree:add_remove(
- Acc#merge_st.seq_tree, [], Acc#merge_st.rem_seqs
- ),
- St#st{id_tree=IdTree, seq_tree=SeqTree}.
-
-
-open_compaction_file(FilePath) ->
- case couch_file:open(FilePath, [nologifmissing]) of
- {ok, Fd} ->
- case couch_file:read_header(Fd) of
- {ok, Header} -> {ok, Fd, Header};
- no_valid_header -> {ok, Fd, nil}
- end;
- {error, enoent} ->
- {ok, Fd} = couch_file:open(FilePath, [create]),
- {ok, Fd, nil}
- end.
-
-
-reset_compaction_file(Fd, Header) ->
- ok = couch_file:truncate(Fd, 0),
- ok = couch_file:write_header(Fd, Header).
-
-
-commit_compaction_data(#st{}=St) ->
- % Compaction needs to write headers to both the data file
- % and the meta file so if we need to restart we can pick
- % back up from where we left off.
- commit_compaction_data(St, couch_emsort:get_fd(St#st.id_tree)),
- commit_compaction_data(St, St#st.fd).
-
-
-commit_compaction_data(#st{header = OldHeader} = St0, Fd) ->
- DataState = couch_bt_engine_header:id_tree_state(OldHeader),
- MetaFd = couch_emsort:get_fd(St0#st.id_tree),
- MetaState = couch_emsort:get_state(St0#st.id_tree),
- St1 = bind_id_tree(St0, St0#st.fd, DataState),
- Header = couch_bt_engine:update_header(St1, St1#st.header),
- CompHeader = #comp_header{
- db_header = Header,
- meta_state = MetaState
- },
- ok = couch_file:sync(Fd),
- ok = couch_file:write_header(Fd, CompHeader),
- St2 = St1#st{
- header = Header
- },
- bind_emsort(St2, MetaFd, MetaState).
-
-
-bind_emsort(St, Fd, nil) ->
- {ok, Ems} = couch_emsort:open(Fd),
- St#st{id_tree=Ems};
-bind_emsort(St, Fd, State) ->
- {ok, Ems} = couch_emsort:open(Fd, [{root, State}]),
- St#st{id_tree=Ems}.
-
-
-bind_id_tree(St, Fd, State) ->
- {ok, IdBtree} = couch_btree:open(State, Fd, [
- {split, fun couch_bt_engine:id_tree_split/1},
- {join, fun couch_bt_engine:id_tree_join/2},
- {reduce, fun couch_bt_engine:id_tree_reduce/2}
- ]),
- St#st{id_tree=IdBtree}.
-
-
-merge_lookups(Infos, []) ->
- Infos;
-merge_lookups([], _) ->
- [];
-merge_lookups([#doc_info{}=DI | RestInfos], [{ok, FDI} | RestLookups]) ->
- % Assert we've matched our lookups
- if DI#doc_info.id == FDI#full_doc_info.id -> ok; true ->
- erlang:error({mismatched_doc_infos, DI#doc_info.id})
- end,
- [FDI | merge_lookups(RestInfos, RestLookups)];
-merge_lookups([FDI | RestInfos], Lookups) ->
- [FDI | merge_lookups(RestInfos, Lookups)].
-
-
-merge_docids(Iter, #merge_st{infos=Infos}=Acc) when length(Infos) > 1000 ->
- #merge_st{
- id_tree=IdTree0,
- seq_tree=SeqTree0,
- rem_seqs=RemSeqs
- } = Acc,
- {ok, IdTree1} = couch_btree:add(IdTree0, Infos),
- {ok, SeqTree1} = couch_btree:add_remove(SeqTree0, [], RemSeqs),
- Acc1 = Acc#merge_st{
- id_tree=IdTree1,
- seq_tree=SeqTree1,
- rem_seqs=[],
- infos=[]
- },
- merge_docids(Iter, Acc1);
-merge_docids(Iter, #merge_st{curr=Curr}=Acc) ->
- case next_info(Iter, Curr, []) of
- {NextIter, NewCurr, FDI, Seqs} ->
- Acc1 = Acc#merge_st{
- infos = [FDI | Acc#merge_st.infos],
- rem_seqs = Seqs ++ Acc#merge_st.rem_seqs,
- curr = NewCurr
- },
- merge_docids(NextIter, Acc1);
- {finished, FDI, Seqs} ->
- Acc#merge_st{
- infos = [FDI | Acc#merge_st.infos],
- rem_seqs = Seqs ++ Acc#merge_st.rem_seqs,
- curr = undefined
- };
- empty ->
- Acc
- end.
-
-
-next_info(Iter, undefined, []) ->
- case couch_emsort:next(Iter) of
- {ok, {{Id, Seq}, FDI}, NextIter} ->
- next_info(NextIter, {Id, Seq, FDI}, []);
- finished ->
- empty
- end;
-next_info(Iter, {Id, Seq, FDI}, Seqs) ->
- case couch_emsort:next(Iter) of
- {ok, {{Id, NSeq}, NFDI}, NextIter} ->
- next_info(NextIter, {Id, NSeq, NFDI}, [Seq | Seqs]);
- {ok, {{NId, NSeq}, NFDI}, NextIter} ->
- {NextIter, {NId, NSeq, NFDI}, FDI, Seqs};
- finished ->
- {finished, FDI, Seqs}
- end.
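-
-% Sketch of the dedup behaviour (hypothetical entries): the emsort keys are
-% {Id, Seq} and arrive sorted, so {{<<"doc">>, 3}, F3}, {{<<"doc">>, 7}, F7},
-% {{<<"x">>, 9}, F9} yields F7 for <<"doc">> with rem_seq 3 queued for
-% removal from the seq_tree, then F9 for <<"x">>.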
-
-
-update_compact_task(NumChanges) ->
- [Changes, Total] = couch_task_status:get([changes_done, total_changes]),
- Changes2 = Changes + NumChanges,
- Progress = case Total of
- 0 ->
- 0;
- _ ->
- (Changes2 * 100) div Total
- end,
- couch_task_status:update([{changes_done, Changes2}, {progress, Progress}]).
-
diff --git a/src/couch/src/couch_bt_engine_header.erl b/src/couch/src/couch_bt_engine_header.erl
deleted file mode 100644
index 3f9f51821..000000000
--- a/src/couch/src/couch_bt_engine_header.erl
+++ /dev/null
@@ -1,451 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine_header).
-
-
--export([
- new/0,
- from/1,
- is_header/1,
- upgrade/1,
- get/2,
- get/3,
- set/2,
- set/3
-]).
-
--export([
- disk_version/1,
- latest_disk_version/0,
- update_seq/1,
- id_tree_state/1,
- seq_tree_state/1,
- latest/1,
- local_tree_state/1,
- purge_tree_state/1,
- purge_seq_tree_state/1,
- purge_infos_limit/1,
- security_ptr/1,
- revs_limit/1,
- uuid/1,
- epochs/1,
- compacted_seq/1
-]).
-
-
-% This should be updated anytime a header change happens that requires more
-% than filling in new defaults.
-%
-% As long as the changes are limited to new header fields (with inline
-% defaults) added to the end of the record, there is no need to increment
-% the disk revision number.
-%
-% If the disk revision is incremented, then new upgrade logic will need to be
-% added to couch_db_updater:init_db.
-
--define(LATEST_DISK_VERSION, 8).
-
--record(db_header, {
- disk_version = ?LATEST_DISK_VERSION,
- update_seq = 0,
- unused = 0,
- id_tree_state = nil,
- seq_tree_state = nil,
- local_tree_state = nil,
- purge_tree_state = nil,
- purge_seq_tree_state = nil, %purge tree: purge_seq -> uuid
- security_ptr = nil,
- revs_limit = 1000,
- uuid,
- epochs,
- compacted_seq,
- purge_infos_limit = 1000,
- props_ptr
-}).
-
-
--define(PARTITION_DISK_VERSION, 8).
-
-
-new() ->
- #db_header{
- uuid = couch_uuids:random(),
- epochs = [{node(), 0}]
- }.
-
-
-from(Header0) ->
- Header = upgrade(Header0),
- #db_header{
- uuid = Header#db_header.uuid,
- epochs = Header#db_header.epochs,
- compacted_seq = Header#db_header.compacted_seq
- }.
-
-
-is_header(Header) ->
- try
- upgrade(Header),
- true
- catch _:_ ->
- false
- end.
-
-
-upgrade(Header) ->
- Funs = [
- fun upgrade_tuple/1,
- fun upgrade_disk_version/1,
- fun upgrade_uuid/1,
- fun upgrade_epochs/1,
- fun upgrade_compacted_seq/1
- ],
- lists:foldl(fun(F, HdrAcc) ->
- F(HdrAcc)
- end, Header, Funs).
-
-
-get(Header, Key) ->
- ?MODULE:get(Header, Key, undefined).
-
-
-get(Header, Key, Default) ->
- get_field(Header, Key, Default).
-
-
-set(Header, Key, Value) ->
- ?MODULE:set(Header, [{Key, Value}]).
-
-
-set(Header0, Fields) ->
-    % A subtlety here is that if a database was open during
-    % the release upgrade that adds uuids and epochs, then
-    % this dynamic upgrade also assigns a uuid and epoch.
- Header = upgrade(Header0),
- lists:foldl(fun({Field, Value}, HdrAcc) ->
- set_field(HdrAcc, Field, Value)
- end, Header, Fields).
-
-
-disk_version(Header) ->
- get_field(Header, disk_version).
-
-
-latest_disk_version() ->
- ?LATEST_DISK_VERSION.
-
-
-update_seq(Header) ->
- get_field(Header, update_seq).
-
-
-id_tree_state(Header) ->
- get_field(Header, id_tree_state).
-
-
-seq_tree_state(Header) ->
- get_field(Header, seq_tree_state).
-
-
-local_tree_state(Header) ->
- get_field(Header, local_tree_state).
-
-
-purge_tree_state(Header) ->
- get_field(Header, purge_tree_state).
-
-
-purge_seq_tree_state(Header) ->
- get_field(Header, purge_seq_tree_state).
-
-
-security_ptr(Header) ->
- get_field(Header, security_ptr).
-
-
-revs_limit(Header) ->
- get_field(Header, revs_limit).
-
-
-uuid(Header) ->
- get_field(Header, uuid).
-
-
-epochs(Header) ->
- get_field(Header, epochs).
-
-
-compacted_seq(Header) ->
- get_field(Header, compacted_seq).
-
-
-purge_infos_limit(Header) ->
- get_field(Header, purge_infos_limit).
-
-
-get_field(Header, Field) ->
- get_field(Header, Field, undefined).
-
-
-get_field(Header, Field, Default) ->
- Idx = index(Field),
- case Idx > tuple_size(Header) of
- true -> Default;
- false -> element(index(Field), Header)
- end.
-
-
-set_field(Header, Field, Value) ->
- setelement(index(Field), Header, Value).
-
-
-index(Field) ->
- couch_util:get_value(Field, indexes()).
-
-
-indexes() ->
- Fields = record_info(fields, db_header),
- Indexes = lists:seq(2, record_info(size, db_header)),
- lists:zip(Fields, Indexes).
-
-
-upgrade_tuple(Old) when is_record(Old, db_header) ->
- Old;
-upgrade_tuple(Old) when is_tuple(Old) ->
- NewSize = record_info(size, db_header),
- if tuple_size(Old) < NewSize -> ok; true ->
- erlang:error({invalid_header_size, Old})
- end,
- {_, New} = lists:foldl(fun(Val, {Idx, Hdr}) ->
- {Idx+1, setelement(Idx, Hdr, Val)}
- end, {1, #db_header{}}, tuple_to_list(Old)),
- if is_record(New, db_header) -> ok; true ->
- erlang:error({invalid_header_extension, {Old, New}})
- end,
- New.
-
--define(OLD_DISK_VERSION_ERROR,
- "Database files from versions smaller than 0.10.0 are no longer supported").
-
-upgrade_disk_version(#db_header{}=Header) ->
- case element(2, Header) of
- 1 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
- 2 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
- 3 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
- 4 -> Header#db_header{security_ptr = nil}; % [0.10 - 0.11)
- 5 -> Header#db_header{disk_version = ?LATEST_DISK_VERSION}; % pre 1.2
- 6 -> Header#db_header{disk_version = ?LATEST_DISK_VERSION}; % pre clustered purge
- 7 -> Header#db_header{disk_version = ?LATEST_DISK_VERSION}; % pre partitioned dbs
- ?LATEST_DISK_VERSION -> Header;
- _ ->
- Reason = "Incorrect disk header version",
- throw({database_disk_version_error, Reason})
- end.
-
-
-upgrade_uuid(#db_header{}=Header) ->
- case Header#db_header.uuid of
- undefined ->
-            % Upgrading this old db file to a newer
-            % on-disk format that includes a UUID.
- Header#db_header{uuid=couch_uuids:random()};
- _ ->
- Header
- end.
-
-
-upgrade_epochs(#db_header{}=Header) ->
- NewEpochs = case Header#db_header.epochs of
- undefined ->
-            % This node is taking over ownership of a shard with
-            % an old version of the couch file. Before epochs there
-            % was always an implicit assumption that a file was
-            % owned since eternity by the node it was on. This
-            % just codifies that assumption.
- [{node(), 0}];
- [{Node, _} | _] = Epochs0 when Node == node() ->
- % Current node is the current owner of this db
- Epochs0;
- Epochs1 ->
- % This node is taking over ownership of this db
- % and marking the update sequence where it happened.
- [{node(), Header#db_header.update_seq} | Epochs1]
- end,
-    % It's possible for a node to open a db and claim
-    % ownership but never make a write to the db. This
-    % removes nodes that claimed ownership but never
-    % changed the database.
- DedupedEpochs = remove_dup_epochs(NewEpochs),
- Header#db_header{epochs=DedupedEpochs}.
-
-
-% This relies slightly on the update_seqs being sorted
-% in epochs because we only ever push things onto the
-% front. Although if we ever had a case where the update_seq
-% is not monotonically increasing, I don't know that we'd
-% want to remove dupes (by sorting the input to this
-% function). So for now we don't sort but rely on the
-% idea that epochs is always sorted.
-remove_dup_epochs([_]=Epochs) ->
- Epochs;
-remove_dup_epochs([{N1, S}, {_N2, S}]) ->
- % Seqs match, keep the most recent owner
- [{N1, S}];
-remove_dup_epochs([_, _]=Epochs) ->
- % Seqs don't match.
- Epochs;
-remove_dup_epochs([{N1, S}, {_N2, S} | Rest]) ->
- % Seqs match, keep the most recent owner
- remove_dup_epochs([{N1, S} | Rest]);
-remove_dup_epochs([{N1, S1}, {N2, S2} | Rest]) ->
- % Seqs don't match, recurse to check others
- [{N1, S1} | remove_dup_epochs([{N2, S2} | Rest])].
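-
-% Worked example (hypothetical nodes): remove_dup_epochs([{'n2@h', 5},
-% {'n1@h', 5}, {'n1@h', 0}]) keeps the newest owner of the duplicated seq
-% and returns [{'n2@h', 5}, {'n1@h', 0}].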
-
-
-upgrade_compacted_seq(#db_header{}=Header) ->
- case Header#db_header.compacted_seq of
- undefined ->
- Header#db_header{compacted_seq=0};
- _ ->
- Header
- end.
-
-latest(?LATEST_DISK_VERSION) ->
- true;
-latest(N) when is_integer(N), N < ?LATEST_DISK_VERSION ->
- false;
-latest(_Else) ->
- undefined.
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-mk_header(Vsn) ->
- {
- db_header, % record name
- Vsn, % disk version
- 100, % update_seq
- 0, % unused
- foo, % id_tree_state
- bar, % seq_tree_state
- bam, % local_tree_state
- flam, % was purge_seq - now purge_tree_state
- baz, % was purged_docs - now purge_seq_tree_state
- bang, % security_ptr
- 999 % revs_limit
- }.
-
-
--ifdef(run_broken_tests).
-
-upgrade_v3_test() ->
- Vsn3Header = mk_header(3),
- NewHeader = upgrade_tuple(Vsn3Header),
-
- % Tuple upgrades don't change
- ?assert(is_record(NewHeader, db_header)),
- ?assertEqual(3, disk_version(NewHeader)),
- ?assertEqual(100, update_seq(NewHeader)),
- ?assertEqual(foo, id_tree_state(NewHeader)),
- ?assertEqual(bar, seq_tree_state(NewHeader)),
- ?assertEqual(bam, local_tree_state(NewHeader)),
- ?assertEqual(flam, purge_tree_state(NewHeader)),
- ?assertEqual(baz, purge_seq_tree_state(NewHeader)),
- ?assertEqual(bang, security_ptr(NewHeader)),
- ?assertEqual(999, revs_limit(NewHeader)),
- ?assertEqual(undefined, uuid(NewHeader)),
- ?assertEqual(undefined, epochs(NewHeader)),
-
- % Security ptr isn't changed until upgrade_disk_version/1
- NewNewHeader = upgrade_disk_version(NewHeader),
- ?assert(is_record(NewNewHeader, db_header)),
- ?assertEqual(nil, security_ptr(NewNewHeader)),
-
- % Assert upgrade works on really old headers
- NewestHeader = upgrade(Vsn3Header),
- ?assertMatch(<<_:32/binary>>, uuid(NewestHeader)),
- ?assertEqual([{node(), 0}], epochs(NewestHeader)).
-
--endif.
-
-upgrade_v5_to_v8_test() ->
- Vsn5Header = mk_header(5),
- NewHeader = upgrade_disk_version(upgrade_tuple(Vsn5Header)),
-
- ?assert(is_record(NewHeader, db_header)),
- ?assertEqual(8, disk_version(NewHeader)),
-
- % Security ptr isn't changed for v5 headers
- ?assertEqual(bang, security_ptr(NewHeader)).
-
-
-upgrade_uuid_test() ->
- Vsn5Header = mk_header(5),
-
- % Upgraded headers get a new UUID
- NewHeader = upgrade_uuid(upgrade_disk_version(upgrade_tuple(Vsn5Header))),
- ?assertMatch(<<_:32/binary>>, uuid(NewHeader)),
-
- % Headers with a UUID don't have their UUID changed
- NewNewHeader = upgrade_uuid(upgrade_disk_version(upgrade_tuple(NewHeader))),
- ?assertEqual(uuid(NewHeader), uuid(NewNewHeader)),
-
- % Derived empty headers maintain the same UUID
- ResetHeader = from(NewNewHeader),
- ?assertEqual(uuid(NewHeader), uuid(ResetHeader)).
-
-
-upgrade_epochs_test() ->
- Vsn5Header = mk_header(5),
-
- % Upgraded headers get a default epochs set
- NewHeader = upgrade(Vsn5Header),
- ?assertEqual([{node(), 0}], epochs(NewHeader)),
-
- % Fake an old entry in epochs
- FakeFields = [
- {update_seq, 20},
- {epochs, [{'someothernode@someotherhost', 0}]}
- ],
- NotOwnedHeader = set(NewHeader, FakeFields),
-
- OwnedEpochs = [
- {node(), 20},
- {'someothernode@someotherhost', 0}
- ],
-
- % Upgrading a header not owned by the local node updates
- % the epochs appropriately.
- NowOwnedHeader = upgrade(NotOwnedHeader),
- ?assertEqual(OwnedEpochs, epochs(NowOwnedHeader)),
-
- % Headers with epochs stay the same after upgrades
- NewNewHeader = upgrade(NowOwnedHeader),
- ?assertEqual(OwnedEpochs, epochs(NewNewHeader)),
-
- % Getting a reset header maintains the epoch data
- ResetHeader = from(NewNewHeader),
- ?assertEqual(OwnedEpochs, epochs(ResetHeader)).
-
-
-get_uuid_from_old_header_test() ->
- Vsn5Header = mk_header(5),
- ?assertEqual(undefined, uuid(Vsn5Header)).
-
-
-get_epochs_from_old_header_test() ->
- Vsn5Header = mk_header(5),
- ?assertEqual(undefined, epochs(Vsn5Header)).
-
-
--endif.
diff --git a/src/couch/src/couch_bt_engine_stream.erl b/src/couch/src/couch_bt_engine_stream.erl
deleted file mode 100644
index 431894a50..000000000
--- a/src/couch/src/couch_bt_engine_stream.erl
+++ /dev/null
@@ -1,70 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine_stream).
-
--export([
- foldl/3,
- seek/2,
- write/2,
- finalize/1,
- to_disk_term/1
-]).
-
-
-foldl({_Fd, []}, _Fun, Acc) ->
- Acc;
-
-foldl({Fd, [{Pos, _} | Rest]}, Fun, Acc) ->
- foldl({Fd, [Pos | Rest]}, Fun, Acc);
-
-foldl({Fd, [Bin | Rest]}, Fun, Acc) when is_binary(Bin) ->
- % We're processing the first bit of data
- % after we did a seek for a range fold.
- foldl({Fd, Rest}, Fun, Fun(Bin, Acc));
-
-foldl({Fd, [Pos | Rest]}, Fun, Acc) when is_integer(Pos) ->
- {ok, Bin} = couch_file:pread_binary(Fd, Pos),
- foldl({Fd, Rest}, Fun, Fun(Bin, Acc)).
-
-
-seek({Fd, [{Pos, Length} | Rest]}, Offset) ->
- case Length =< Offset of
- true ->
- seek({Fd, Rest}, Offset - Length);
- false ->
- seek({Fd, [Pos | Rest]}, Offset)
- end;
-
-seek({Fd, [Pos | Rest]}, Offset) when is_integer(Pos) ->
- {ok, Bin} = couch_file:pread_binary(Fd, Pos),
- case iolist_size(Bin) =< Offset of
- true ->
- seek({Fd, Rest}, Offset - size(Bin));
- false ->
- <<_:Offset/binary, Tail/binary>> = Bin,
- {ok, {Fd, [Tail | Rest]}}
- end.
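-
-% Example with hypothetical chunk sizes: seeking to offset 7000 in
-% {Fd, [{P1, 4096}, {P2, 4096}]} skips the first chunk (4096 =< 7000), then
-% splits the second at offset 2904, leaving the trailing 1192 bytes to fold.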
-
-
-write({Fd, Written}, Data) when is_pid(Fd) ->
- {ok, Pos, _} = couch_file:append_binary(Fd, Data),
- {ok, {Fd, [{Pos, iolist_size(Data)} | Written]}}.
-
-
-finalize({Fd, Written}) ->
- {ok, {Fd, lists:reverse(Written)}}.
-
-
-to_disk_term({_Fd, Written}) ->
- {ok, Written}.
-
diff --git a/src/couch/src/couch_btree.erl b/src/couch/src/couch_btree.erl
deleted file mode 100644
index ea0cf69e9..000000000
--- a/src/couch/src/couch_btree.erl
+++ /dev/null
@@ -1,855 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_btree).
-
--export([open/2, open/3, query_modify/4, add/2, add_remove/3]).
--export([fold/4, full_reduce/1, final_reduce/2, size/1, foldl/3, foldl/4]).
--export([fold_reduce/4, lookup/2, get_state/1, set_options/2]).
--export([extract/2, assemble/3, less/3]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(FILL_RATIO, 0.5).
-
-extract(#btree{extract_kv=undefined}, Value) ->
- Value;
-extract(#btree{extract_kv=Extract}, Value) ->
- Extract(Value).
-
-assemble(#btree{assemble_kv=undefined}, Key, Value) ->
- {Key, Value};
-assemble(#btree{assemble_kv=Assemble}, Key, Value) ->
- Assemble(Key, Value).
-
-less(#btree{less=undefined}, A, B) ->
- A < B;
-less(#btree{less=Less}, A, B) ->
- Less(A, B).
-
-% Pass in 'nil' for State when opening a new Btree.
-open(State, Fd) ->
- {ok, #btree{root=State, fd=Fd}}.
-
-set_options(Bt, []) ->
- Bt;
-set_options(Bt, [{split, Extract}|Rest]) ->
- set_options(Bt#btree{extract_kv=Extract}, Rest);
-set_options(Bt, [{join, Assemble}|Rest]) ->
- set_options(Bt#btree{assemble_kv=Assemble}, Rest);
-set_options(Bt, [{less, Less}|Rest]) ->
- set_options(Bt#btree{less=Less}, Rest);
-set_options(Bt, [{reduce, Reduce}|Rest]) ->
- set_options(Bt#btree{reduce=Reduce}, Rest);
-set_options(Bt, [{compression, Comp}|Rest]) ->
- set_options(Bt#btree{compression=Comp}, Rest).
-
-open(State, Fd, Options) ->
- {ok, set_options(#btree{root=State, fd=Fd}, Options)}.
-
-get_state(#btree{root=Root}) ->
- Root.
-
-final_reduce(#btree{reduce=Reduce}, Val) ->
- final_reduce(Reduce, Val);
-final_reduce(Reduce, {[], []}) ->
- Reduce(reduce, []);
-final_reduce(_Bt, {[], [Red]}) ->
- Red;
-final_reduce(Reduce, {[], Reductions}) ->
- Reduce(rereduce, Reductions);
-final_reduce(Reduce, {KVs, Reductions}) ->
- Red = Reduce(reduce, KVs),
- final_reduce(Reduce, {[], [Red | Reductions]}).
-
-fold_reduce(#btree{root=Root}=Bt, Fun, Acc, Options) ->
- Dir = couch_util:get_value(dir, Options, fwd),
- StartKey = couch_util:get_value(start_key, Options),
- InEndRangeFun = make_key_in_end_range_function(Bt, Dir, Options),
- KeyGroupFun = get_group_fun(Bt, Options),
- try
- {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
- reduce_stream_node(Bt, Dir, Root, StartKey, InEndRangeFun, undefined, [], [],
- KeyGroupFun, Fun, Acc),
- if GroupedKey2 == undefined ->
- {ok, Acc2};
- true ->
- case Fun(GroupedKey2, {GroupedKVsAcc2, GroupedRedsAcc2}, Acc2) of
- {ok, Acc3} -> {ok, Acc3};
- {stop, Acc3} -> {ok, Acc3}
- end
- end
- catch
- throw:{stop, AccDone} -> {ok, AccDone}
- end.
-
-full_reduce(#btree{root=nil,reduce=Reduce}) ->
- {ok, Reduce(reduce, [])};
-full_reduce(#btree{root=Root}) ->
- {ok, element(2, Root)}.
-
-size(#btree{root = nil}) ->
- 0;
-size(#btree{root = {_P, _Red}}) ->
- % pre 1.2 format
- nil;
-size(#btree{root = {_P, _Red, Size}}) ->
- Size.
-
-get_group_fun(Bt, Options) ->
- case couch_util:get_value(key_group_level, Options) of
- exact ->
- make_group_fun(Bt, exact);
- 0 ->
- fun(_, _) -> true end;
- N when is_integer(N), N > 0 ->
- make_group_fun(Bt, N);
- undefined ->
- couch_util:get_value(key_group_fun, Options, fun(_,_) -> true end)
- end.
-
-make_group_fun(Bt, exact) ->
- fun({Key1, _}, {Key2, _}) ->
- case less(Bt, {Key1, nil}, {Key2, nil}) of
- false ->
- case less(Bt, {Key2, nil}, {Key1, nil}) of
- false ->
- true;
- _ ->
- false
- end;
- _ ->
- false
- end
- end;
-make_group_fun(Bt, GroupLevel) when is_integer(GroupLevel), GroupLevel > 0 ->
- fun
- GF({{p, Partition, Key1}, Val1}, {{p, Partition, Key2}, Val2}) ->
- GF({Key1, Val1}, {Key2, Val2});
- GF({[_|_] = Key1, _}, {[_|_] = Key2, _}) ->
- SL1 = lists:sublist(Key1, GroupLevel),
- SL2 = lists:sublist(Key2, GroupLevel),
- case less(Bt, {SL1, nil}, {SL2, nil}) of
- false ->
- case less(Bt, {SL2, nil}, {SL1, nil}) of
- false ->
- true;
- _ ->
- false
- end;
- _ ->
- false
- end;
- GF({Key1, _}, {Key2, _}) ->
- case less(Bt, {Key1, nil}, {Key2, nil}) of
- false ->
- case less(Bt, {Key2, nil}, {Key1, nil}) of
- false ->
- true;
- _ ->
- false
- end;
- _ ->
- false
- end
- end.
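-
-% make_group_fun example (hypothetical keys): with GroupLevel = 2,
-% [2021, 3, 15] and [2021, 3, 28] share the sublist [2021, 3] and group
-% together, while [2021, 4, 1] does not; non-list keys fall through to
-% whole-key comparison in the last clause.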
-
-% Wraps a 2 or 3 arity function into the proper 4 arity fold function.
-convert_fun_arity(Fun) when is_function(Fun, 2) ->
- fun
- (visit, KV, _Reds, AccIn) -> Fun(KV, AccIn);
- (traverse, _K, _Red, AccIn) -> {ok, AccIn}
- end;
-convert_fun_arity(Fun) when is_function(Fun, 3) ->
- fun
- (visit, KV, Reds, AccIn) -> Fun(KV, Reds, AccIn);
- (traverse, _K, _Red, AccIn) -> {ok, AccIn}
- end;
-convert_fun_arity(Fun) when is_function(Fun, 4) ->
- Fun. % Already arity 4
-
-make_key_in_end_range_function(Bt, fwd, Options) ->
- case couch_util:get_value(end_key_gt, Options) of
- undefined ->
- case couch_util:get_value(end_key, Options) of
- undefined ->
- fun(_Key) -> true end;
- LastKey ->
- fun(Key) -> not less(Bt, LastKey, Key) end
- end;
- EndKey ->
- fun(Key) -> less(Bt, Key, EndKey) end
- end;
-make_key_in_end_range_function(Bt, rev, Options) ->
- case couch_util:get_value(end_key_gt, Options) of
- undefined ->
- case couch_util:get_value(end_key, Options) of
- undefined ->
- fun(_Key) -> true end;
- LastKey ->
- fun(Key) -> not less(Bt, Key, LastKey) end
- end;
- EndKey ->
- fun(Key) -> less(Bt, EndKey, Key) end
- end.
-
-
-foldl(Bt, Fun, Acc) ->
- fold(Bt, Fun, Acc, []).
-
-foldl(Bt, Fun, Acc, Options) ->
- fold(Bt, Fun, Acc, Options).
-
-
-fold(#btree{root=nil}, _Fun, Acc, _Options) ->
- {ok, {[], []}, Acc};
-fold(#btree{root=Root}=Bt, Fun, Acc, Options) ->
- Dir = couch_util:get_value(dir, Options, fwd),
- InRange = make_key_in_end_range_function(Bt, Dir, Options),
- Result =
- case couch_util:get_value(start_key, Options) of
- undefined ->
- stream_node(Bt, [], Bt#btree.root, InRange, Dir,
- convert_fun_arity(Fun), Acc);
- StartKey ->
- stream_node(Bt, [], Bt#btree.root, StartKey, InRange, Dir,
- convert_fun_arity(Fun), Acc)
- end,
- case Result of
- {ok, Acc2}->
- FullReduction = element(2, Root),
- {ok, {[], [FullReduction]}, Acc2};
- {stop, LastReduction, Acc2} ->
- {ok, LastReduction, Acc2}
- end.
-
-add(Bt, InsertKeyValues) ->
- add_remove(Bt, InsertKeyValues, []).
-
-add_remove(Bt, InsertKeyValues, RemoveKeys) ->
- {ok, [], Bt2} = query_modify(Bt, [], InsertKeyValues, RemoveKeys),
- {ok, Bt2}.
-
-query_modify(Bt, LookupKeys, InsertValues, RemoveKeys) ->
- #btree{root=Root} = Bt,
- InsertActions = lists:map(
- fun(KeyValue) ->
- {Key, Value} = extract(Bt, KeyValue),
- {insert, Key, Value}
- end, InsertValues),
- RemoveActions = [{remove, Key, nil} || Key <- RemoveKeys],
- FetchActions = [{fetch, Key, nil} || Key <- LookupKeys],
- SortFun =
- fun({OpA, A, _}, {OpB, B, _}) ->
- case A == B of
- % A and B are equal, sort by op.
- true -> op_order(OpA) < op_order(OpB);
- false ->
- less(Bt, A, B)
- end
- end,
- Actions = lists:sort(SortFun, lists:append([InsertActions, RemoveActions, FetchActions])),
- {ok, KeyPointers, QueryResults} = modify_node(Bt, Root, Actions, []),
- {ok, NewRoot} = complete_root(Bt, KeyPointers),
- {ok, QueryResults, Bt#btree{root=NewRoot}}.
-
-% for ordering different operations with the same key.
-% fetch < remove < insert
-op_order(fetch) -> 1;
-op_order(remove) -> 2;
-op_order(insert) -> 3.
-
-lookup(#btree{root=Root, less=Less}=Bt, Keys) ->
- SortedKeys = case Less of
- undefined -> lists:sort(Keys);
- _ -> lists:sort(Less, Keys)
- end,
- {ok, SortedResults} = lookup(Bt, Root, SortedKeys),
- % We want to return the results in the same order as the keys were input
- % but we may have changed the order when we sorted. So we need to put the
- % order back into the results.
- couch_util:reorder_results(Keys, SortedResults).
-
-lookup(_Bt, nil, Keys) ->
- {ok, [{Key, not_found} || Key <- Keys]};
-lookup(Bt, Node, Keys) ->
- Pointer = element(1, Node),
- {NodeType, NodeList} = get_node(Bt, Pointer),
- case NodeType of
- kp_node ->
- lookup_kpnode(Bt, list_to_tuple(NodeList), 1, Keys, []);
- kv_node ->
- lookup_kvnode(Bt, list_to_tuple(NodeList), 1, Keys, [])
- end.
-
-lookup_kpnode(_Bt, _NodeTuple, _LowerBound, [], Output) ->
- {ok, lists:reverse(Output)};
-lookup_kpnode(_Bt, NodeTuple, LowerBound, Keys, Output) when tuple_size(NodeTuple) < LowerBound ->
- {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])};
-lookup_kpnode(Bt, NodeTuple, LowerBound, [FirstLookupKey | _] = LookupKeys, Output) ->
- N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), FirstLookupKey),
- {Key, PointerInfo} = element(N, NodeTuple),
- SplitFun = fun(LookupKey) -> not less(Bt, Key, LookupKey) end,
- case lists:splitwith(SplitFun, LookupKeys) of
- {[], GreaterQueries} ->
- lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, Output);
- {LessEqQueries, GreaterQueries} ->
- {ok, Results} = lookup(Bt, PointerInfo, LessEqQueries),
- lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, lists:reverse(Results, Output))
- end.
-
-
-lookup_kvnode(_Bt, _NodeTuple, _LowerBound, [], Output) ->
- {ok, lists:reverse(Output)};
-lookup_kvnode(_Bt, NodeTuple, LowerBound, Keys, Output) when tuple_size(NodeTuple) < LowerBound ->
- % keys not found
- {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])};
-lookup_kvnode(Bt, NodeTuple, LowerBound, [LookupKey | RestLookupKeys], Output) ->
- N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), LookupKey),
- {Key, Value} = element(N, NodeTuple),
- case less(Bt, LookupKey, Key) of
- true ->
- % LookupKey is less than Key
- lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, not_found} | Output]);
- false ->
- case less(Bt, Key, LookupKey) of
- true ->
- % LookupKey is greater than Key
- lookup_kvnode(Bt, NodeTuple, N+1, RestLookupKeys, [{LookupKey, not_found} | Output]);
- false ->
- % LookupKey is equal to Key
- lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, {ok, assemble(Bt, LookupKey, Value)}} | Output])
- end
- end.
-
-
-complete_root(_Bt, []) ->
- {ok, nil};
-complete_root(_Bt, [{_Key, PointerInfo}])->
- {ok, PointerInfo};
-complete_root(Bt, KPs) ->
- {ok, ResultKeyPointers} = write_node(Bt, kp_node, KPs),
- complete_root(Bt, ResultKeyPointers).
-
-%%%%%%%%%%%%% The chunkify function sucks! %%%%%%%%%%%%%
-% It is inaccurate as it does not account for compression when blocks are
-% written. Plus with the "case byte_size(term_to_binary(InList)) of" code
-% it's probably really inefficient.
-
-chunkify(InList) ->
- BaseChunkSize = get_chunk_size(),
- case ?term_size(InList) of
- Size when Size > BaseChunkSize ->
- NumberOfChunksLikely = ((Size div BaseChunkSize) + 1),
- ChunkThreshold = Size div NumberOfChunksLikely,
- chunkify(InList, ChunkThreshold, [], 0, []);
- _Else ->
- [InList]
- end.
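-
-% Example with the default 1279-byte chunk size: a 3000-byte node gives
-% NumberOfChunksLikely = 3 and ChunkThreshold = 1000, so elements accumulate
-% until one crosses roughly 1000 bytes, at which point that element closes
-% the current chunk and a new chunk starts.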
-
-chunkify([], _ChunkThreshold, [], 0, OutputChunks) ->
- lists:reverse(OutputChunks);
-chunkify([], _ChunkThreshold, [Item], _OutListSize, [PrevChunk | RestChunks]) ->
- NewPrevChunk = PrevChunk ++ [Item],
- lists:reverse(RestChunks, [NewPrevChunk]);
-chunkify([], _ChunkThreshold, OutList, _OutListSize, OutputChunks) ->
- lists:reverse([lists:reverse(OutList) | OutputChunks]);
-chunkify([InElement | RestInList], ChunkThreshold, OutList, OutListSize, OutputChunks) ->
- case ?term_size(InElement) of
- Size when (Size + OutListSize) > ChunkThreshold andalso OutList /= [] ->
- chunkify(RestInList, ChunkThreshold, [], 0, [lists:reverse([InElement | OutList]) | OutputChunks]);
- Size ->
- chunkify(RestInList, ChunkThreshold, [InElement | OutList], OutListSize + Size, OutputChunks)
- end.
-
--compile({inline,[get_chunk_size/0]}).
-get_chunk_size() ->
- try
- list_to_integer(config:get("couchdb", "btree_chunk_size", "1279"))
- catch error:badarg ->
- 1279
- end.
-
-modify_node(Bt, RootPointerInfo, Actions, QueryOutput) ->
- {NodeType, NodeList} = case RootPointerInfo of
- nil ->
- {kv_node, []};
- _Tuple ->
- Pointer = element(1, RootPointerInfo),
- get_node(Bt, Pointer)
- end,
- NodeTuple = list_to_tuple(NodeList),
-
- {ok, NewNodeList, QueryOutput2} =
- case NodeType of
- kp_node -> modify_kpnode(Bt, NodeTuple, 1, Actions, [], QueryOutput);
- kv_node -> modify_kvnode(Bt, NodeTuple, 1, Actions, [], QueryOutput)
- end,
- case NewNodeList of
- [] -> % no nodes remain
- {ok, [], QueryOutput2};
- NodeList -> % nothing changed
- {LastKey, _LastValue} = element(tuple_size(NodeTuple), NodeTuple),
- {ok, [{LastKey, RootPointerInfo}], QueryOutput2};
- _Else2 ->
- {ok, ResultList} = case RootPointerInfo of
- nil ->
- write_node(Bt, NodeType, NewNodeList);
- _ ->
- {LastKey, _LastValue} = element(tuple_size(NodeTuple), NodeTuple),
- OldNode = {LastKey, RootPointerInfo},
- write_node(Bt, OldNode, NodeType, NodeList, NewNodeList)
- end,
- {ok, ResultList, QueryOutput2}
- end.
-
-reduce_node(#btree{reduce=nil}, _NodeType, _NodeList) ->
- [];
-reduce_node(#btree{reduce=R}, kp_node, NodeList) ->
- R(rereduce, [element(2, Node) || {_K, Node} <- NodeList]);
-reduce_node(#btree{reduce=R}=Bt, kv_node, NodeList) ->
- R(reduce, [assemble(Bt, K, V) || {K, V} <- NodeList]).
-
-reduce_tree_size(kv_node, NodeSize, _KvList) ->
- NodeSize;
-reduce_tree_size(kp_node, NodeSize, []) ->
- NodeSize;
-reduce_tree_size(kp_node, _NodeSize, [{_K, {_P, _Red}} | _]) ->
- % pre 1.2 format
- nil;
-reduce_tree_size(kp_node, _NodeSize, [{_K, {_P, _Red, nil}} | _]) ->
- nil;
-reduce_tree_size(kp_node, NodeSize, [{_K, {_P, _Red, Sz}} | NodeList]) ->
- reduce_tree_size(kp_node, NodeSize + Sz, NodeList).
-
-get_node(#btree{fd = Fd}, NodePos) ->
- {ok, {NodeType, NodeList}} = couch_file:pread_term(Fd, NodePos),
- {NodeType, NodeList}.
-
-write_node(#btree{fd = Fd, compression = Comp} = Bt, NodeType, NodeList) ->
- % split up nodes into smaller sizes
- NodeListList = chunkify(NodeList),
- % now write out each chunk and return the KeyPointer pairs for those nodes
- ResultList = [
- begin
- {ok, Pointer, Size} = couch_file:append_term(
- Fd, {NodeType, ANodeList}, [{compression, Comp}]),
- {LastKey, _} = lists:last(ANodeList),
- SubTreeSize = reduce_tree_size(NodeType, Size, ANodeList),
- {LastKey, {Pointer, reduce_node(Bt, NodeType, ANodeList), SubTreeSize}}
- end
- ||
- ANodeList <- NodeListList
- ],
- {ok, ResultList}.
-
-
-write_node(Bt, _OldNode, NodeType, [], NewList) ->
- write_node(Bt, NodeType, NewList);
-write_node(Bt, _OldNode, NodeType, [_], NewList) ->
- write_node(Bt, NodeType, NewList);
-write_node(Bt, OldNode, NodeType, OldList, NewList) ->
- case can_reuse_old_node(OldList, NewList) of
- {true, Prefix, Suffix} ->
- {ok, PrefixKVs} = case Prefix of
- [] -> {ok, []};
- _ -> write_node(Bt, NodeType, Prefix)
- end,
- {ok, SuffixKVs} = case Suffix of
- [] -> {ok, []};
- _ -> write_node(Bt, NodeType, Suffix)
- end,
- Result = PrefixKVs ++ [OldNode] ++ SuffixKVs,
- {ok, Result};
- false ->
- write_node(Bt, NodeType, NewList)
- end.
-
-can_reuse_old_node(OldList, NewList) ->
- {Prefix, RestNewList} = remove_prefix_kvs(hd(OldList), NewList),
- case old_list_is_prefix(OldList, RestNewList, 0) of
- {true, Size, Suffix} ->
- ReuseThreshold = get_chunk_size() * ?FILL_RATIO,
- if Size < ReuseThreshold -> false; true ->
- {true, Prefix, Suffix}
- end;
- false ->
- false
- end.
-
-remove_prefix_kvs(KV1, [KV2 | Rest]) when KV2 < KV1 ->
- {Prefix, RestNewList} = remove_prefix_kvs(KV1, Rest),
- {[KV2 | Prefix], RestNewList};
-remove_prefix_kvs(_, RestNewList) ->
- {[], RestNewList}.
-
-% No more KVs in the old node so it's a prefix
-old_list_is_prefix([], Suffix, Size) ->
- {true, Size, Suffix};
-% Some KVs have been removed from the old node
-old_list_is_prefix(_OldList, [], _Size) ->
- false;
-% KV is equal in both old and new node so continue
-old_list_is_prefix([KV | Rest1], [KV | Rest2], Acc) ->
- old_list_is_prefix(Rest1, Rest2, ?term_size(KV) + Acc);
-% KV mismatch between old and new node so not a prefix
-old_list_is_prefix(_OldList, _NewList, _Acc) ->
- false.
-
-modify_kpnode(Bt, {}, _LowerBound, Actions, [], QueryOutput) ->
- modify_node(Bt, nil, Actions, QueryOutput);
-modify_kpnode(_Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) ->
- {ok, lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound,
- tuple_size(NodeTuple), [])), QueryOutput};
-modify_kpnode(Bt, NodeTuple, LowerBound,
- [{_, FirstActionKey, _}|_]=Actions, ResultNode, QueryOutput) ->
- Sz = tuple_size(NodeTuple),
- N = find_first_gteq(Bt, NodeTuple, LowerBound, Sz, FirstActionKey),
- case N =:= Sz of
- true ->
- % perform remaining actions on last node
- {_, PointerInfo} = element(Sz, NodeTuple),
- {ok, ChildKPs, QueryOutput2} =
- modify_node(Bt, PointerInfo, Actions, QueryOutput),
- NodeList = lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound,
- Sz - 1, ChildKPs)),
- {ok, NodeList, QueryOutput2};
- false ->
- {NodeKey, PointerInfo} = element(N, NodeTuple),
- SplitFun = fun({_ActionType, ActionKey, _ActionValue}) ->
- not less(Bt, NodeKey, ActionKey)
- end,
- {LessEqQueries, GreaterQueries} = lists:splitwith(SplitFun, Actions),
- {ok, ChildKPs, QueryOutput2} =
- modify_node(Bt, PointerInfo, LessEqQueries, QueryOutput),
- ResultNode2 = lists:reverse(ChildKPs, bounded_tuple_to_revlist(NodeTuple,
- LowerBound, N - 1, ResultNode)),
- modify_kpnode(Bt, NodeTuple, N+1, GreaterQueries, ResultNode2, QueryOutput2)
- end.
-
-bounded_tuple_to_revlist(_Tuple, Start, End, Tail) when Start > End ->
- Tail;
-bounded_tuple_to_revlist(Tuple, Start, End, Tail) ->
- bounded_tuple_to_revlist(Tuple, Start+1, End, [element(Start, Tuple)|Tail]).
-
-bounded_tuple_to_list(Tuple, Start, End, Tail) ->
- bounded_tuple_to_list2(Tuple, Start, End, [], Tail).
-
-bounded_tuple_to_list2(_Tuple, Start, End, Acc, Tail) when Start > End ->
- lists:reverse(Acc, Tail);
-bounded_tuple_to_list2(Tuple, Start, End, Acc, Tail) ->
- bounded_tuple_to_list2(Tuple, Start + 1, End, [element(Start, Tuple) | Acc], Tail).
-
-find_first_gteq(_Bt, _Tuple, Start, End, _Key) when Start == End ->
- End;
-find_first_gteq(Bt, Tuple, Start, End, Key) ->
- Mid = Start + ((End - Start) div 2),
- {TupleKey, _} = element(Mid, Tuple),
- case less(Bt, TupleKey, Key) of
- true ->
- find_first_gteq(Bt, Tuple, Mid+1, End, Key);
- false ->
- find_first_gteq(Bt, Tuple, Start, Mid, Key)
- end.
-
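find_first_gteq/5 above is a binary search over the 1-based tuple of {Key, Value} pairs: it yields the index of the first entry whose key is not less than the search key, falling back to the upper bound when every key sorts lower (which modify_kpnode treats as "apply the remaining actions to the last child"). A standalone sketch with plain term ordering in place of the btree's pluggable less/2 (hypothetical helper, for illustration only):

first_gteq(_Tuple, Start, End, _Key) when Start == End ->
    End;
first_gteq(Tuple, Start, End, Key) ->
    Mid = Start + (End - Start) div 2,
    {MidKey, _} = element(Mid, Tuple),
    case MidKey < Key of
        true  -> first_gteq(Tuple, Mid + 1, End, Key);
        false -> first_gteq(Tuple, Start, Mid, Key)
    end.

%% first_gteq({{1,a}, {3,b}, {5,c}}, 1, 3, 4) =:= 3, i.e. {5,c} is the first entry >= 4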
-modify_kvnode(_Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) ->
- {ok, lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound, tuple_size(NodeTuple), [])), QueryOutput};
-modify_kvnode(Bt, NodeTuple, LowerBound, [{ActionType, ActionKey, ActionValue} | RestActions], ResultNode, QueryOutput) when LowerBound > tuple_size(NodeTuple) ->
- case ActionType of
- insert ->
- modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
- remove ->
- % just drop the action
- modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, QueryOutput);
- fetch ->
- % the key/value must not exist in the tree
- modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput])
- end;
-modify_kvnode(Bt, NodeTuple, LowerBound, [{ActionType, ActionKey, ActionValue} | RestActions], AccNode, QueryOutput) ->
- N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), ActionKey),
- {Key, Value} = element(N, NodeTuple),
- ResultNode = bounded_tuple_to_revlist(NodeTuple, LowerBound, N - 1, AccNode),
- case less(Bt, ActionKey, Key) of
- true ->
- case ActionType of
- insert ->
- % ActionKey is less than the Key, so insert
- modify_kvnode(Bt, NodeTuple, N, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
- remove ->
- % ActionKey is less than the Key, just drop the action
- modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, QueryOutput);
- fetch ->
- % ActionKey is less than the Key, the key/value must not exist in the tree
- modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput])
- end;
- false ->
- % ActionKey and Key are maybe equal.
- case less(Bt, Key, ActionKey) of
- false ->
- case ActionType of
- insert ->
- modify_kvnode(Bt, NodeTuple, N+1, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
- remove ->
- modify_kvnode(Bt, NodeTuple, N+1, RestActions, ResultNode, QueryOutput);
- fetch ->
-                % ActionKey is equal to the Key, insert into the QueryOutput, but re-process the node
- % since an identical action key can follow it.
- modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [{ok, assemble(Bt, Key, Value)} | QueryOutput])
- end;
- true ->
- modify_kvnode(Bt, NodeTuple, N + 1, [{ActionType, ActionKey, ActionValue} | RestActions], [{Key, Value} | ResultNode], QueryOutput)
- end
- end.
-
-
-reduce_stream_node(_Bt, _Dir, nil, _KeyStart, _InEndRangeFun, GroupedKey, GroupedKVsAcc,
- GroupedRedsAcc, _KeyGroupFun, _Fun, Acc) ->
- {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey};
-reduce_stream_node(Bt, Dir, Node, KeyStart, InEndRangeFun, GroupedKey, GroupedKVsAcc,
- GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
- P = element(1, Node),
- case get_node(Bt, P) of
- {kp_node, NodeList} ->
- NodeList2 = adjust_dir(Dir, NodeList),
- reduce_stream_kp_node(Bt, Dir, NodeList2, KeyStart, InEndRangeFun, GroupedKey,
- GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc);
- {kv_node, KVs} ->
- KVs2 = adjust_dir(Dir, KVs),
- reduce_stream_kv_node(Bt, Dir, KVs2, KeyStart, InEndRangeFun, GroupedKey,
- GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc)
- end.
-
-reduce_stream_kv_node(Bt, Dir, KVs, KeyStart, InEndRangeFun,
- GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
- KeyGroupFun, Fun, Acc) ->
-
- GTEKeyStartKVs =
- case KeyStart of
- undefined ->
- KVs;
- _ ->
- DropFun = case Dir of
- fwd ->
- fun({Key, _}) -> less(Bt, Key, KeyStart) end;
- rev ->
- fun({Key, _}) -> less(Bt, KeyStart, Key) end
- end,
- lists:dropwhile(DropFun, KVs)
- end,
- KVs2 = lists:takewhile(
- fun({Key, _}) -> InEndRangeFun(Key) end, GTEKeyStartKVs),
- reduce_stream_kv_node2(Bt, KVs2, GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
- KeyGroupFun, Fun, Acc).
-
-
-reduce_stream_kv_node2(_Bt, [], GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
- _KeyGroupFun, _Fun, Acc) ->
- {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey};
-reduce_stream_kv_node2(Bt, [{Key, Value}| RestKVs], GroupedKey, GroupedKVsAcc,
- GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
- case GroupedKey of
- undefined ->
- reduce_stream_kv_node2(Bt, RestKVs, Key,
- [assemble(Bt,Key,Value)], [], KeyGroupFun, Fun, Acc);
- _ ->
-
- case KeyGroupFun(GroupedKey, Key) of
- true ->
- reduce_stream_kv_node2(Bt, RestKVs, GroupedKey,
- [assemble(Bt,Key,Value)|GroupedKVsAcc], GroupedRedsAcc, KeyGroupFun,
- Fun, Acc);
- false ->
- case Fun(GroupedKey, {GroupedKVsAcc, GroupedRedsAcc}, Acc) of
- {ok, Acc2} ->
- reduce_stream_kv_node2(Bt, RestKVs, Key, [assemble(Bt,Key,Value)],
- [], KeyGroupFun, Fun, Acc2);
- {stop, Acc2} ->
- throw({stop, Acc2})
- end
- end
- end.
-
-reduce_stream_kp_node(Bt, Dir, NodeList, KeyStart, InEndRangeFun,
- GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
- KeyGroupFun, Fun, Acc) ->
- Nodes =
- case KeyStart of
- undefined ->
- NodeList;
- _ ->
- case Dir of
- fwd ->
- lists:dropwhile(fun({Key, _}) -> less(Bt, Key, KeyStart) end, NodeList);
- rev ->
- RevKPs = lists:reverse(NodeList),
- case lists:splitwith(fun({Key, _}) -> less(Bt, Key, KeyStart) end, RevKPs) of
- {_Before, []} ->
- NodeList;
- {Before, [FirstAfter | _]} ->
- [FirstAfter | lists:reverse(Before)]
- end
- end
- end,
- {InRange, MaybeInRange} = lists:splitwith(
- fun({Key, _}) -> InEndRangeFun(Key) end, Nodes),
- NodesInRange = case MaybeInRange of
- [FirstMaybeInRange | _] when Dir =:= fwd ->
- InRange ++ [FirstMaybeInRange];
- _ ->
- InRange
- end,
- reduce_stream_kp_node2(Bt, Dir, NodesInRange, KeyStart, InEndRangeFun,
- GroupedKey, GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc).
-
-
-reduce_stream_kp_node2(Bt, Dir, [{_Key, NodeInfo} | RestNodeList], KeyStart, InEndRangeFun,
- undefined, [], [], KeyGroupFun, Fun, Acc) ->
- {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
- reduce_stream_node(Bt, Dir, NodeInfo, KeyStart, InEndRangeFun, undefined,
- [], [], KeyGroupFun, Fun, Acc),
- reduce_stream_kp_node2(Bt, Dir, RestNodeList, KeyStart, InEndRangeFun, GroupedKey2,
- GroupedKVsAcc2, GroupedRedsAcc2, KeyGroupFun, Fun, Acc2);
-reduce_stream_kp_node2(Bt, Dir, NodeList, KeyStart, InEndRangeFun,
- GroupedKey, GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
- {Grouped0, Ungrouped0} = lists:splitwith(fun({Key,_}) ->
- KeyGroupFun(GroupedKey, Key) end, NodeList),
- {GroupedNodes, UngroupedNodes} =
- case Grouped0 of
- [] ->
- {Grouped0, Ungrouped0};
- _ ->
- [FirstGrouped | RestGrouped] = lists:reverse(Grouped0),
- {RestGrouped, [FirstGrouped | Ungrouped0]}
- end,
- GroupedReds = [element(2, Node) || {_, Node} <- GroupedNodes],
- case UngroupedNodes of
- [{_Key, NodeInfo}|RestNodes] ->
- {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
- reduce_stream_node(Bt, Dir, NodeInfo, KeyStart, InEndRangeFun, GroupedKey,
- GroupedKVsAcc, GroupedReds ++ GroupedRedsAcc, KeyGroupFun, Fun, Acc),
- reduce_stream_kp_node2(Bt, Dir, RestNodes, KeyStart, InEndRangeFun, GroupedKey2,
- GroupedKVsAcc2, GroupedRedsAcc2, KeyGroupFun, Fun, Acc2);
- [] ->
- {ok, Acc, GroupedReds ++ GroupedRedsAcc, GroupedKVsAcc, GroupedKey}
- end.
-
-adjust_dir(fwd, List) ->
- List;
-adjust_dir(rev, List) ->
- lists:reverse(List).
-
-stream_node(Bt, Reds, Node, StartKey, InRange, Dir, Fun, Acc) ->
- Pointer = element(1, Node),
- {NodeType, NodeList} = get_node(Bt, Pointer),
- case NodeType of
- kp_node ->
- stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc);
- kv_node ->
- stream_kv_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc)
- end.
-
-stream_node(Bt, Reds, Node, InRange, Dir, Fun, Acc) ->
- Pointer = element(1, Node),
- {NodeType, NodeList} = get_node(Bt, Pointer),
- case NodeType of
- kp_node ->
- stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc);
- kv_node ->
- stream_kv_node2(Bt, Reds, [], adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc)
- end.
-
-stream_kp_node(_Bt, _Reds, [], _InRange, _Dir, _Fun, Acc) ->
- {ok, Acc};
-stream_kp_node(Bt, Reds, [{Key, Node} | Rest], InRange, Dir, Fun, Acc) ->
- Red = element(2, Node),
- case Fun(traverse, Key, Red, Acc) of
- {ok, Acc2} ->
- case stream_node(Bt, Reds, Node, InRange, Dir, Fun, Acc2) of
- {ok, Acc3} ->
- stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc3);
- {stop, LastReds, Acc3} ->
- {stop, LastReds, Acc3}
- end;
- {skip, Acc2} ->
- stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc2);
- {stop, Acc2} ->
- {stop, Reds, Acc2}
- end.
-
-drop_nodes(_Bt, Reds, _StartKey, []) ->
- {Reds, []};
-drop_nodes(Bt, Reds, StartKey, [{NodeKey, Node} | RestKPs]) ->
- case less(Bt, NodeKey, StartKey) of
- true ->
- drop_nodes(Bt, [element(2, Node) | Reds], StartKey, RestKPs);
- false ->
- {Reds, [{NodeKey, Node} | RestKPs]}
- end.
-
-stream_kp_node(Bt, Reds, KPs, StartKey, InRange, Dir, Fun, Acc) ->
- {NewReds, NodesToStream} =
- case Dir of
- fwd ->
- % drop all nodes sorting before the key
- drop_nodes(Bt, Reds, StartKey, KPs);
- rev ->
- % keep all nodes sorting before the key, AND the first node to sort after
- RevKPs = lists:reverse(KPs),
- case lists:splitwith(fun({Key, _Pointer}) -> less(Bt, Key, StartKey) end, RevKPs) of
- {_RevsBefore, []} ->
- % everything sorts before it
- {Reds, KPs};
- {RevBefore, [FirstAfter | Drop]} ->
- {[element(2, Node) || {_K, Node} <- Drop] ++ Reds,
- [FirstAfter | lists:reverse(RevBefore)]}
- end
- end,
- case NodesToStream of
- [] ->
- {ok, Acc};
- [{_Key, Node} | Rest] ->
- case stream_node(Bt, NewReds, Node, StartKey, InRange, Dir, Fun, Acc) of
- {ok, Acc2} ->
- Red = element(2, Node),
- stream_kp_node(Bt, [Red | NewReds], Rest, InRange, Dir, Fun, Acc2);
- {stop, LastReds, Acc2} ->
- {stop, LastReds, Acc2}
- end
- end.
-
-stream_kv_node(Bt, Reds, KVs, StartKey, InRange, Dir, Fun, Acc) ->
- DropFun =
- case Dir of
- fwd ->
- fun({Key, _}) -> less(Bt, Key, StartKey) end;
- rev ->
- fun({Key, _}) -> less(Bt, StartKey, Key) end
- end,
- {LTKVs, GTEKVs} = lists:splitwith(DropFun, KVs),
- AssembleLTKVs = [assemble(Bt,K,V) || {K,V} <- LTKVs],
- stream_kv_node2(Bt, Reds, AssembleLTKVs, GTEKVs, InRange, Dir, Fun, Acc).
-
-stream_kv_node2(_Bt, _Reds, _PrevKVs, [], _InRange, _Dir, _Fun, Acc) ->
- {ok, Acc};
-stream_kv_node2(Bt, Reds, PrevKVs, [{K,V} | RestKVs], InRange, Dir, Fun, Acc) ->
- case InRange(K) of
- false ->
- {stop, {PrevKVs, Reds}, Acc};
- true ->
- AssembledKV = assemble(Bt, K, V),
- case Fun(visit, AssembledKV, {PrevKVs, Reds}, Acc) of
- {ok, Acc2} ->
- stream_kv_node2(Bt, Reds, [AssembledKV | PrevKVs], RestKVs, InRange, Dir, Fun, Acc2);
- {stop, Acc2} ->
- {stop, {PrevKVs, Reds}, Acc2}
- end
- end.
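The streaming folds above drive the user callback with two event shapes: Fun(traverse, Key, Red, Acc) for each KP node, which may answer {ok, _}, {skip, _} or {stop, _}, and Fun(visit, AssembledKV, {PrevKVs, Reds}, Acc) for each KV, which answers {ok, _} or {stop, _}. A hypothetical callback that collects the first three visited KVs and then halts the fold could look like this sketch (names are illustrative only):

Take3 = fun
    (traverse, _Key, _Red, Acc) ->
        {ok, Acc};                                   % descend into every subtree
    (visit, KV, _RedsAcc, {N, Taken}) when N < 3 ->
        {ok, {N + 1, [KV | Taken]}};                 % keep collecting
    (visit, _KV, _RedsAcc, {N, Taken}) ->
        {stop, {N, Taken}}                           % third KV seen, halt
end.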
diff --git a/src/couch/src/couch_changes.erl b/src/couch/src/couch_changes.erl
deleted file mode 100644
index 6e9294a56..000000000
--- a/src/couch/src/couch_changes.erl
+++ /dev/null
@@ -1,724 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_changes).
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--export([
- handle_db_changes/3,
- get_changes_timeout/2,
- wait_updated/3,
- get_rest_updated/1,
- configure_filter/4,
- filter/3,
- handle_db_event/3,
- handle_view_event/3,
- send_changes_doc_ids/6,
- send_changes_design_docs/6
-]).
-
--export([changes_enumerator/2]).
-
-%% export so we can use fully qualified call to facilitate hot-code upgrade
--export([
- keep_sending_changes/3
-]).
-
--record(changes_acc, {
- db,
- seq,
- prepend,
- filter,
- callback,
- user_acc,
- resp_type,
- limit,
- include_docs,
- doc_options,
- conflicts,
- timeout,
- timeout_fun,
- aggregation_kvs,
- aggregation_results
-}).
-
-handle_db_changes(Args0, Req, Db0) ->
- #changes_args{
- style = Style,
- filter = FilterName,
- feed = Feed,
- dir = Dir,
- since = Since
- } = Args0,
- Filter = configure_filter(FilterName, Style, Req, Db0),
- Args = Args0#changes_args{filter_fun = Filter},
- DbName = couch_db:name(Db0),
- StartListenerFun = fun() ->
- couch_event:link_listener(
- ?MODULE, handle_db_event, self(), [{dbname, DbName}]
- )
- end,
- Start = fun() ->
- {ok, Db} = couch_db:reopen(Db0),
- StartSeq = case Dir of
- rev ->
- couch_db:get_update_seq(Db);
- fwd ->
- Since
- end,
- {Db, StartSeq}
- end,
- % begin timer to deal with heartbeat when filter function fails
- case Args#changes_args.heartbeat of
- undefined ->
- erlang:erase(last_changes_heartbeat);
- Val when is_integer(Val); Val =:= true ->
- put(last_changes_heartbeat, os:timestamp())
- end,
-
- case lists:member(Feed, ["continuous", "longpoll", "eventsource"]) of
- true ->
- fun(CallbackAcc) ->
- {Callback, UserAcc} = get_callback_acc(CallbackAcc),
- {ok, Listener} = StartListenerFun(),
-
- {Db, StartSeq} = Start(),
- UserAcc2 = start_sending_changes(Callback, UserAcc, Feed),
- {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
- Acc0 = build_acc(Args, Callback, UserAcc2, Db, StartSeq,
- <<"">>, Timeout, TimeoutFun),
- try
- keep_sending_changes(
- Args#changes_args{dir=fwd},
- Acc0,
- true)
- after
- couch_event:stop_listener(Listener),
- get_rest_updated(ok) % clean out any remaining update messages
- end
- end;
- false ->
- fun(CallbackAcc) ->
- {Callback, UserAcc} = get_callback_acc(CallbackAcc),
- UserAcc2 = start_sending_changes(Callback, UserAcc, Feed),
- {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
- {Db, StartSeq} = Start(),
- Acc0 = build_acc(Args#changes_args{feed="normal"}, Callback,
- UserAcc2, Db, StartSeq, <<>>,
- Timeout, TimeoutFun),
- {ok, #changes_acc{seq = LastSeq, user_acc = UserAcc3}} =
- send_changes(
- Acc0,
- Dir,
- true),
- end_sending_changes(Callback, UserAcc3, LastSeq, Feed)
- end
- end.
-
-
-handle_db_event(_DbName, updated, Parent) ->
- Parent ! updated,
- {ok, Parent};
-handle_db_event(_DbName, deleted, Parent) ->
- Parent ! deleted,
- {ok, Parent};
-handle_db_event(_DbName, _Event, Parent) ->
- {ok, Parent}.
-
-
-handle_view_event(_DbName, Msg, {Parent, DDocId}) ->
- case Msg of
- {index_commit, DDocId} ->
- Parent ! updated;
- {index_delete, DDocId} ->
- Parent ! deleted;
- _ ->
- ok
- end,
- {ok, {Parent, DDocId}}.
-
-get_callback_acc({Callback, _UserAcc} = Pair) when is_function(Callback, 3) ->
- Pair;
-get_callback_acc(Callback) when is_function(Callback, 2) ->
- {fun(Ev, Data, _) -> Callback(Ev, Data) end, ok}.
-
-
-configure_filter("_doc_ids", Style, Req, _Db) ->
- {doc_ids, Style, get_doc_ids(Req)};
-configure_filter("_selector", Style, Req, _Db) ->
- {selector, Style, get_selector_and_fields(Req)};
-configure_filter("_design", Style, _Req, _Db) ->
- {design_docs, Style};
-configure_filter("_view", Style, Req, Db) ->
- ViewName = get_view_qs(Req),
- if ViewName /= "" -> ok; true ->
- throw({bad_request, "`view` filter parameter is not provided."})
- end,
- ViewNameParts = string:tokens(ViewName, "/"),
- case [?l2b(couch_httpd:unquote(Part)) || Part <- ViewNameParts] of
- [DName, VName] ->
- {ok, DDoc} = open_ddoc(Db, <<"_design/", DName/binary>>),
- check_member_exists(DDoc, [<<"views">>, VName]),
- case couch_db:is_clustered(Db) of
- true ->
- DIR = fabric_util:doc_id_and_rev(DDoc),
- {fetch, view, Style, DIR, VName};
- false ->
- {view, Style, DDoc, VName}
- end;
- [] ->
- Msg = "`view` must be of the form `designname/viewname`",
- throw({bad_request, Msg})
- end;
-configure_filter([$_ | _], _Style, _Req, _Db) ->
- throw({bad_request, "unknown builtin filter name"});
-configure_filter("", main_only, _Req, _Db) ->
- {default, main_only};
-configure_filter("", all_docs, _Req, _Db) ->
- {default, all_docs};
-configure_filter(FilterName, Style, Req, Db) ->
- FilterNameParts = string:tokens(FilterName, "/"),
- case [?l2b(couch_httpd:unquote(Part)) || Part <- FilterNameParts] of
- [DName, FName] ->
- {ok, DDoc} = open_ddoc(Db, <<"_design/", DName/binary>>),
- check_member_exists(DDoc, [<<"filters">>, FName]),
- case couch_db:is_clustered(Db) of
- true ->
- DIR = fabric_util:doc_id_and_rev(DDoc),
- {fetch, custom, Style, Req, DIR, FName};
- false->
- {custom, Style, Req, DDoc, FName}
- end;
-
- [] ->
- {default, Style};
- _Else ->
- Msg = "`filter` must be of the form `designname/filtername`",
- throw({bad_request, Msg})
- end.
-
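A `filter` value such as "app/by_type" is split on "/"; anything that does not yield exactly two parts (apart from the empty default) is rejected with a bad_request. For reference, string:tokens/2 behaves like this in the shell:

1> string:tokens("app/by_type", "/").
["app","by_type"]
2> string:tokens("", "/").
[]
3> string:tokens("app/by_type/extra", "/").
["app","by_type","extra"]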
-
-filter(Db, #full_doc_info{}=FDI, Filter) ->
- filter(Db, couch_doc:to_doc_info(FDI), Filter);
-filter(_Db, DocInfo, {default, Style}) ->
- apply_style(DocInfo, Style);
-filter(_Db, DocInfo, {doc_ids, Style, DocIds}) ->
- case lists:member(DocInfo#doc_info.id, DocIds) of
- true ->
- apply_style(DocInfo, Style);
- false ->
- []
- end;
-filter(Db, DocInfo, {selector, Style, {Selector, _Fields}}) ->
- Docs = open_revs(Db, DocInfo, Style),
- Passes = [mango_selector:match(Selector, couch_doc:to_json_obj(Doc, []))
- || Doc <- Docs],
- filter_revs(Passes, Docs);
-filter(_Db, DocInfo, {design_docs, Style}) ->
- case DocInfo#doc_info.id of
- <<"_design", _/binary>> ->
- apply_style(DocInfo, Style);
- _ ->
- []
- end;
-filter(Db, DocInfo, {view, Style, DDoc, VName}) ->
- Docs = open_revs(Db, DocInfo, Style),
- {ok, Passes} = couch_query_servers:filter_view(DDoc, VName, Docs),
- filter_revs(Passes, Docs);
-filter(Db, DocInfo, {custom, Style, Req0, DDoc, FName}) ->
- Req = case Req0 of
- {json_req, _} -> Req0;
- #httpd{} -> {json_req, couch_httpd_external:json_req_obj(Req0, Db)}
- end,
- Docs = open_revs(Db, DocInfo, Style),
- {ok, Passes} = couch_query_servers:filter_docs(Req, Db, DDoc, FName, Docs),
- filter_revs(Passes, Docs).
-
-
-get_view_qs({json_req, {Props}}) ->
- {Query} = couch_util:get_value(<<"query">>, Props, {[]}),
- binary_to_list(couch_util:get_value(<<"view">>, Query, ""));
-get_view_qs(Req) ->
- couch_httpd:qs_value(Req, "view", "").
-
-get_doc_ids({json_req, {Props}}) ->
- check_docids(couch_util:get_value(<<"doc_ids">>, Props));
-get_doc_ids(#httpd{method='POST'}=Req) ->
- couch_httpd:validate_ctype(Req, "application/json"),
- {Props} = couch_httpd:json_body_obj(Req),
- check_docids(couch_util:get_value(<<"doc_ids">>, Props));
-get_doc_ids(#httpd{method='GET'}=Req) ->
- DocIds = ?JSON_DECODE(couch_httpd:qs_value(Req, "doc_ids", "null")),
- check_docids(DocIds);
-get_doc_ids(_) ->
- throw({bad_request, no_doc_ids_provided}).
-
-
-get_selector_and_fields({json_req, {Props}}) ->
- Selector = check_selector(couch_util:get_value(<<"selector">>, Props)),
- Fields = check_fields(couch_util:get_value(<<"fields">>, Props, nil)),
- {Selector, Fields};
-get_selector_and_fields(#httpd{method='POST'}=Req) ->
- couch_httpd:validate_ctype(Req, "application/json"),
- get_selector_and_fields({json_req, couch_httpd:json_body_obj(Req)});
-get_selector_and_fields(_) ->
- throw({bad_request, "Selector must be specified in POST payload"}).
-
-
-check_docids(DocIds) when is_list(DocIds) ->
- lists:foreach(fun
- (DocId) when not is_binary(DocId) ->
- Msg = "`doc_ids` filter parameter is not a list of doc ids.",
- throw({bad_request, Msg});
- (_) -> ok
- end, DocIds),
- DocIds;
-check_docids(_) ->
- Msg = "`doc_ids` filter parameter is not a list of doc ids.",
- throw({bad_request, Msg}).
-
-
-check_selector(Selector={_}) ->
- try
- mango_selector:normalize(Selector)
- catch
- {mango_error, Mod, Reason0} ->
- {_StatusCode, _Error, Reason} = mango_error:info(Mod, Reason0),
- throw({bad_request, Reason})
- end;
-check_selector(_Selector) ->
- throw({bad_request, "Selector error: expected a JSON object"}).
-
-
-check_fields(nil) ->
- nil;
-check_fields(Fields) when is_list(Fields) ->
- try
- {ok, Fields1} = mango_fields:new(Fields),
- Fields1
- catch
- {mango_error, Mod, Reason0} ->
- {_StatusCode, _Error, Reason} = mango_error:info(Mod, Reason0),
- throw({bad_request, Reason})
- end;
-check_fields(_Fields) ->
- throw({bad_request, "Selector error: fields must be JSON array"}).
-
-
-open_ddoc(Db, DDocId) ->
- DbName = couch_db:name(Db),
- case couch_db:is_clustered(Db) of
- true ->
- case ddoc_cache:open_doc(mem3:dbname(DbName), DDocId) of
- {ok, _} = Resp -> Resp;
- Else -> throw(Else)
- end;
- false ->
- case couch_db:open_doc(Db, DDocId, [ejson_body]) of
- {ok, _} = Resp -> Resp;
- Else -> throw(Else)
- end
- end.
-
-
-check_member_exists(#doc{body={Props}}, Path) ->
- couch_util:get_nested_json_value({Props}, Path).
-
-
-apply_style(#doc_info{revs=Revs}, main_only) ->
- [#rev_info{rev=Rev} | _] = Revs,
- [{[{<<"rev">>, couch_doc:rev_to_str(Rev)}]}];
-apply_style(#doc_info{revs=Revs}, all_docs) ->
- [{[{<<"rev">>, couch_doc:rev_to_str(R)}]} || #rev_info{rev=R} <- Revs].
-
-
-open_revs(Db, DocInfo, Style) ->
- DocInfos = case Style of
- main_only -> [DocInfo];
- all_docs -> [DocInfo#doc_info{revs=[R]}|| R <- DocInfo#doc_info.revs]
- end,
- OpenOpts = [deleted, conflicts],
- % Relying on list comprehensions to silence errors
- OpenResults = [couch_db:open_doc(Db, DI, OpenOpts) || DI <- DocInfos],
- [Doc || {ok, Doc} <- OpenResults].
-
-
-filter_revs(Passes, Docs) ->
- lists:flatmap(fun
- ({true, #doc{revs={RevPos, [RevId | _]}}}) ->
- RevStr = couch_doc:rev_to_str({RevPos, RevId}),
- Change = {[{<<"rev">>, RevStr}]},
- [Change];
- (_) ->
- []
- end, lists:zip(Passes, Docs)).
-
-
-get_changes_timeout(Args, Callback) ->
- #changes_args{
- heartbeat = Heartbeat,
- timeout = Timeout,
- feed = ResponseType
- } = Args,
- DefaultTimeout = list_to_integer(
- config:get("httpd", "changes_timeout", "60000")
- ),
- case Heartbeat of
- undefined ->
- case Timeout of
- undefined ->
- {DefaultTimeout, fun(UserAcc) -> {stop, UserAcc} end};
- infinity ->
- {infinity, fun(UserAcc) -> {stop, UserAcc} end};
- _ ->
- {lists:min([DefaultTimeout, Timeout]),
- fun(UserAcc) -> {stop, UserAcc} end}
- end;
- true ->
- {DefaultTimeout,
- fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end};
- _ ->
- {lists:min([DefaultTimeout, Heartbeat]),
- fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end}
- end.
-
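Concretely, with the stock 60000 ms default above, the {Wait, TimeoutFun} pairs work out as follows (assuming no other configuration overrides):

%% heartbeat     timeout     ->  {Wait,     on timeout}
%% undefined     undefined   ->  {60000,    stop the feed}
%% undefined     30000       ->  {30000,    stop the feed}
%% undefined     90000       ->  {60000,    stop the feed}   (lists:min caps it)
%% undefined     infinity    ->  {infinity, stop the feed}
%% true          any         ->  {60000,    send heartbeat}
%% 5000          any         ->  {5000,     send heartbeat}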
-start_sending_changes(_Callback, UserAcc, ResponseType)
- when ResponseType =:= "continuous"
- orelse ResponseType =:= "eventsource" ->
- UserAcc;
-start_sending_changes(Callback, UserAcc, ResponseType) ->
- Callback(start, ResponseType, UserAcc).
-
-build_acc(Args, Callback, UserAcc, Db, StartSeq, Prepend, Timeout, TimeoutFun) ->
- #changes_args{
- include_docs = IncludeDocs,
- doc_options = DocOpts,
- conflicts = Conflicts,
- limit = Limit,
- feed = ResponseType,
- filter_fun = Filter
- } = Args,
- #changes_acc{
- db = Db,
- seq = StartSeq,
- prepend = Prepend,
- filter = Filter,
- callback = Callback,
- user_acc = UserAcc,
- resp_type = ResponseType,
- limit = Limit,
- include_docs = IncludeDocs,
- doc_options = DocOpts,
- conflicts = Conflicts,
- timeout = Timeout,
- timeout_fun = TimeoutFun,
- aggregation_results=[],
- aggregation_kvs=[]
- }.
-
-send_changes(Acc, Dir, FirstRound) ->
- #changes_acc{
- db = Db,
- seq = StartSeq,
- filter = Filter
- } = maybe_upgrade_changes_acc(Acc),
- DbEnumFun = fun changes_enumerator/2,
- case can_optimize(FirstRound, Filter) of
- {true, Fun} ->
- Fun(Db, StartSeq, Dir, DbEnumFun, Acc, Filter);
- _ ->
- Opts = [{dir, Dir}],
- couch_db:fold_changes(Db, StartSeq, DbEnumFun, Acc, Opts)
- end.
-
-
-can_optimize(true, {doc_ids, _Style, DocIds}) ->
- MaxDocIds = config:get_integer("couchdb",
- "changes_doc_ids_optimization_threshold", 100),
- if length(DocIds) =< MaxDocIds ->
- {true, fun send_changes_doc_ids/6};
- true ->
- false
- end;
-can_optimize(true, {design_docs, _Style}) ->
- {true, fun send_changes_design_docs/6};
-can_optimize(_, _) ->
- false.
-
-
-send_changes_doc_ids(Db, StartSeq, Dir, Fun, Acc0, {doc_ids, _Style, DocIds}) ->
- Results = couch_db:get_full_doc_infos(Db, DocIds),
- FullInfos = lists:foldl(fun
- (#full_doc_info{}=FDI, Acc) -> [FDI | Acc];
- (not_found, Acc) -> Acc
- end, [], Results),
- send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0).
-
-
-send_changes_design_docs(Db, StartSeq, Dir, Fun, Acc0, {design_docs, _Style}) ->
- FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
- Opts = [
- include_deleted,
- {start_key, <<"_design/">>},
- {end_key_gt, <<"_design0">>}
- ],
- {ok, FullInfos} = couch_db:fold_docs(Db, FoldFun, [], Opts),
- send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0).
-
-
-send_lookup_changes(FullDocInfos, StartSeq, Dir, Db, Fun, Acc0) ->
- FoldFun = case Dir of
- fwd -> fun lists:foldl/3;
- rev -> fun lists:foldr/3
- end,
- GreaterFun = case Dir of
- fwd -> fun(A, B) -> A > B end;
- rev -> fun(A, B) -> A =< B end
- end,
- DocInfos = lists:foldl(fun(FDI, Acc) ->
- DI = couch_doc:to_doc_info(FDI),
- case GreaterFun(DI#doc_info.high_seq, StartSeq) of
- true -> [DI | Acc];
- false -> Acc
- end
- end, [], FullDocInfos),
- SortedDocInfos = lists:keysort(#doc_info.high_seq, DocInfos),
- FinalAcc = try
- FoldFun(fun(DocInfo, Acc) ->
- case Fun(DocInfo, Acc) of
- {ok, NewAcc} ->
- NewAcc;
- {stop, NewAcc} ->
- throw({stop, NewAcc})
- end
- end, Acc0, SortedDocInfos)
- catch
- {stop, Acc} -> Acc
- end,
- case Dir of
- fwd ->
- FinalAcc0 = case element(1, FinalAcc) of
- changes_acc -> % we came here via couch_http or internal call
- FinalAcc#changes_acc{seq = couch_db:get_update_seq(Db)};
- fabric_changes_acc -> % we came here via chttpd / fabric / rexi
- FinalAcc#fabric_changes_acc{seq = couch_db:get_update_seq(Db)}
- end,
- {ok, FinalAcc0};
- rev -> {ok, FinalAcc}
- end.
-
-
-keep_sending_changes(Args, Acc0, FirstRound) ->
- #changes_args{
- feed = ResponseType,
- limit = Limit,
- db_open_options = DbOptions
- } = Args,
-
- {ok, ChangesAcc} = send_changes(Acc0, fwd, FirstRound),
-
- #changes_acc{
- db = Db, callback = Callback,
- timeout = Timeout, timeout_fun = TimeoutFun, seq = EndSeq,
- prepend = Prepend2, user_acc = UserAcc2, limit = NewLimit
- } = maybe_upgrade_changes_acc(ChangesAcc),
-
- couch_db:close(Db),
- if Limit > NewLimit, ResponseType == "longpoll" ->
- end_sending_changes(Callback, UserAcc2, EndSeq, ResponseType);
- true ->
- case wait_updated(Timeout, TimeoutFun, UserAcc2) of
- {updated, UserAcc4} ->
- DbOptions1 = [{user_ctx, couch_db:get_user_ctx(Db)} | DbOptions],
- case couch_db:open(couch_db:name(Db), DbOptions1) of
- {ok, Db2} ->
- ?MODULE:keep_sending_changes(
- Args#changes_args{limit=NewLimit},
- ChangesAcc#changes_acc{
- db = Db2,
- user_acc = UserAcc4,
- seq = EndSeq,
- prepend = Prepend2,
- timeout = Timeout,
- timeout_fun = TimeoutFun},
- false);
- _Else ->
- end_sending_changes(Callback, UserAcc2, EndSeq, ResponseType)
- end;
- {stop, UserAcc4} ->
- end_sending_changes(Callback, UserAcc4, EndSeq, ResponseType)
- end
- end.
-
-end_sending_changes(Callback, UserAcc, EndSeq, ResponseType) ->
- Callback({stop, EndSeq}, ResponseType, UserAcc).
-
-changes_enumerator(Value, Acc) ->
- #changes_acc{
- filter = Filter, callback = Callback, prepend = Prepend,
- user_acc = UserAcc, limit = Limit, resp_type = ResponseType, db = Db,
- timeout = Timeout, timeout_fun = TimeoutFun
- } = maybe_upgrade_changes_acc(Acc),
- Results0 = filter(Db, Value, Filter),
- Results = [Result || Result <- Results0, Result /= null],
- Seq = case Value of
- #full_doc_info{} ->
- Value#full_doc_info.update_seq;
- #doc_info{} ->
- Value#doc_info.high_seq
- end,
- Go = if (Limit =< 1) andalso Results =/= [] -> stop; true -> ok end,
- case Results of
- [] ->
- {Done, UserAcc2} = maybe_heartbeat(Timeout, TimeoutFun, UserAcc),
- case Done of
- stop ->
- {stop, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}};
- ok ->
- {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}}
- end;
- _ ->
- if ResponseType =:= "continuous" orelse ResponseType =:= "eventsource" ->
- ChangesRow = changes_row(Results, Value, Acc),
- UserAcc2 = Callback({change, ChangesRow, <<>>}, ResponseType, UserAcc),
- reset_heartbeat(),
- {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2, limit = Limit - 1}};
- true ->
- ChangesRow = changes_row(Results, Value, Acc),
- UserAcc2 = Callback({change, ChangesRow, Prepend}, ResponseType, UserAcc),
- reset_heartbeat(),
- {Go, Acc#changes_acc{
- seq = Seq, prepend = <<",\n">>,
- user_acc = UserAcc2, limit = Limit - 1}}
- end
- end.
-
-
-
-changes_row(Results, #full_doc_info{} = FDI, Acc) ->
- changes_row(Results, couch_doc:to_doc_info(FDI), Acc);
-changes_row(Results, DocInfo, Acc0) ->
- Acc = maybe_upgrade_changes_acc(Acc0),
- #doc_info{
- id = Id, high_seq = Seq, revs = [#rev_info{deleted = Del} | _]
- } = DocInfo,
- {[{<<"seq">>, Seq}, {<<"id">>, Id}, {<<"changes">>, Results}] ++
- deleted_item(Del) ++ maybe_get_changes_doc(DocInfo, Acc)}.
-
-maybe_get_changes_doc(Value, #changes_acc{include_docs=true}=Acc) ->
- #changes_acc{
- db = Db,
- doc_options = DocOpts,
- conflicts = Conflicts,
- filter = Filter
- } = Acc,
- Opts = case Conflicts of
- true -> [deleted, conflicts];
- false -> [deleted]
- end,
- load_doc(Db, Value, Opts, DocOpts, Filter);
-
-maybe_get_changes_doc(_Value, _Acc) ->
- [].
-
-
-load_doc(Db, Value, Opts, DocOpts, Filter) ->
- case couch_index_util:load_doc(Db, Value, Opts) of
- null ->
- [{doc, null}];
- Doc ->
- [{doc, doc_to_json(Doc, DocOpts, Filter)}]
- end.
-
-
-doc_to_json(Doc, DocOpts, {selector, _Style, {_Selector, Fields}})
- when Fields =/= nil ->
- mango_fields:extract(couch_doc:to_json_obj(Doc, DocOpts), Fields);
-doc_to_json(Doc, DocOpts, _Filter) ->
- couch_doc:to_json_obj(Doc, DocOpts).
-
-
-deleted_item(true) -> [{<<"deleted">>, true}];
-deleted_item(_) -> [].
-
-% waits for an updated msg; if there are multiple msgs, collects them.
-wait_updated(Timeout, TimeoutFun, UserAcc) ->
- receive
- updated ->
- get_rest_updated(UserAcc);
- deleted ->
- {stop, UserAcc}
- after Timeout ->
- {Go, UserAcc2} = TimeoutFun(UserAcc),
- case Go of
- ok ->
- ?MODULE:wait_updated(Timeout, TimeoutFun, UserAcc2);
- stop ->
- {stop, UserAcc2}
- end
- end.
-
-get_rest_updated(UserAcc) ->
- receive
- updated ->
- get_rest_updated(UserAcc)
- after 0 ->
- {updated, UserAcc}
- end.
-
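wait_updated/3 and get_rest_updated/1 above sit in a selective receive on the updated/deleted messages that handle_db_event/3 posts to the feed process, coalescing a burst of updated messages into a single wake-up. A toy, self-contained illustration of that message protocol (plain spawn/send here; the real listener is wired up through couch_event):

Feed = spawn(fun Loop() ->
    receive
        updated ->
            io:format("db changed; drain any further notifications~n"),
            Loop();
        deleted ->
            io:format("db deleted; end the feed~n")
    after 60000 ->
        io:format("timed out; heartbeat or stop~n"),
        Loop()
    end
end),
Feed ! updated,
Feed ! deleted.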
-reset_heartbeat() ->
- case get(last_changes_heartbeat) of
- undefined ->
- ok;
- _ ->
- put(last_changes_heartbeat, os:timestamp())
- end.
-
-maybe_heartbeat(Timeout, TimeoutFun, Acc) ->
- Before = get(last_changes_heartbeat),
- case Before of
- undefined ->
- {ok, Acc};
- _ ->
- Now = os:timestamp(),
- case timer:now_diff(Now, Before) div 1000 >= Timeout of
- true ->
- Acc2 = TimeoutFun(Acc),
- put(last_changes_heartbeat, Now),
- Acc2;
- false ->
- {ok, Acc}
- end
- end.
-
-
-maybe_upgrade_changes_acc(#changes_acc{} = Acc) ->
- Acc;
-maybe_upgrade_changes_acc(Acc) when tuple_size(Acc) == 19 ->
- #changes_acc{
- db = element(2, Acc),
- seq = element(6, Acc),
- prepend = element(7, Acc),
- filter = element(8, Acc),
- callback = element(9, Acc),
- user_acc = element(10, Acc),
- resp_type = element(11, Acc),
- limit = element(12, Acc),
- include_docs = element(13, Acc),
- doc_options = element(14, Acc),
- conflicts = element(15, Acc),
- timeout = element(16, Acc),
- timeout_fun = element(17, Acc),
- aggregation_kvs = element(18, Acc),
- aggregation_results = element(19, Acc)
- }.
diff --git a/src/couch/src/couch_compress.erl b/src/couch/src/couch_compress.erl
deleted file mode 100644
index cfcc2a481..000000000
--- a/src/couch/src/couch_compress.erl
+++ /dev/null
@@ -1,99 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_compress).
-
--export([compress/2, decompress/1, is_compressed/2]).
--export([get_compression_method/0]).
--export([uncompressed_size/1]).
-
--include_lib("couch/include/couch_db.hrl").
-
-% binaries compressed with snappy have their first byte set to this value
--define(SNAPPY_PREFIX, 1).
-% Term prefixes documented at:
-% http://www.erlang.org/doc/apps/erts/erl_ext_dist.html
--define(TERM_PREFIX, 131).
--define(COMPRESSED_TERM_PREFIX, 131, 80).
-
-
-get_compression_method() ->
- case config:get("couchdb", "file_compression") of
- undefined ->
- ?DEFAULT_COMPRESSION;
- Method1 ->
- case string:tokens(Method1, "_") of
- [Method] ->
- list_to_existing_atom(Method);
- [Method, Level] ->
- {list_to_existing_atom(Method), list_to_integer(Level)}
- end
- end.
-
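The configured method string is either a bare algorithm name or a name with a level suffix; splitting on "_" is what turns "deflate_9" into {deflate, 9} while "snappy" stays a single atom:

1> string:tokens("snappy", "_").
["snappy"]
2> string:tokens("deflate_9", "_").
["deflate","9"]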
-
-compress(<<?SNAPPY_PREFIX, _/binary>> = Bin, snappy) ->
- Bin;
-compress(<<?SNAPPY_PREFIX, _/binary>> = Bin, Method) ->
- compress(decompress(Bin), Method);
-compress(<<?COMPRESSED_TERM_PREFIX, _/binary>> = Bin, {deflate, _Level}) ->
- Bin;
-compress(<<?TERM_PREFIX, _/binary>> = Bin, Method) ->
- compress(decompress(Bin), Method);
-compress(Term, none) ->
- ?term_to_bin(Term);
-compress(Term, {deflate, Level}) ->
- term_to_binary(Term, [{minor_version, 1}, {compressed, Level}]);
-compress(Term, snappy) ->
- Bin = ?term_to_bin(Term),
- try
- {ok, CompressedBin} = snappy:compress(Bin),
- <<?SNAPPY_PREFIX, CompressedBin/binary>>
- catch exit:snappy_nif_not_loaded ->
- Bin
- end.
-
-
-decompress(<<?SNAPPY_PREFIX, Rest/binary>>) ->
- {ok, TermBin} = snappy:decompress(Rest),
- binary_to_term(TermBin);
-decompress(<<?TERM_PREFIX, _/binary>> = Bin) ->
- binary_to_term(Bin);
-decompress(_) ->
- error(invalid_compression).
-
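The scheme above relies on the first byte to tell the two on-disk forms apart: snappy-compressed payloads carry the 1 prefix, while plain term_to_binary output already starts with the external-format tag 131. A minimal sketch of the same tagging idea, with zlib standing in for the snappy NIF purely so the example runs on stock OTP:

%% illustration only; the actual on-disk format uses snappy, not zlib
tag_compress(Term) ->
    <<1, (zlib:compress(term_to_binary(Term)))/binary>>.

tag_decompress(<<1, Zipped/binary>>) ->
    binary_to_term(zlib:uncompress(Zipped));
tag_decompress(<<131, _/binary>> = Plain) ->
    binary_to_term(Plain).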
-
-is_compressed(<<?SNAPPY_PREFIX, _/binary>>, Method) ->
- Method =:= snappy;
-is_compressed(<<?COMPRESSED_TERM_PREFIX, _/binary>>, {deflate, _Level}) ->
- true;
-is_compressed(<<?COMPRESSED_TERM_PREFIX, _/binary>>, _Method) ->
- false;
-is_compressed(<<?TERM_PREFIX, _/binary>>, Method) ->
- Method =:= none;
-is_compressed(Term, _Method) when not is_binary(Term) ->
- false;
-is_compressed(_, _) ->
- error(invalid_compression).
-
-
-uncompressed_size(<<?SNAPPY_PREFIX, Rest/binary>>) ->
- {ok, Size} = snappy:uncompressed_length(Rest),
- Size;
-uncompressed_size(<<?COMPRESSED_TERM_PREFIX, Size:32, _/binary>> = _Bin) ->
- % See http://erlang.org/doc/apps/erts/erl_ext_dist.html
- % The uncompressed binary would be encoded with <<131, Rest/binary>>
- % so need to add 1 for 131
- Size + 1;
-uncompressed_size(<<?TERM_PREFIX, _/binary>> = Bin) ->
- byte_size(Bin);
-uncompressed_size(_) ->
- error(invalid_compression).
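The compressed external term format is <<131, 80, UncompressedSize:32, ZlibData/binary>>, where UncompressedSize counts everything after the leading 131 tag, hence the +1 above. This is easy to confirm in the shell with any term that actually compresses:

1> Plain = term_to_binary(lists:duplicate(1000, a)).
2> Comp = term_to_binary(lists:duplicate(1000, a), [compressed]).
3> <<131, 80, Size:32, _/binary>> = Comp.
4> Size + 1 =:= byte_size(Plain).
true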
diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl
deleted file mode 100644
index 60d2bb796..000000000
--- a/src/couch/src/couch_db.erl
+++ /dev/null
@@ -1,2086 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db).
-
--export([
- create/2,
- open/2,
- open_int/2,
- incref/1,
- reopen/1,
- close/1,
-
- clustered_db/2,
- clustered_db/3,
-
- monitor/1,
- monitored_by/1,
- is_idle/1,
-
- is_admin/1,
- check_is_admin/1,
- check_is_member/1,
-
- name/1,
- get_after_doc_read_fun/1,
- get_before_doc_update_fun/1,
- get_committed_update_seq/1,
- get_compacted_seq/1,
- get_compactor_pid/1,
- get_compactor_pid_sync/1,
- get_db_info/1,
- get_partition_info/2,
- get_del_doc_count/1,
- get_doc_count/1,
- get_epochs/1,
- get_filepath/1,
- get_instance_start_time/1,
- get_pid/1,
- get_revs_limit/1,
- get_security/1,
- get_update_seq/1,
- get_user_ctx/1,
- get_uuid/1,
- get_purge_seq/1,
- get_oldest_purge_seq/1,
- get_purge_infos_limit/1,
-
- is_db/1,
- is_system_db/1,
- is_clustered/1,
- is_system_db_name/1,
- is_partitioned/1,
-
- set_revs_limit/2,
- set_purge_infos_limit/2,
- set_security/2,
- set_user_ctx/2,
-
- load_validation_funs/1,
- reload_validation_funs/1,
-
- open_doc/2,
- open_doc/3,
- open_doc_revs/4,
- open_doc_int/3,
- get_doc_info/2,
- get_full_doc_info/2,
- get_full_doc_infos/2,
- get_missing_revs/2,
- get_design_doc/2,
- get_design_docs/1,
- get_design_doc_count/1,
- get_purge_infos/2,
-
- get_minimum_purge_seq/1,
- purge_client_exists/3,
-
- validate_docid/2,
- doc_from_json_obj_validate/2,
-
- update_doc/3,
- update_doc/4,
- update_docs/4,
- update_docs/2,
- update_docs/3,
- delete_doc/3,
-
- purge_docs/2,
- purge_docs/3,
-
- with_stream/3,
- open_write_stream/2,
- open_read_stream/2,
- is_active_stream/2,
-
- fold_docs/3,
- fold_docs/4,
- fold_local_docs/4,
- fold_design_docs/4,
- fold_changes/4,
- fold_changes/5,
- count_changes_since/2,
- fold_purge_infos/4,
- fold_purge_infos/5,
-
- calculate_start_seq/3,
- owner_of/2,
-
- start_compact/1,
- cancel_compact/1,
- wait_for_compaction/1,
- wait_for_compaction/2,
-
- dbname_suffix/1,
- normalize_dbname/1,
- validate_dbname/1,
-
- make_doc/5,
- new_revid/1
-]).
-
-
--export([
- start_link/4
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
--include("couch_db_int.hrl").
-
--define(DBNAME_REGEX,
- "^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*" % use the stock CouchDB regex
- "(\\.[0-9]{10,})?$" % but allow an optional shard timestamp at the end
-).
-
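The regex above accepts ordinary lowercase database names plus shard-style names that carry a 10-or-more digit timestamp suffix. Poking at it in the shell (illustration only):

1> Re = "^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*(\\.[0-9]{10,})?$".
2> re:run("my_db", Re, [{capture, none}]).
match
3> re:run("my_db.1638239847", Re, [{capture, none}]).
match
4> re:run("MyDb", Re, [{capture, none}]).
nomatch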
-start_link(Engine, DbName, Filepath, Options) ->
- Arg = {Engine, DbName, Filepath, Options},
- proc_lib:start_link(couch_db_updater, init, [Arg]).
-
-create(DbName, Options) ->
- couch_server:create(DbName, Options).
-
-% this is for opening a database for internal purposes like the replicator
-% or the view indexer. it never throws a reader error.
-open_int(DbName, Options) ->
- couch_server:open(DbName, Options).
-
-% this should be called anytime an http request opens the database.
-% it ensures that the http userCtx is a valid reader
-open(DbName, Options) ->
- case couch_server:open(DbName, Options) of
- {ok, Db} ->
- try
- check_is_member(Db),
- {ok, Db}
- catch
- throw:Error ->
- close(Db),
- throw(Error)
- end;
- Else -> Else
- end.
-
-
-reopen(#db{} = Db) ->
- % We could have just swapped out the storage engine
- % for this database during a compaction so we just
- % reimplement this as a close/open pair now.
- try
- open(Db#db.name, [{user_ctx, Db#db.user_ctx} | Db#db.options])
- after
- close(Db)
- end.
-
-
-% You shouldn't call this. It's part of the ref counting between
-% couch_server and couch_db instances.
-incref(#db{} = Db) ->
- couch_db_engine:incref(Db).
-
-clustered_db(DbName, Options) when is_list(Options) ->
- UserCtx = couch_util:get_value(user_ctx, Options, #user_ctx{}),
- SecProps = couch_util:get_value(security, Options, []),
- Props = couch_util:get_value(props, Options, []),
- {ok, #db{
- name = DbName,
- user_ctx = UserCtx,
- security = SecProps,
- options = [{props, Props}]
- }};
-
-clustered_db(DbName, #user_ctx{} = UserCtx) ->
- clustered_db(DbName, [{user_ctx, UserCtx}]).
-
-clustered_db(DbName, UserCtx, SecProps) ->
- clustered_db(DbName, [{user_ctx, UserCtx}, {security, SecProps}]).
-
-is_db(#db{}) ->
- true;
-is_db(_) ->
- false.
-
-is_system_db(#db{options = Options}) ->
- lists:member(sys_db, Options).
-
-is_clustered(#{}) ->
- true;
-is_clustered(#db{main_pid = nil}) ->
- true;
-is_clustered(#db{}) ->
- false;
-is_clustered(?OLD_DB_REC = Db) ->
- ?OLD_DB_MAIN_PID(Db) == undefined.
-
-is_partitioned(#db{options = Options}) ->
- Props = couch_util:get_value(props, Options, []),
- couch_util:get_value(partitioned, Props, false).
-
-close(#db{} = Db) ->
- ok = couch_db_engine:decref(Db);
-close(?OLD_DB_REC) ->
- ok.
-
-is_idle(#db{compactor_pid=nil} = Db) ->
- monitored_by(Db) == [];
-is_idle(_Db) ->
- false.
-
-monitored_by(Db) ->
- case couch_db_engine:monitored_by(Db) of
- Pids when is_list(Pids) ->
- PidTracker = whereis(couch_stats_process_tracker),
- Pids -- [Db#db.main_pid, PidTracker];
- undefined ->
- []
- end.
-
-
-monitor(#db{main_pid=MainPid}) ->
- erlang:monitor(process, MainPid).
-
-start_compact(#db{} = Db) ->
- gen_server:call(Db#db.main_pid, start_compact).
-
-cancel_compact(#db{main_pid=Pid}) ->
- gen_server:call(Pid, cancel_compact).
-
-wait_for_compaction(Db) ->
- wait_for_compaction(Db, infinity).
-
-wait_for_compaction(#db{main_pid=Pid}=Db, Timeout) ->
- Start = os:timestamp(),
- case gen_server:call(Pid, compactor_pid) of
- CPid when is_pid(CPid) ->
- Ref = erlang:monitor(process, CPid),
- receive
- {'DOWN', Ref, _, _, normal} when Timeout == infinity ->
- wait_for_compaction(Db, Timeout);
- {'DOWN', Ref, _, _, normal} ->
- Elapsed = timer:now_diff(os:timestamp(), Start) div 1000,
- wait_for_compaction(Db, Timeout - Elapsed);
- {'DOWN', Ref, _, _, Reason} ->
- {error, Reason}
- after Timeout ->
- erlang:demonitor(Ref, [flush]),
- {error, Timeout}
- end;
- _ ->
- ok
- end.
-
-delete_doc(Db, Id, Revisions) ->
- DeletedDocs = [#doc{id=Id, revs=[Rev], deleted=true} || Rev <- Revisions],
- {ok, [Result]} = update_docs(Db, DeletedDocs, []),
- {ok, Result}.
-
-open_doc(Db, IdOrDocInfo) ->
- open_doc(Db, IdOrDocInfo, []).
-
-open_doc(Db, Id, Options) ->
- increment_stat(Db, [couchdb, database_reads]),
- case open_doc_int(Db, Id, Options) of
- {ok, #doc{deleted=true}=Doc} ->
- case lists:member(deleted, Options) of
- true ->
- apply_open_options({ok, Doc},Options);
- false ->
- {not_found, deleted}
- end;
- Else ->
- apply_open_options(Else,Options)
- end.
-
-apply_open_options({ok, Doc},Options) ->
- apply_open_options2(Doc,Options);
-apply_open_options(Else,_Options) ->
- Else.
-
-apply_open_options2(Doc,[]) ->
- {ok, Doc};
-apply_open_options2(#doc{atts=Atts0,revs=Revs}=Doc,
- [{atts_since, PossibleAncestors}|Rest]) ->
- RevPos = find_ancestor_rev_pos(Revs, PossibleAncestors),
- Atts = lists:map(fun(Att) ->
- [AttPos, Data] = couch_att:fetch([revpos, data], Att),
- if AttPos > RevPos -> couch_att:store(data, Data, Att);
- true -> couch_att:store(data, stub, Att)
- end
- end, Atts0),
- apply_open_options2(Doc#doc{atts=Atts}, Rest);
-apply_open_options2(Doc, [ejson_body | Rest]) ->
- apply_open_options2(couch_doc:with_ejson_body(Doc), Rest);
-apply_open_options2(Doc,[_|Rest]) ->
- apply_open_options2(Doc,Rest).
-
-
-find_ancestor_rev_pos({_, []}, _AttsSinceRevs) ->
- 0;
-find_ancestor_rev_pos(_DocRevs, []) ->
- 0;
-find_ancestor_rev_pos({RevPos, [RevId|Rest]}, AttsSinceRevs) ->
- case lists:member({RevPos, RevId}, AttsSinceRevs) of
- true ->
- RevPos;
- false ->
- find_ancestor_rev_pos({RevPos - 1, Rest}, AttsSinceRevs)
- end.
-
-open_doc_revs(Db, Id, Revs, Options) ->
- increment_stat(Db, [couchdb, database_reads]),
- [{ok, Results}] = open_doc_revs_int(Db, [{Id, Revs}], Options),
- {ok, [apply_open_options(Result, Options) || Result <- Results]}.
-
-% Each returned result is a list of tuples:
-% {Id, MissingRevs, PossibleAncestors}
-% if no revs are missing, it's omitted from the results.
-get_missing_revs(Db, IdRevsList) ->
- Results = get_full_doc_infos(Db, [Id1 || {Id1, _Revs} <- IdRevsList]),
- {ok, find_missing(IdRevsList, Results)}.
-
-find_missing([], []) ->
- [];
-find_missing([{Id, Revs}|RestIdRevs], [FullInfo | RestLookupInfo])
- when is_record(FullInfo, full_doc_info) ->
- case couch_key_tree:find_missing(FullInfo#full_doc_info.rev_tree, Revs) of
- [] ->
- find_missing(RestIdRevs, RestLookupInfo);
- MissingRevs ->
- #doc_info{revs=RevsInfo} = couch_doc:to_doc_info(FullInfo),
- LeafRevs = [Rev || #rev_info{rev=Rev} <- RevsInfo],
- % Find the revs that are possible parents of this rev
- PossibleAncestors =
- lists:foldl(fun({LeafPos, LeafRevId}, Acc) ->
-                % this leaf is a "possible ancestor" of the missing
-                % revs if this LeafPos is less than any of the missing revs
- case lists:any(fun({MissingPos, _}) ->
- LeafPos < MissingPos end, MissingRevs) of
- true ->
- [{LeafPos, LeafRevId} | Acc];
- false ->
- Acc
- end
- end, [], LeafRevs),
- [{Id, MissingRevs, PossibleAncestors} |
- find_missing(RestIdRevs, RestLookupInfo)]
- end;
-find_missing([{Id, Revs}|RestIdRevs], [not_found | RestLookupInfo]) ->
- [{Id, Revs, []} | find_missing(RestIdRevs, RestLookupInfo)].
-
-get_doc_info(Db, Id) ->
- case get_full_doc_info(Db, Id) of
- #full_doc_info{} = FDI ->
- {ok, couch_doc:to_doc_info(FDI)};
- Else ->
- Else
- end.
-
-get_full_doc_info(Db, Id) ->
- [Result] = get_full_doc_infos(Db, [Id]),
- Result.
-
-get_full_doc_infos(Db, Ids) ->
- couch_db_engine:open_docs(Db, Ids).
-
-purge_docs(Db, IdRevs) ->
- purge_docs(Db, IdRevs, []).
-
--spec purge_docs(#db{}, [{UUId, Id, [Rev]}], [PurgeOption]) ->
- {ok, [Reply]} when
- UUId :: binary(),
- Id :: binary() | list(),
- Rev :: {non_neg_integer(), binary()},
- PurgeOption :: interactive_edit | replicated_changes,
- Reply :: {ok, []} | {ok, [Rev]}.
-purge_docs(#db{main_pid = Pid} = Db, UUIDsIdsRevs, Options) ->
- UUIDsIdsRevs2 = [{UUID, couch_util:to_binary(Id), Revs}
- || {UUID, Id, Revs} <- UUIDsIdsRevs],
- % Check here if any UUIDs already exist when
- % we're not replicating purge infos
- IsRepl = lists:member(replicated_changes, Options),
- if IsRepl -> ok; true ->
- UUIDs = [UUID || {UUID, _, _} <- UUIDsIdsRevs2],
- lists:foreach(fun(Resp) ->
- if Resp == not_found -> ok; true ->
-                Fmt = "Duplicate purge info UUID: ~s",
- Reason = io_lib:format(Fmt, [element(2, Resp)]),
- throw({badreq, Reason})
- end
- end, get_purge_infos(Db, UUIDs))
- end,
- increment_stat(Db, [couchdb, database_purges]),
- gen_server:call(Pid, {purge_docs, UUIDsIdsRevs2, Options}).
-
--spec get_purge_infos(#db{}, [UUId]) -> [PurgeInfo] when
- UUId :: binary(),
- PurgeInfo :: {PurgeSeq, UUId, Id, [Rev]} | not_found,
- PurgeSeq :: non_neg_integer(),
- Id :: binary(),
- Rev :: {non_neg_integer(), binary()}.
-get_purge_infos(Db, UUIDs) ->
- couch_db_engine:load_purge_infos(Db, UUIDs).
-
-
-get_minimum_purge_seq(#db{} = Db) ->
- PurgeSeq = couch_db_engine:get_purge_seq(Db),
- OldestPurgeSeq = couch_db_engine:get_oldest_purge_seq(Db),
- PurgeInfosLimit = couch_db_engine:get_purge_infos_limit(Db),
-
- FoldFun = fun(#doc{id = DocId, body = {Props}}, SeqAcc) ->
- case DocId of
- <<?LOCAL_DOC_PREFIX, "purge-", _/binary>> ->
- ClientSeq = couch_util:get_value(<<"purge_seq">>, Props),
- DbName = couch_db:name(Db),
- % If there's a broken doc we have to keep every
- % purge info until the doc is fixed or removed.
- Fmt = "Invalid purge doc '~s' on ~p with purge_seq '~w'",
- case ClientSeq of
- CS when is_integer(CS), CS >= PurgeSeq - PurgeInfosLimit ->
- {ok, SeqAcc};
- CS when is_integer(CS) ->
- case purge_client_exists(DbName, DocId, Props) of
- true ->
- {ok, erlang:min(CS, SeqAcc)};
- false ->
- couch_log:error(Fmt, [DocId, DbName, ClientSeq]),
- {ok, SeqAcc}
- end;
- _ ->
- couch_log:error(Fmt, [DocId, DbName, ClientSeq]),
- {ok, erlang:min(OldestPurgeSeq, SeqAcc)}
- end;
- _ ->
- {stop, SeqAcc}
- end
- end,
- InitMinSeq = PurgeSeq - PurgeInfosLimit,
- Opts = [
- {start_key, list_to_binary(?LOCAL_DOC_PREFIX ++ "purge-")}
- ],
- {ok, MinIdxSeq} = couch_db:fold_local_docs(Db, FoldFun, InitMinSeq, Opts),
- FinalSeq = case MinIdxSeq < PurgeSeq - PurgeInfosLimit of
- true -> MinIdxSeq;
- false -> erlang:max(0, PurgeSeq - PurgeInfosLimit)
- end,
- % Log a warning if we've got a purge sequence exceeding the
- % configured threshold.
- if FinalSeq >= (PurgeSeq - PurgeInfosLimit) -> ok; true ->
- Fmt = "The purge sequence for '~s' exceeds configured threshold",
- couch_log:warning(Fmt, [couch_db:name(Db)])
- end,
- FinalSeq.
-
-
-purge_client_exists(DbName, DocId, Props) ->
- % Warn about clients that have not updated their purge
- % checkpoints in the last "index_lag_warn_seconds"
- LagWindow = config:get_integer(
- "purge", "index_lag_warn_seconds", 86400), % Default 24 hours
-
- {Mega, Secs, _} = os:timestamp(),
- NowSecs = Mega * 1000000 + Secs,
- LagThreshold = NowSecs - LagWindow,
-
- try
- Exists = couch_db_plugin:is_valid_purge_client(DbName, Props),
- if not Exists -> ok; true ->
- Updated = couch_util:get_value(<<"updated_on">>, Props),
-            if is_integer(Updated) andalso Updated > LagThreshold -> ok; true ->
- Diff = NowSecs - Updated,
- Fmt1 = "Purge checkpoint '~s' not updated in ~p seconds
- in database ~p",
- couch_log:error(Fmt1, [DocId, Diff, DbName])
- end
- end,
- Exists
- catch _:_ ->
- % If we fail to check for a client we have to assume that
- % it exists.
- Fmt2 = "Failed to check purge checkpoint using
- document '~p' in database ~p",
- couch_log:error(Fmt2, [DocId, DbName]),
- true
- end.
-
-
-set_purge_infos_limit(#db{main_pid=Pid}=Db, Limit) when Limit > 0 ->
- check_is_admin(Db),
- gen_server:call(Pid, {set_purge_infos_limit, Limit}, infinity);
-set_purge_infos_limit(_Db, _Limit) ->
- throw(invalid_purge_infos_limit).
-
-
-get_after_doc_read_fun(#db{after_doc_read = Fun}) ->
- Fun.
-
-get_before_doc_update_fun(#db{before_doc_update = Fun}) ->
- Fun.
-
-get_committed_update_seq(#db{committed_update_seq=Seq}) ->
- Seq.
-
-get_update_seq(#db{} = Db)->
- couch_db_engine:get_update_seq(Db).
-
-get_user_ctx(#db{user_ctx = UserCtx}) ->
- UserCtx;
-get_user_ctx(?OLD_DB_REC = Db) ->
- ?OLD_DB_USER_CTX(Db).
-
-get_purge_seq(#db{}=Db) ->
- couch_db_engine:get_purge_seq(Db).
-
-get_oldest_purge_seq(#db{}=Db) ->
- couch_db_engine:get_oldest_purge_seq(Db).
-
-get_purge_infos_limit(#db{}=Db) ->
- couch_db_engine:get_purge_infos_limit(Db).
-
-get_pid(#db{main_pid = Pid}) ->
- Pid.
-
-get_del_doc_count(Db) ->
- {ok, couch_db_engine:get_del_doc_count(Db)}.
-
-get_doc_count(Db) ->
- {ok, couch_db_engine:get_doc_count(Db)}.
-
-get_uuid(#db{}=Db) ->
- couch_db_engine:get_uuid(Db).
-
-get_epochs(#db{}=Db) ->
- Epochs = couch_db_engine:get_epochs(Db),
- validate_epochs(Epochs),
- Epochs.
-
-get_filepath(#db{filepath = FilePath}) ->
- FilePath.
-
-get_instance_start_time(#db{instance_start_time = IST}) ->
- IST.
-
-get_compacted_seq(#db{}=Db) ->
- couch_db_engine:get_compacted_seq(Db).
-
-get_compactor_pid(#db{compactor_pid = Pid}) ->
- Pid.
-
-get_compactor_pid_sync(#db{main_pid=Pid}=Db) ->
- case gen_server:call(Pid, compactor_pid, infinity) of
- CPid when is_pid(CPid) ->
- CPid;
- _ ->
- nil
- end.
-
-get_db_info(Db) ->
- #db{
- name = Name,
- compactor_pid = Compactor,
- instance_start_time = StartTime,
- committed_update_seq = CommittedUpdateSeq
- } = Db,
- {ok, DocCount} = get_doc_count(Db),
- {ok, DelDocCount} = get_del_doc_count(Db),
- SizeInfo = couch_db_engine:get_size_info(Db),
- DiskVersion = couch_db_engine:get_disk_version(Db),
- Uuid = case get_uuid(Db) of
- undefined -> null;
- Uuid0 -> Uuid0
- end,
- CompactedSeq = case get_compacted_seq(Db) of
- undefined -> null;
- Else1 -> Else1
- end,
- Props = case couch_db_engine:get_props(Db) of
- undefined -> null;
- Else2 -> {Else2}
- end,
- InfoList = [
- {db_name, Name},
- {engine, couch_db_engine:get_engine(Db)},
- {doc_count, DocCount},
- {doc_del_count, DelDocCount},
- {update_seq, get_update_seq(Db)},
- {purge_seq, couch_db_engine:get_purge_seq(Db)},
- {compact_running, Compactor /= nil},
- {sizes, {SizeInfo}},
- {instance_start_time, StartTime},
- {disk_format_version, DiskVersion},
- {committed_update_seq, CommittedUpdateSeq},
- {compacted_seq, CompactedSeq},
- {props, Props},
- {uuid, Uuid}
- ],
- {ok, InfoList}.
-
-get_partition_info(#db{} = Db, Partition) when is_binary(Partition) ->
- Info = couch_db_engine:get_partition_info(Db, Partition),
- {ok, Info};
-get_partition_info(_Db, _Partition) ->
- throw({bad_request, <<"`partition` is not valid">>}).
-
-
-get_design_doc(#db{name = <<"shards/", _/binary>> = ShardDbName}, DDocId0) ->
- DDocId = couch_util:normalize_ddoc_id(DDocId0),
- DbName = mem3:dbname(ShardDbName),
- {_, Ref} = spawn_monitor(fun() ->
- exit(fabric:open_doc(DbName, DDocId, []))
- end),
- receive {'DOWN', Ref, _, _, Response} ->
- Response
- end;
-get_design_doc(#db{} = Db, DDocId0) ->
- DDocId = couch_util:normalize_ddoc_id(DDocId0),
- couch_db:open_doc_int(Db, DDocId, [ejson_body]).
-
-get_design_docs(#db{name = <<"shards/", _/binary>> = ShardDbName}) ->
- DbName = mem3:dbname(ShardDbName),
- {_, Ref} = spawn_monitor(fun() -> exit(fabric:design_docs(DbName)) end),
- receive {'DOWN', Ref, _, _, Response} ->
- Response
- end;
-get_design_docs(#db{} = Db) ->
- FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
- {ok, Docs} = fold_design_docs(Db, FoldFun, [], []),
- {ok, lists:reverse(Docs)}.
-
-get_design_doc_count(#db{} = Db) ->
- FoldFun = fun(_, Acc) -> {ok, Acc + 1} end,
- fold_design_docs(Db, FoldFun, 0, []).
-
-check_is_admin(#db{user_ctx=UserCtx}=Db) ->
- case is_admin(Db) of
- true -> ok;
- false ->
- Reason = <<"You are not a db or server admin.">>,
- throw_security_error(UserCtx, Reason)
- end.
-
-check_is_member(#db{user_ctx=UserCtx}=Db) ->
- case is_member(Db) of
- true -> ok;
- false -> throw_security_error(UserCtx)
- end.
-
-is_admin(#db{user_ctx=UserCtx}=Db) ->
- case couch_db_plugin:check_is_admin(Db) of
- true -> true;
- false ->
- {Admins} = get_admins(Db),
- is_authorized(UserCtx, Admins)
- end.
-
-is_member(#db{user_ctx=UserCtx}=Db) ->
- case is_admin(Db) of
- true -> true;
- false ->
- case is_public_db(Db) of
- true -> true;
- false ->
- {Members} = get_members(Db),
- is_authorized(UserCtx, Members)
- end
- end.
-
-is_public_db(#db{}=Db) ->
- {Members} = get_members(Db),
- Names = couch_util:get_value(<<"names">>, Members, []),
- Roles = couch_util:get_value(<<"roles">>, Members, []),
- Names =:= [] andalso Roles =:= [].
-
-is_authorized(#user_ctx{name=UserName,roles=UserRoles}, Security) ->
- Names = couch_util:get_value(<<"names">>, Security, []),
- Roles = couch_util:get_value(<<"roles">>, Security, []),
- case check_security(roles, UserRoles, [<<"_admin">> | Roles]) of
- true -> true;
- false -> check_security(names, UserName, Names)
- end.
-
-check_security(roles, [], _) ->
- false;
-check_security(roles, UserRoles, Roles) ->
- UserRolesSet = ordsets:from_list(UserRoles),
- RolesSet = ordsets:from_list(Roles),
- not ordsets:is_disjoint(UserRolesSet, RolesSet);
-check_security(names, _, []) ->
- false;
-check_security(names, null, _) ->
- false;
-check_security(names, UserName, Names) ->
- lists:member(UserName, Names).
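-
-% Illustrative sketch (not from the original source): given a members object
-% such as
-%
-%   Members = [{<<"names">>, [<<"bob">>]}, {<<"roles">>, [<<"dev">>]}],
-%
-% a user with role <<"dev">> is authorized by the roles check, and a user
-% named <<"bob">> with no matching role falls through to the names check.
-% Since is_authorized/2 prepends <<"_admin">> to the allowed roles, server
-% admins always pass the roles check.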
-
-throw_security_error(#user_ctx{name=null}=UserCtx) ->
- Reason = <<"You are not authorized to access this db.">>,
- throw_security_error(UserCtx, Reason);
-throw_security_error(#user_ctx{name=_}=UserCtx) ->
- Reason = <<"You are not allowed to access this db.">>,
- throw_security_error(UserCtx, Reason).
-throw_security_error(#user_ctx{}=UserCtx, Reason) ->
- Error = security_error_type(UserCtx),
- throw({Error, Reason}).
-
-security_error_type(#user_ctx{name=null}) ->
- unauthorized;
-security_error_type(#user_ctx{name=_}) ->
- forbidden.
-
-
-get_admins(#db{security=SecProps}) ->
- couch_util:get_value(<<"admins">>, SecProps, {[]}).
-
-get_members(#db{security=SecProps}) ->
- % we fall back to readers here for backwards compatibility
- couch_util:get_value(<<"members">>, SecProps,
- couch_util:get_value(<<"readers">>, SecProps, {[]})).
-
-get_security(#db{security=SecProps}) ->
- {SecProps};
-get_security(?OLD_DB_REC = Db) ->
- {?OLD_DB_SECURITY(Db)}.
-
-set_security(#db{main_pid=Pid}=Db, {NewSecProps}) when is_list(NewSecProps) ->
- check_is_admin(Db),
- ok = validate_security_object(NewSecProps),
- gen_server:call(Pid, {set_security, NewSecProps}, infinity);
-set_security(_, _) ->
- throw(bad_request).
-
-set_user_ctx(#db{} = Db, UserCtx) ->
- {ok, Db#db{user_ctx = UserCtx}}.
-
-validate_security_object(SecProps) ->
- Admins = couch_util:get_value(<<"admins">>, SecProps, {[]}),
- % we fall back to readers here for backwards compatibility
- Members = couch_util:get_value(<<"members">>, SecProps,
- couch_util:get_value(<<"readers">>, SecProps, {[]})),
- ok = validate_names_and_roles(Admins),
- ok = validate_names_and_roles(Members),
- ok.
-
-% validate user input
-validate_names_and_roles({Props}) when is_list(Props) ->
- case couch_util:get_value(<<"names">>, Props, []) of
- Ns when is_list(Ns) ->
- [throw("names must be a JSON list of strings") ||N <- Ns, not is_binary(N)],
- Ns;
- _ ->
- throw("names must be a JSON list of strings")
- end,
- case couch_util:get_value(<<"roles">>, Props, []) of
- Rs when is_list(Rs) ->
- [throw("roles must be a JSON list of strings") ||R <- Rs, not is_binary(R)],
- Rs;
- _ ->
- throw("roles must be a JSON list of strings")
- end,
- ok;
-validate_names_and_roles(_) ->
- throw("admins or members must be a JSON list of strings").
-
-get_revs_limit(#db{} = Db) ->
- couch_db_engine:get_revs_limit(Db).
-
-set_revs_limit(#db{main_pid=Pid}=Db, Limit) when Limit > 0 ->
- check_is_admin(Db),
- gen_server:call(Pid, {set_revs_limit, Limit}, infinity);
-set_revs_limit(_Db, _Limit) ->
- throw(invalid_revs_limit).
-
-name(#db{name=Name}) ->
- Name;
-name(?OLD_DB_REC = Db) ->
- ?OLD_DB_NAME(Db).
-
-
-validate_docid(#db{} = Db, DocId) when is_binary(DocId) ->
- couch_doc:validate_docid(DocId, name(Db)),
- case is_partitioned(Db) of
- true ->
- couch_partition:validate_docid(DocId);
- false ->
- ok
- end.
-
-
-doc_from_json_obj_validate(#db{} = Db, DocJson) ->
- Doc = couch_doc:from_json_obj_validate(DocJson, name(Db)),
- {Props} = DocJson,
- case couch_util:get_value(<<"_id">>, Props) of
- DocId when is_binary(DocId) ->
- % Only validate the docid if it was provided
- validate_docid(Db, DocId);
- _ ->
- ok
- end,
- Doc.
-
-
-update_doc(Db, Doc, Options) ->
- update_doc(Db, Doc, Options, interactive_edit).
-
-update_doc(Db, Doc, Options, UpdateType) ->
- case update_docs(Db, [Doc], Options, UpdateType) of
- {ok, [{ok, NewRev}]} ->
- {ok, NewRev};
- {ok, [{{_Id, _Rev}, Error}]} ->
- throw(Error);
- {ok, [Error]} ->
- throw(Error);
- {ok, []} ->
- % replication success
- {Pos, [RevId | _]} = Doc#doc.revs,
- {ok, {Pos, RevId}}
- end.
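-
-% Illustrative usage sketch (names and values are hypothetical, not from the
-% original source):
-%
-%   {ok, Db} = couch_db:open(<<"mydb">>, [{user_ctx, UserCtx}]),
-%   Doc = couch_doc:from_json_obj({[{<<"_id">>, <<"doc1">>}, {<<"v">>, 1}]}),
-%   {ok, {Pos, RevId}} = couch_db:update_doc(Db, Doc, []).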
-
-update_docs(Db, Docs) ->
- update_docs(Db, Docs, []).
-
-% group_alike_docs groups the sorted documents into sublist buckets, by id.
-% ([DocA, DocA, DocB, DocC], []) -> [[DocA, DocA], [DocB], [DocC]]
-group_alike_docs(Docs) ->
- % Here we make the sort stable by including each doc's original
- % position in the sort key, so duplicate docids keep their input
- % order regardless of lists:sort/2, which isn't documented anywhere
- % as being stable.
- WithPos = lists:zip(Docs, lists:seq(1, length(Docs))),
- SortFun = fun({D1, P1}, {D2, P2}) -> {D1#doc.id, P1} =< {D2#doc.id, P2} end,
- SortedDocs = [D || {D, _} <- lists:sort(SortFun, WithPos)],
- group_alike_docs(SortedDocs, []).
-
-group_alike_docs([], Buckets) ->
- lists:reverse(lists:map(fun lists:reverse/1, Buckets));
-group_alike_docs([Doc|Rest], []) ->
- group_alike_docs(Rest, [[Doc]]);
-group_alike_docs([Doc|Rest], [Bucket|RestBuckets]) ->
- [#doc{id=BucketId}|_] = Bucket,
- case Doc#doc.id == BucketId of
- true ->
- % add to existing bucket
- group_alike_docs(Rest, [[Doc|Bucket]|RestBuckets]);
- false ->
- % add to new bucket
- group_alike_docs(Rest, [[Doc]|[Bucket|RestBuckets]])
- end.
-
-validate_doc_update(#db{}=Db, #doc{id= <<"_design/",_/binary>>}=Doc, _GetDiskDocFun) ->
- case catch check_is_admin(Db) of
- ok -> validate_ddoc(Db, Doc);
- Error -> Error
- end;
-validate_doc_update(#db{validate_doc_funs = undefined} = Db, Doc, Fun) ->
- ValidationFuns = load_validation_funs(Db),
- validate_doc_update(Db#db{validate_doc_funs=ValidationFuns}, Doc, Fun);
-validate_doc_update(#db{validate_doc_funs=[]}, _Doc, _GetDiskDocFun) ->
- ok;
-validate_doc_update(_Db, #doc{id= <<"_local/",_/binary>>}, _GetDiskDocFun) ->
- ok;
-validate_doc_update(Db, Doc, GetDiskDocFun) ->
- case get(io_priority) of
- {internal_repl, _} ->
- ok;
- _ ->
- validate_doc_update_int(Db, Doc, GetDiskDocFun)
- end.
-
-validate_ddoc(Db, DDoc) ->
- try
- ok = couch_index_server:validate(Db, couch_doc:with_ejson_body(DDoc))
- catch
- throw:{invalid_design_doc, Reason} ->
- {bad_request, invalid_design_doc, Reason};
- throw:{compilation_error, Reason} ->
- {bad_request, compilation_error, Reason};
- throw:Error ->
- Error
- end.
-
-validate_doc_update_int(Db, Doc, GetDiskDocFun) ->
- Fun = fun() ->
- DiskDoc = GetDiskDocFun(),
- JsonCtx = couch_util:json_user_ctx(Db),
- SecObj = get_security(Db),
- try
- [case Fun(Doc, DiskDoc, JsonCtx, SecObj) of
- ok -> ok;
- Error -> throw(Error)
- end || Fun <- Db#db.validate_doc_funs],
- ok
- catch
- throw:Error ->
- Error
- end
- end,
- couch_stats:update_histogram([couchdb, query_server, vdu_process_time],
- Fun).
-
-
-% To be safe, spawn a middleman process here so a crash in the ddoc_cache
-% call is delivered to us as a 'DOWN' message instead of taking the caller
-% down.
-load_validation_funs(#db{main_pid=Pid, name = <<"shards/", _/binary>>}=Db) ->
- {_, Ref} = spawn_monitor(fun() ->
- exit(ddoc_cache:open(mem3:dbname(Db#db.name), validation_funs))
- end),
- receive
- {'DOWN', Ref, _, _, {ok, Funs}} ->
- gen_server:cast(Pid, {load_validation_funs, Funs}),
- Funs;
- {'DOWN', Ref, _, _, {database_does_not_exist, _StackTrace}} ->
- ok = couch_server:close_db_if_idle(Db#db.name),
- erlang:error(database_does_not_exist);
- {'DOWN', Ref, _, _, Reason} ->
- couch_log:error("could not load validation funs ~p", [Reason]),
- throw(internal_server_error)
- end;
-load_validation_funs(#db{main_pid=Pid}=Db) ->
- {ok, DDocInfos} = get_design_docs(Db),
- OpenDocs = fun
- (#full_doc_info{}=D) ->
- {ok, Doc} = open_doc_int(Db, D, [ejson_body]),
- Doc
- end,
- DDocs = lists:map(OpenDocs, DDocInfos),
- Funs = lists:flatmap(fun(DDoc) ->
- case couch_doc:get_validate_doc_fun(DDoc) of
- nil -> [];
- Fun -> [Fun]
- end
- end, DDocs),
- gen_server:cast(Pid, {load_validation_funs, Funs}),
- Funs.
-
-reload_validation_funs(#db{} = Db) ->
- gen_server:cast(Db#db.main_pid, {load_validation_funs, undefined}).
-
-prep_and_validate_update(Db, #doc{id=Id,revs={RevStart, Revs}}=Doc,
- OldFullDocInfo, LeafRevsDict, AllowConflict) ->
- case Revs of
- [PrevRev|_] ->
- case dict:find({RevStart, PrevRev}, LeafRevsDict) of
- {ok, {#leaf{deleted=Deleted, ptr=DiskSp}, DiskRevs}} ->
- case couch_doc:has_stubs(Doc) of
- true ->
- DiskDoc = make_doc(Db, Id, Deleted, DiskSp, DiskRevs),
- Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
- {validate_doc_update(Db, Doc2, fun() -> DiskDoc end), Doc2};
- false ->
- LoadDiskDoc = fun() -> make_doc(Db,Id,Deleted,DiskSp,DiskRevs) end,
- {validate_doc_update(Db, Doc, LoadDiskDoc), Doc}
- end;
- error when AllowConflict ->
- couch_doc:merge_stubs(Doc, #doc{}), % will generate error if
- % there are stubs
- {validate_doc_update(Db, Doc, fun() -> nil end), Doc};
- error ->
- {conflict, Doc}
- end;
- [] ->
- % new doc, and we have existing revs.
- % reuse existing deleted doc
- if OldFullDocInfo#full_doc_info.deleted orelse AllowConflict ->
- {validate_doc_update(Db, Doc, fun() -> nil end), Doc};
- true ->
- {conflict, Doc}
- end
- end.
-
-
-
-prep_and_validate_updates(_Db, [], [], _AllowConflict, AccPrepped,
- AccFatalErrors) ->
- AccPrepped2 = lists:reverse(lists:map(fun lists:reverse/1, AccPrepped)),
- {AccPrepped2, AccFatalErrors};
-prep_and_validate_updates(Db, [DocBucket|RestBuckets], [not_found|RestLookups],
- AllowConflict, AccPrepped, AccErrors) ->
- % no existing revs are known,
- {PreppedBucket, AccErrors3} = lists:foldl(
- fun(#doc{revs=Revs}=Doc, {AccBucket, AccErrors2}) ->
- case couch_doc:has_stubs(Doc) of
- true ->
- couch_doc:merge_stubs(Doc, #doc{}); % will throw exception
- false -> ok
- end,
- case Revs of
- {0, []} ->
- case validate_doc_update(Db, Doc, fun() -> nil end) of
- ok ->
- {[Doc | AccBucket], AccErrors2};
- Error ->
- {AccBucket, [{doc_tag(Doc), Error} | AccErrors2]}
- end;
- _ ->
- % old revs specified but none exist, a conflict
- {AccBucket, [{doc_tag(Doc), conflict} | AccErrors2]}
- end
- end,
- {[], AccErrors}, DocBucket),
-
- prep_and_validate_updates(Db, RestBuckets, RestLookups, AllowConflict,
- [PreppedBucket | AccPrepped], AccErrors3);
-prep_and_validate_updates(Db, [DocBucket|RestBuckets],
- [#full_doc_info{rev_tree=OldRevTree}=OldFullDocInfo|RestLookups],
- AllowConflict, AccPrepped, AccErrors) ->
- Leafs = couch_key_tree:get_all_leafs(OldRevTree),
- LeafRevsDict = dict:from_list([
- {{Start, RevId}, {Leaf, Revs}} ||
- {Leaf, {Start, [RevId | _]} = Revs} <- Leafs
- ]),
- {PreppedBucket, AccErrors3} = lists:foldl(
- fun(Doc, {Docs2Acc, AccErrors2}) ->
- case prep_and_validate_update(Db, Doc, OldFullDocInfo,
- LeafRevsDict, AllowConflict) of
- {ok, Doc2} ->
- {[Doc2 | Docs2Acc], AccErrors2};
- {Error, _} ->
- % Record the error
- {Docs2Acc, [{doc_tag(Doc), Error} |AccErrors2]}
- end
- end,
- {[], AccErrors}, DocBucket),
- prep_and_validate_updates(Db, RestBuckets, RestLookups, AllowConflict,
- [PreppedBucket | AccPrepped], AccErrors3).
-
-
-update_docs(Db, Docs, Options) ->
- update_docs(Db, Docs, Options, interactive_edit).
-
-
-prep_and_validate_replicated_updates(_Db, [], [], AccPrepped, AccErrors) ->
- Errors2 = [{{Id, {Pos, Rev}}, Error} ||
- {#doc{id=Id,revs={Pos,[Rev|_]}}, Error} <- AccErrors],
- AccPrepped2 = lists:reverse(lists:map(fun lists:reverse/1, AccPrepped)),
- {AccPrepped2, lists:reverse(Errors2)};
-prep_and_validate_replicated_updates(Db, [Bucket|RestBuckets], [OldInfo|RestOldInfo], AccPrepped, AccErrors) ->
- case OldInfo of
- not_found ->
- {ValidatedBucket, AccErrors3} = lists:foldl(
- fun(Doc, {AccPrepped2, AccErrors2}) ->
- case couch_doc:has_stubs(Doc) of
- true ->
- couch_doc:merge_stubs(Doc, #doc{}); % will throw exception
- false -> ok
- end,
- case validate_doc_update(Db, Doc, fun() -> nil end) of
- ok ->
- {[Doc | AccPrepped2], AccErrors2};
- Error ->
- {AccPrepped2, [{Doc, Error} | AccErrors2]}
- end
- end,
- {[], AccErrors}, Bucket),
- prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo, [ValidatedBucket | AccPrepped], AccErrors3);
- #full_doc_info{rev_tree=OldTree} ->
- OldLeafs = couch_key_tree:get_all_leafs_full(OldTree),
- OldLeafsLU = [{Start, RevId} || {Start, [{RevId, _}|_]} <- OldLeafs],
- NewPaths = lists:map(fun couch_doc:to_path/1, Bucket),
- NewRevTree = couch_key_tree:multi_merge(OldTree, NewPaths),
- Leafs = couch_key_tree:get_all_leafs_full(NewRevTree),
- LeafRevsFullDict = dict:from_list( [{{Start, RevId}, FullPath} || {Start, [{RevId, _}|_]}=FullPath <- Leafs]),
- {ValidatedBucket, AccErrors3} =
- lists:foldl(
- fun(#doc{id=Id,revs={Pos, [RevId|_]}}=Doc, {AccValidated, AccErrors2}) ->
- IsOldLeaf = lists:member({Pos, RevId}, OldLeafsLU),
- case dict:find({Pos, RevId}, LeafRevsFullDict) of
- {ok, {Start, Path}} when not IsOldLeaf ->
- % our unflushed doc is a leaf node. Go back on the path
- % to find the previous rev that's on disk.
-
- LoadPrevRevFun = fun() ->
- make_first_doc_on_disk(Db,Id,Start-1, tl(Path))
- end,
-
- case couch_doc:has_stubs(Doc) of
- true ->
- DiskDoc = case LoadPrevRevFun() of
- #doc{} = DiskDoc0 ->
- DiskDoc0;
- _ ->
- % Force a missing_stub exception
- couch_doc:merge_stubs(Doc, #doc{})
- end,
- Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
- GetDiskDocFun = fun() -> DiskDoc end;
- false ->
- Doc2 = Doc,
- GetDiskDocFun = LoadPrevRevFun
- end,
-
- case validate_doc_update(Db, Doc2, GetDiskDocFun) of
- ok ->
- {[Doc2 | AccValidated], AccErrors2};
- Error ->
- {AccValidated, [{Doc, Error} | AccErrors2]}
- end;
- _ ->
- % this doc isn't a leaf or already exists in the tree.
- % ignore but consider it a success.
- {AccValidated, AccErrors2}
- end
- end,
- {[], AccErrors}, Bucket),
- prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo,
- [ValidatedBucket | AccPrepped], AccErrors3)
- end.
-
-
-
-new_revid(#doc{body=Body, revs={OldStart,OldRevs}, atts=Atts, deleted=Deleted}) ->
- DigestedAtts = lists:foldl(fun(Att, Acc) ->
- [N, T, M] = couch_att:fetch([name, type, md5], Att),
- case M == <<>> of
- true -> Acc;
- false -> [{N, T, M} | Acc]
- end
- end, [], Atts),
- case DigestedAtts of
- Atts2 when length(Atts) =/= length(Atts2) ->
- % We must have old style non-md5 attachments
- ?l2b(integer_to_list(couch_util:rand32()));
- Atts2 ->
- OldRev = case OldRevs of [] -> 0; [OldRev0|_] -> OldRev0 end,
- couch_hash:md5_hash(term_to_binary([Deleted, OldStart, OldRev, Body, Atts2], [{minor_version, 1}]))
- end.
-
-new_revs([], OutBuckets, IdRevsAcc) ->
- {lists:reverse(OutBuckets), IdRevsAcc};
-new_revs([Bucket|RestBuckets], OutBuckets, IdRevsAcc) ->
- {NewBucket, IdRevsAcc3} = lists:mapfoldl(
- fun(#doc{revs={Start, RevIds}}=Doc, IdRevsAcc2)->
- NewRevId = new_revid(Doc),
- {Doc#doc{revs={Start+1, [NewRevId | RevIds]}},
- [{doc_tag(Doc), {ok, {Start+1, NewRevId}}} | IdRevsAcc2]}
- end, IdRevsAcc, Bucket),
- new_revs(RestBuckets, [NewBucket|OutBuckets], IdRevsAcc3).
-
-check_dup_atts(#doc{atts=Atts}=Doc) ->
- lists:foldl(fun(Att, Names) ->
- Name = couch_att:fetch(name, Att),
- case ordsets:is_element(Name, Names) of
- true -> throw({bad_request, <<"Duplicate attachments">>});
- false -> ordsets:add_element(Name, Names)
- end
- end, ordsets:new(), Atts),
- Doc.
-
-tag_docs([]) ->
- [];
-tag_docs([#doc{meta=Meta}=Doc | Rest]) ->
- [Doc#doc{meta=[{ref, make_ref()} | Meta]} | tag_docs(Rest)].
-
-doc_tag(#doc{meta=Meta}) ->
- case lists:keyfind(ref, 1, Meta) of
- {ref, Ref} when is_reference(Ref) -> Ref;
- false -> throw(doc_not_tagged);
- Else -> throw({invalid_doc_tag, Else})
- end.
-
-update_docs(Db, Docs0, Options, replicated_changes) ->
- Docs = tag_docs(Docs0),
-
- PrepValidateFun = fun(Db0, DocBuckets0, ExistingDocInfos) ->
- prep_and_validate_replicated_updates(Db0, DocBuckets0,
- ExistingDocInfos, [], [])
- end,
-
- {ok, DocBuckets, NonRepDocs, DocErrors}
- = before_docs_update(Db, Docs, PrepValidateFun, replicated_changes),
-
- DocBuckets2 = [[doc_flush_atts(Db, check_dup_atts(Doc))
- || Doc <- Bucket] || Bucket <- DocBuckets],
- {ok, _} = write_and_commit(Db, DocBuckets2,
- NonRepDocs, [merge_conflicts | Options]),
- {ok, DocErrors};
-
-update_docs(Db, Docs0, Options, interactive_edit) ->
- Docs = tag_docs(Docs0),
-
- AllOrNothing = lists:member(all_or_nothing, Options),
- PrepValidateFun = fun(Db0, DocBuckets0, ExistingDocInfos) ->
- prep_and_validate_updates(Db0, DocBuckets0, ExistingDocInfos,
- AllOrNothing, [], [])
- end,
-
- {ok, DocBuckets, NonRepDocs, DocErrors}
- = before_docs_update(Db, Docs, PrepValidateFun, interactive_edit),
-
- if (AllOrNothing) and (DocErrors /= []) ->
- RefErrorDict = dict:from_list([{doc_tag(Doc), Doc} || Doc <- Docs]),
- {aborted, lists:map(fun({Ref, Error}) ->
- #doc{id=Id,revs={Start,RevIds}} = dict:fetch(Ref, RefErrorDict),
- case {Start, RevIds} of
- {Pos, [RevId | _]} -> {{Id, {Pos, RevId}}, Error};
- {0, []} -> {{Id, {0, <<>>}}, Error}
- end
- end, DocErrors)};
- true ->
- Options2 = if AllOrNothing -> [merge_conflicts];
- true -> [] end ++ Options,
- DocBuckets2 = [[
- doc_flush_atts(Db, set_new_att_revpos(
- check_dup_atts(Doc)))
- || Doc <- B] || B <- DocBuckets],
- {DocBuckets3, IdRevs} = new_revs(DocBuckets2, [], []),
-
- {ok, CommitResults} = write_and_commit(Db, DocBuckets3,
- NonRepDocs, Options2),
-
- ResultsDict = lists:foldl(fun({Key, Resp}, ResultsAcc) ->
- dict:store(Key, Resp, ResultsAcc)
- end, dict:from_list(IdRevs), CommitResults ++ DocErrors),
- {ok, lists:map(fun(Doc) ->
- dict:fetch(doc_tag(Doc), ResultsDict)
- end, Docs)}
- end.
-
-% Returns the first available document on disk. Input list is a full rev path
-% for the doc.
-make_first_doc_on_disk(_Db, _Id, _Pos, []) ->
- nil;
-make_first_doc_on_disk(Db, Id, Pos, [{_Rev, #doc{}} | RestPath]) ->
- make_first_doc_on_disk(Db, Id, Pos-1, RestPath);
-make_first_doc_on_disk(Db, Id, Pos, [{_Rev, ?REV_MISSING}|RestPath]) ->
- make_first_doc_on_disk(Db, Id, Pos - 1, RestPath);
-make_first_doc_on_disk(Db, Id, Pos, [{_Rev, #leaf{deleted=IsDel, ptr=Sp}} |_]=DocPath) ->
- Revs = [Rev || {Rev, _} <- DocPath],
- make_doc(Db, Id, IsDel, Sp, {Pos, Revs}).
-
-collect_results_with_metrics(Pid, MRef, []) ->
- Begin = os:timestamp(),
- try
- collect_results(Pid, MRef, [])
- after
- ResultsTime = timer:now_diff(os:timestamp(), Begin) div 1000,
- couch_stats:update_histogram(
- [couchdb, collect_results_time],
- ResultsTime
- )
- end.
-
-collect_results(Pid, MRef, ResultsAcc) ->
- receive
- {result, Pid, Result} ->
- collect_results(Pid, MRef, [Result | ResultsAcc]);
- {done, Pid} ->
- {ok, ResultsAcc};
- {retry, Pid} ->
- retry;
- {'DOWN', MRef, _, _, Reason} ->
- exit(Reason)
- end.
-
-write_and_commit(#db{main_pid=Pid, user_ctx=Ctx}=Db, DocBuckets1,
- NonRepDocs, Options) ->
- DocBuckets = prepare_doc_summaries(Db, DocBuckets1),
- MergeConflicts = lists:member(merge_conflicts, Options),
- MRef = erlang:monitor(process, Pid),
- try
- Pid ! {update_docs, self(), DocBuckets, NonRepDocs, MergeConflicts},
- case collect_results_with_metrics(Pid, MRef, []) of
- {ok, Results} -> {ok, Results};
- retry ->
- % This can happen if the db file we wrote to was swapped out by
- % compaction. Retry by reopening the db and writing to the current file
- {ok, Db2} = open(Db#db.name, [{user_ctx, Ctx}]),
- DocBuckets2 = [
- [doc_flush_atts(Db2, Doc) || Doc <- Bucket] ||
- Bucket <- DocBuckets1
- ],
- % We only retry once
- DocBuckets3 = prepare_doc_summaries(Db2, DocBuckets2),
- close(Db2),
- Pid ! {update_docs, self(), DocBuckets3, NonRepDocs, MergeConflicts},
- case collect_results_with_metrics(Pid, MRef, []) of
- {ok, Results} -> {ok, Results};
- retry -> throw({update_error, compaction_retry})
- end
- end
- after
- erlang:demonitor(MRef, [flush])
- end.
-
-
-prepare_doc_summaries(Db, BucketList) ->
- [lists:map(
- fun(#doc{body = Body, atts = Atts} = Doc0) ->
- DiskAtts = [couch_att:to_disk_term(Att) || Att <- Atts],
- {ok, SizeInfo} = couch_att:size_info(Atts),
- AttsStream = case Atts of
- [Att | _] ->
- {stream, StreamEngine} = couch_att:fetch(data, Att),
- StreamEngine;
- [] ->
- nil
- end,
- Doc1 = Doc0#doc{
- atts = DiskAtts,
- meta = [
- {size_info, SizeInfo},
- {atts_stream, AttsStream},
- {ejson_size, couch_ejson_size:encoded_size(Body)}
- ] ++ Doc0#doc.meta
- },
- couch_db_engine:serialize_doc(Db, Doc1)
- end,
- Bucket) || Bucket <- BucketList].
-
-
-before_docs_update(#db{validate_doc_funs = VDFuns} = Db, Docs, PVFun, UpdateType) ->
- increment_stat(Db, [couchdb, database_writes]),
-
- % Separate _local docs from normal docs
- IsLocal = fun
- (#doc{id= <<?LOCAL_DOC_PREFIX, _/binary>>}) -> true;
- (_) -> false
- end,
- {NonRepDocs, Docs2} = lists:partition(IsLocal, Docs),
-
- BucketList = group_alike_docs(Docs2),
-
- DocBuckets = lists:map(fun(Bucket) ->
- lists:map(fun(Doc) ->
- DocWithBody = couch_doc:with_ejson_body(Doc),
- couch_db_plugin:before_doc_update(Db, DocWithBody, UpdateType)
- end, Bucket)
- end, BucketList),
-
- ValidatePred = fun
- (#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>}) -> true;
- (#doc{atts = Atts}) -> Atts /= []
- end,
-
- case (VDFuns /= []) orelse lists:any(ValidatePred, Docs2) of
- true ->
- % lookup the doc by id and get the most recent
- Ids = [Id || [#doc{id = Id} | _] <- DocBuckets],
- ExistingDocs = get_full_doc_infos(Db, Ids),
- {DocBuckets2, DocErrors} = PVFun(Db, DocBuckets, ExistingDocs),
- % remove empty buckets
- DocBuckets3 = [Bucket || Bucket <- DocBuckets2, Bucket /= []],
- {ok, DocBuckets3, NonRepDocs, DocErrors};
- false ->
- {ok, DocBuckets, NonRepDocs, []}
- end.
-
-
-set_new_att_revpos(#doc{revs={RevPos,_Revs},atts=Atts0}=Doc) ->
- Atts = lists:map(
- fun(Att) ->
- case couch_att:fetch(data, Att) of
- % already committed to disk, don't set new rev
- {stream, _} -> Att;
- {Fd, _} when is_pid(Fd) -> Att;
- % write required so update RevPos
- _ -> couch_att:store(revpos, RevPos+1, Att)
- end
- end, Atts0),
- Doc#doc{atts = Atts}.
-
-
-doc_flush_atts(Db, Doc) ->
- Doc#doc{atts=[couch_att:flush(Db, Att) || Att <- Doc#doc.atts]}.
-
-
-compressible_att_type(MimeType) when is_binary(MimeType) ->
- compressible_att_type(?b2l(MimeType));
-compressible_att_type(MimeType) ->
- TypeExpList = re:split(
- config:get("attachments", "compressible_types", ""),
- "\\s*,\\s*",
- [{return, list}]
- ),
- lists:any(
- fun(TypeExp) ->
- Regexp = ["^\\s*", re:replace(TypeExp, "\\*", ".*"),
- "(?:\\s*;.*?)?\\s*", $$],
- re:run(MimeType, Regexp, [caseless]) =/= nomatch
- end,
- [T || T <- TypeExpList, T /= []]
- ).
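-
-% Illustrative sketch (hypothetical configuration value, not from the
-% original source): with
-%
-%   [attachments]
-%   compressible_types = text/*, application/json
-%
-% the wildcard entry is rewritten to the regex "text/.*", so a type such as
-% "text/plain; charset=utf-8" matches (any ";parameter" suffix is ignored),
-% while "image/png" matches neither entry and stays uncompressed.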
-
-% From RFC 2616 3.6.1 - Chunked Transfer Coding
-%
-% In other words, the origin server is willing to accept
-% the possibility that the trailer fields might be silently
-% discarded along the path to the client.
-%
-% I take this to mean that if "Trailers: Content-MD5\r\n"
-% is present in the request, but there is no Content-MD5
-% trailer, we're free to ignore this inconsistency and
-% pretend that no Content-MD5 exists.
-with_stream(Db, Att, Fun) ->
- [InMd5, Type, Enc] = couch_att:fetch([md5, type, encoding], Att),
- BufferSize = list_to_integer(
- config:get("couchdb", "attachment_stream_buffer_size", "4096")),
- Options = case (Enc =:= identity) andalso compressible_att_type(Type) of
- true ->
- CompLevel = list_to_integer(
- config:get("attachments", "compression_level", "0")
- ),
- [
- {buffer_size, BufferSize},
- {encoding, gzip},
- {compression_level, CompLevel}
- ];
- _ ->
- [{buffer_size, BufferSize}]
- end,
- {ok, OutputStream} = open_write_stream(Db, Options),
- ReqMd5 = case Fun(OutputStream) of
- {md5, FooterMd5} ->
- case InMd5 of
- md5_in_footer -> FooterMd5;
- _ -> InMd5
- end;
- _ ->
- InMd5
- end,
- {StreamEngine, Len, IdentityLen, Md5, IdentityMd5} =
- couch_stream:close(OutputStream),
- couch_util:check_md5(IdentityMd5, ReqMd5),
- {AttLen, DiskLen, NewEnc} = case Enc of
- identity ->
- case {Md5, IdentityMd5} of
- {Same, Same} ->
- {Len, IdentityLen, identity};
- _ ->
- {Len, IdentityLen, gzip}
- end;
- gzip ->
- case couch_att:fetch([att_len, disk_len], Att) of
- [AL, DL] when AL =:= undefined orelse DL =:= undefined ->
- % Compressed attachment uploaded through the standalone API.
- {Len, Len, gzip};
- [AL, DL] ->
- % This case is used for efficient push-replication, where a
- % compressed attachment is located in the body of a multipart
- % content-type request.
- {AL, DL, gzip}
- end
- end,
- couch_att:store([
- {data, {stream, StreamEngine}},
- {att_len, AttLen},
- {disk_len, DiskLen},
- {md5, Md5},
- {encoding, NewEnc}
- ], Att).
-
-
-open_write_stream(Db, Options) ->
- couch_db_engine:open_write_stream(Db, Options).
-
-
-open_read_stream(Db, AttState) ->
- couch_db_engine:open_read_stream(Db, AttState).
-
-
-is_active_stream(Db, StreamEngine) ->
- couch_db_engine:is_active_stream(Db, StreamEngine).
-
-
-calculate_start_seq(_Db, _Node, Seq) when is_integer(Seq) ->
- Seq;
-calculate_start_seq(Db, Node, {Seq, Uuid}) ->
- % Treat the current node as the epoch node
- calculate_start_seq(Db, Node, {Seq, Uuid, Node});
-calculate_start_seq(Db, _Node, {Seq, {split, Uuid}, EpochNode}) ->
- case is_owner(EpochNode, Seq, get_epochs(Db)) of
- true ->
- % Find last replicated sequence from split source to target
- mem3_rep:find_split_target_seq(Db, EpochNode, Uuid, Seq);
- false ->
- couch_log:warning("~p calculate_start_seq not owner "
- "db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p, epochs: ~p",
- [?MODULE, Db#db.name, Seq, Uuid, EpochNode, get_epochs(Db)]),
- 0
- end;
-calculate_start_seq(Db, _Node, {Seq, Uuid, EpochNode}) ->
- case is_prefix(Uuid, get_uuid(Db)) of
- true ->
- case is_owner(EpochNode, Seq, get_epochs(Db)) of
- true -> Seq;
- false ->
- couch_log:warning("~p calculate_start_seq not owner "
- "db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p, epochs: ~p",
- [?MODULE, Db#db.name, Seq, Uuid, EpochNode,
- get_epochs(Db)]),
- 0
- end;
- false ->
- couch_log:warning("~p calculate_start_seq uuid prefix mismatch "
- "db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p",
- [?MODULE, Db#db.name, Seq, Uuid, EpochNode]),
- %% The file was rebuilt, most likely in a different
- %% order, so rewind.
- 0
- end;
-calculate_start_seq(Db, _Node, {replace, OriginalNode, Uuid, Seq}) ->
- case is_prefix(Uuid, couch_db:get_uuid(Db)) of
- true ->
- try
- start_seq(get_epochs(Db), OriginalNode, Seq)
- catch throw:epoch_mismatch ->
- couch_log:warning("~p start_seq duplicate uuid on node: ~p "
- "db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p",
- [?MODULE, node(), Db#db.name, Seq, Uuid, OriginalNode]),
- 0
- end;
- false ->
- {replace, OriginalNode, Uuid, Seq}
- end.
-
-
-validate_epochs(Epochs) ->
- %% Assert uniqueness.
- case length(Epochs) == length(lists:ukeysort(2, Epochs)) of
- true -> ok;
- false -> erlang:error(duplicate_epoch)
- end,
- %% Assert order.
- case Epochs == lists:sort(fun({_, A}, {_, B}) -> B =< A end, Epochs) of
- true -> ok;
- false -> erlang:error(epoch_order)
- end.
-
-
-is_prefix(Pattern, Subject) ->
- binary:longest_common_prefix([Pattern, Subject]) == size(Pattern).
-
-
-is_owner(Node, Seq, Epochs) ->
- Node =:= owner_of(Epochs, Seq).
-
-
-owner_of(Db, Seq) when not is_list(Db) ->
- owner_of(get_epochs(Db), Seq);
-owner_of([], _Seq) ->
- undefined;
-owner_of([{EpochNode, EpochSeq} | _Rest], Seq) when Seq > EpochSeq ->
- EpochNode;
-owner_of([_ | Rest], Seq) ->
- owner_of(Rest, Seq).
-
-
-start_seq([{OrigNode, EpochSeq} | _], OrigNode, Seq) when Seq > EpochSeq ->
- %% OrigNode is the owner of the Seq so we can safely stream from there
- Seq;
-start_seq([{_, NewSeq}, {OrigNode, _} | _], OrigNode, Seq) when Seq > NewSeq ->
- %% We transferred this file before Seq was written on OrigNode, so we need
- %% to stream from the beginning of the next epoch. Note that it is _not_
- %% necessary for the current node to own the epoch beginning at NewSeq
- NewSeq;
-start_seq([_ | Rest], OrigNode, Seq) ->
- start_seq(Rest, OrigNode, Seq);
-start_seq([], _OrigNode, _Seq) ->
- throw(epoch_mismatch).
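-
-% Illustrative sketch (not from the original source): with epochs listed
-% newest first, e.g. [{node2, 10}, {node1, 1}], owner_of/2 returns the first
-% node whose epoch starts below the requested sequence:
-%
-%   owner_of([{node2, 10}, {node1, 1}], 15) -> node2
-%   owner_of([{node2, 10}, {node1, 1}], 5)  -> node1
-%   owner_of([], 5)                         -> undefined
-%
-% start_seq/3 walks the same list to decide whether a replacement node still
-% owns Seq or whether streaming must restart at the next epoch boundary.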
-
-
-fold_docs(Db, UserFun, UserAcc) ->
- fold_docs(Db, UserFun, UserAcc, []).
-
-fold_docs(Db, UserFun, UserAcc, Options) ->
- couch_db_engine:fold_docs(Db, UserFun, UserAcc, Options).
-
-
-fold_local_docs(Db, UserFun, UserAcc, Options) ->
- couch_db_engine:fold_local_docs(Db, UserFun, UserAcc, Options).
-
-
-fold_design_docs(Db, UserFun, UserAcc, Options1) ->
- Options2 = set_design_doc_keys(Options1),
- couch_db_engine:fold_docs(Db, UserFun, UserAcc, Options2).
-
-
-fold_changes(Db, StartSeq, UserFun, UserAcc) ->
- fold_changes(Db, StartSeq, UserFun, UserAcc, []).
-
-
-fold_changes(Db, StartSeq, UserFun, UserAcc, Opts) ->
- couch_db_engine:fold_changes(Db, StartSeq, UserFun, UserAcc, Opts).
-
-
-fold_purge_infos(Db, StartPurgeSeq, Fun, Acc) ->
- fold_purge_infos(Db, StartPurgeSeq, Fun, Acc, []).
-
-
-fold_purge_infos(Db, StartPurgeSeq, UFun, UAcc, Opts) ->
- couch_db_engine:fold_purge_infos(Db, StartPurgeSeq, UFun, UAcc, Opts).
-
-
-count_changes_since(Db, SinceSeq) ->
- couch_db_engine:count_changes_since(Db, SinceSeq).
-
-
-%%% Internal function %%%
-open_doc_revs_int(Db, IdRevs, Options) ->
- Ids = [Id || {Id, _Revs} <- IdRevs],
- LookupResults = get_full_doc_infos(Db, Ids),
- lists:zipwith(
- fun({Id, Revs}, Lookup) ->
- case Lookup of
- #full_doc_info{rev_tree=RevTree} ->
- {FoundRevs, MissingRevs} =
- case Revs of
- all ->
- {couch_key_tree:get_all_leafs(RevTree), []};
- _ ->
- case lists:member(latest, Options) of
- true ->
- couch_key_tree:get_key_leafs(RevTree, Revs);
- false ->
- couch_key_tree:get(RevTree, Revs)
- end
- end,
- FoundResults =
- lists:map(fun({Value, {Pos, [Rev|_]}=FoundRevPath}) ->
- case Value of
- ?REV_MISSING ->
- % we have the rev in our list but know nothing about it
- {{not_found, missing}, {Pos, Rev}};
- #leaf{deleted=IsDeleted, ptr=SummaryPtr} ->
- {ok, make_doc(Db, Id, IsDeleted, SummaryPtr, FoundRevPath)}
- end
- end, FoundRevs),
- Results = FoundResults ++ [{{not_found, missing}, MissingRev} || MissingRev <- MissingRevs],
- {ok, Results};
- not_found when Revs == all ->
- {ok, []};
- not_found ->
- {ok, [{{not_found, missing}, Rev} || Rev <- Revs]}
- end
- end,
- IdRevs, LookupResults).
-
-open_doc_int(Db, <<?LOCAL_DOC_PREFIX, _/binary>> = Id, Options) ->
- case couch_db_engine:open_local_docs(Db, [Id]) of
- [#doc{} = Doc] ->
- apply_open_options({ok, Doc}, Options);
- [not_found] ->
- {not_found, missing}
- end;
-open_doc_int(Db, #doc_info{id=Id,revs=[RevInfo|_]}=DocInfo, Options) ->
- #rev_info{deleted=IsDeleted,rev={Pos,RevId},body_sp=Bp} = RevInfo,
- Doc = make_doc(Db, Id, IsDeleted, Bp, {Pos,[RevId]}),
- apply_open_options(
- {ok, Doc#doc{meta=doc_meta_info(DocInfo, [], Options)}}, Options);
-open_doc_int(Db, #full_doc_info{id=Id,rev_tree=RevTree}=FullDocInfo, Options) ->
- #doc_info{revs=[#rev_info{deleted=IsDeleted,rev=Rev,body_sp=Bp}|_]} =
- DocInfo = couch_doc:to_doc_info(FullDocInfo),
- {[{_, RevPath}], []} = couch_key_tree:get(RevTree, [Rev]),
- Doc = make_doc(Db, Id, IsDeleted, Bp, RevPath),
- apply_open_options(
- {ok, Doc#doc{meta=doc_meta_info(DocInfo, RevTree, Options)}}, Options);
-open_doc_int(Db, Id, Options) ->
- case get_full_doc_info(Db, Id) of
- #full_doc_info{} = FullDocInfo ->
- open_doc_int(Db, FullDocInfo, Options);
- not_found ->
- {not_found, missing}
- end.
-
-doc_meta_info(#doc_info{high_seq=Seq,revs=[#rev_info{rev=Rev}|RestInfo]}, RevTree, Options) ->
- case lists:member(revs_info, Options) of
- false -> [];
- true ->
- {[{Pos, RevPath}],[]} =
- couch_key_tree:get_full_key_paths(RevTree, [Rev]),
-
- [{revs_info, Pos, lists:map(
- fun({Rev1, ?REV_MISSING}) ->
- {Rev1, missing};
- ({Rev1, Leaf}) ->
- case Leaf#leaf.deleted of
- true ->
- {Rev1, deleted};
- false ->
- {Rev1, available}
- end
- end, RevPath)}]
- end ++
- case lists:member(conflicts, Options) of
- false -> [];
- true ->
- case [Rev1 || #rev_info{rev=Rev1,deleted=false} <- RestInfo] of
- [] -> [];
- ConflictRevs -> [{conflicts, ConflictRevs}]
- end
- end ++
- case lists:member(deleted_conflicts, Options) of
- false -> [];
- true ->
- case [Rev1 || #rev_info{rev=Rev1,deleted=true} <- RestInfo] of
- [] -> [];
- DelConflictRevs -> [{deleted_conflicts, DelConflictRevs}]
- end
- end ++
- case lists:member(local_seq, Options) of
- false -> [];
- true -> [{local_seq, Seq}]
- end.
-
-
-make_doc(_Db, Id, Deleted, nil = _Bp, RevisionPath) ->
- #doc{
- id = Id,
- revs = RevisionPath,
- body = [],
- atts = [],
- deleted = Deleted
- };
-make_doc(#db{} = Db, Id, Deleted, Bp, {Pos, Revs}) ->
- RevsLimit = get_revs_limit(Db),
- Doc0 = couch_db_engine:read_doc_body(Db, #doc{
- id = Id,
- revs = {Pos, lists:sublist(Revs, 1, RevsLimit)},
- body = Bp,
- deleted = Deleted
- }),
- Doc1 = case Doc0#doc.atts of
- BinAtts when is_binary(BinAtts) ->
- Doc0#doc{
- atts = couch_compress:decompress(BinAtts)
- };
- ListAtts when is_list(ListAtts) ->
- Doc0
- end,
- after_doc_read(Db, Doc1#doc{
- atts = [couch_att:from_disk_term(Db, T) || T <- Doc1#doc.atts]
- }).
-
-
-after_doc_read(#db{} = Db, Doc) ->
- DocWithBody = couch_doc:with_ejson_body(Doc),
- couch_db_plugin:after_doc_read(Db, DocWithBody).
-
-increment_stat(#db{options = Options}, Stat) ->
- case lists:member(sys_db, Options) of
- true ->
- ok;
- false ->
- couch_stats:increment_counter(Stat)
- end.
-
--spec normalize_dbname(list() | binary()) -> binary().
-
-normalize_dbname(DbName) when is_list(DbName) ->
- normalize_dbname(list_to_binary(DbName));
-normalize_dbname(DbName) when is_binary(DbName) ->
- mem3:dbname(couch_util:drop_dot_couch_ext(DbName)).
-
-
--spec dbname_suffix(list() | binary()) -> binary().
-
-dbname_suffix(DbName) ->
- filename:basename(normalize_dbname(DbName)).
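-
-% Illustrative sketch, using the shard naming convention from the tests
-% below (not from the original source):
-%
-%   normalize_dbname(<<"shards/00000000-3fffffff/_something.1415960794.couch">>)
-%       -> <<"_something">>
-%   dbname_suffix(<<"long/co$mplex-/path+/_something">>) -> <<"_something">>
-%
-% normalize_dbname/1 drops a trailing ".couch" and the shard prefix/suffix
-% via mem3:dbname/1; dbname_suffix/1 additionally strips remaining path
-% components with filename:basename/1.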
-
-
-validate_dbname(DbName) when is_list(DbName) ->
- validate_dbname(?l2b(DbName));
-validate_dbname(DbName) when is_binary(DbName) ->
- Normalized = normalize_dbname(DbName),
- couch_db_plugin:validate_dbname(
- DbName, Normalized, fun validate_dbname_int/2).
-
-validate_dbname_int(DbName, Normalized) when is_binary(DbName) ->
- DbNoExt = couch_util:drop_dot_couch_ext(DbName),
- case re:run(DbNoExt, ?DBNAME_REGEX, [{capture,none}, dollar_endonly]) of
- match ->
- ok;
- nomatch ->
- case is_system_db_name(Normalized) of
- true -> ok;
- false -> {error, {illegal_database_name, DbName}}
- end
- end.
-
-is_system_db_name(DbName) when is_list(DbName) ->
- is_system_db_name(?l2b(DbName));
-is_system_db_name(DbName) when is_binary(DbName) ->
- Normalized = normalize_dbname(DbName),
- Suffix = filename:basename(Normalized),
- case {filename:dirname(Normalized), lists:member(Suffix, ?SYSTEM_DATABASES)} of
- {<<".">>, Result} -> Result;
- {_Prefix, false} -> false;
- {Prefix, true} ->
- ReOpts = [{capture,none}, dollar_endonly],
- re:run(Prefix, ?DBNAME_REGEX, ReOpts) == match
- end.
-
-set_design_doc_keys(Options1) ->
- Dir = case lists:keyfind(dir, 1, Options1) of
- {dir, D0} -> D0;
- _ -> fwd
- end,
- Options2 = set_design_doc_start_key(Options1, Dir),
- set_design_doc_end_key(Options2, Dir).
-
-
--define(FIRST_DDOC_KEY, <<"_design/">>).
--define(LAST_DDOC_KEY, <<"_design0">>).
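-
-% Illustrative note (not from the original source): $0 is the next character
-% after $/ in ASCII, so every design doc id sorts inside this range, e.g.
-%
-%   (<<"_design/abc">> >= ?FIRST_DDOC_KEY) andalso
-%       (<<"_design/abc">> < ?LAST_DDOC_KEY) %% -> true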
-
-
-set_design_doc_start_key(Options, fwd) ->
- Key1 = couch_util:get_value(start_key, Options, ?FIRST_DDOC_KEY),
- Key2 = case Key1 < ?FIRST_DDOC_KEY of
- true -> ?FIRST_DDOC_KEY;
- false -> Key1
- end,
- lists:keystore(start_key, 1, Options, {start_key, Key2});
-set_design_doc_start_key(Options, rev) ->
- Key1 = couch_util:get_value(start_key, Options, ?LAST_DDOC_KEY),
- Key2 = case Key1 > ?LAST_DDOC_KEY of
- true -> ?LAST_DDOC_KEY;
- false -> Key1
- end,
- lists:keystore(start_key, 1, Options, {start_key, Key2}).
-
-
-set_design_doc_end_key(Options, fwd) ->
- case couch_util:get_value(end_key_gt, Options) of
- undefined ->
- Key1 = couch_util:get_value(end_key, Options, ?LAST_DDOC_KEY),
- Key2 = case Key1 > ?LAST_DDOC_KEY of
- true -> ?LAST_DDOC_KEY;
- false -> Key1
- end,
- lists:keystore(end_key, 1, Options, {end_key, Key2});
- EKeyGT ->
- Key2 = case EKeyGT > ?LAST_DDOC_KEY of
- true -> ?LAST_DDOC_KEY;
- false -> EKeyGT
- end,
- lists:keystore(end_key_gt, 1, Options, {end_key_gt, Key2})
- end;
-set_design_doc_end_key(Options, rev) ->
- case couch_util:get_value(end_key_gt, Options) of
- undefined ->
- Key1 = couch_util:get_value(end_key, Options, ?LAST_DDOC_KEY),
- Key2 = case Key1 < ?FIRST_DDOC_KEY of
- true -> ?FIRST_DDOC_KEY;
- false -> Key1
- end,
- lists:keystore(end_key, 1, Options, {end_key, Key2});
- EKeyGT ->
- Key2 = case EKeyGT < ?FIRST_DDOC_KEY of
- true -> ?FIRST_DDOC_KEY;
- false -> EKeyGT
- end,
- lists:keystore(end_key_gt, 1, Options, {end_key_gt, Key2})
- end.
-
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-setup_all() ->
- ok = meck:new(couch_epi, [passthrough]),
- ok = meck:expect(couch_epi, decide, fun(_, _, _, _, _) -> no_decision end),
- ok.
-
-teardown_all(_) ->
- meck:unload().
-
-setup() ->
- meck:reset([couch_epi]).
-
-teardown(_) ->
- ok.
-
-validate_dbname_success_test_() ->
- Cases =
- generate_cases_with_shards("long/co$mplex-/path+/something")
- ++ generate_cases_with_shards("something")
- ++ lists:append(
- [generate_cases_with_shards(?b2l(SystemDb))
- || SystemDb <- ?SYSTEM_DATABASES]),
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [should_pass_validate_dbname(A) || {_, A} <- Cases]
- }
- }.
-
-validate_dbname_fail_test_() ->
- Cases = generate_cases("_long/co$mplex-/path+/_something")
- ++ generate_cases("_something")
- ++ generate_cases_with_shards("long/co$mplex-/path+/_something#")
- ++ generate_cases_with_shards("long/co$mplex-/path+/some.thing")
- ++ generate_cases("!abcdefg/werwej/_users")
- ++ generate_cases_with_shards("!abcdefg/werwej/_users"),
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [should_fail_validate_dbname(A) || {_, A} <- Cases]
- }
- }.
-
-normalize_dbname_test_() ->
- Cases = generate_cases_with_shards("long/co$mplex-/path+/_something")
- ++ generate_cases_with_shards("_something"),
- WithExpected = [{?l2b(filename:rootname(A)), B} || {A, B} <- Cases],
- [{test_name({Expected, Db}), ?_assertEqual(Expected, normalize_dbname(Db))}
- || {Expected, Db} <- WithExpected].
-
-dbname_suffix_test_() ->
- Cases = generate_cases_with_shards("long/co$mplex-/path+/_something")
- ++ generate_cases_with_shards("_something"),
- WithExpected = [{?l2b(filename:basename(Arg)), Db} || {Arg, Db} <- Cases],
- [{test_name({Expected, Db}), ?_assertEqual(Expected, dbname_suffix(Db))}
- || {Expected, Db} <- WithExpected].
-
-is_system_db_name_test_() ->
- Cases = lists:append([
- generate_cases_with_shards("long/co$mplex-/path+/" ++ ?b2l(Db))
- || Db <- ?SYSTEM_DATABASES]
- ++ [generate_cases_with_shards(?b2l(Db)) || Db <- ?SYSTEM_DATABASES
- ]),
- WithExpected = [{?l2b(filename:basename(filename:rootname(Arg))), Db}
- || {Arg, Db} <- Cases],
- [{test_name({Expected, Db}) ++ " in ?SYSTEM_DATABASES",
- ?_assert(is_system_db_name(Db))} || {Expected, Db} <- WithExpected].
-
-should_pass_validate_dbname(DbName) ->
- {test_name(DbName), ?_assertEqual(ok, validate_dbname(DbName))}.
-
-should_fail_validate_dbname(DbName) ->
- {test_name(DbName), ?_test(begin
- Result = validate_dbname(DbName),
- ?assertMatch({error, {illegal_database_name, _}}, Result),
- {error, {illegal_database_name, FailedDbName}} = Result,
- ?assertEqual(to_binary(DbName), FailedDbName),
- ok
- end)}.
-
-calculate_start_seq_test_() ->
- {
- setup,
- fun setup_start_seq_all/0,
- fun teardown_start_seq_all/1,
- {
- foreach,
- fun setup_start_seq/0,
- fun teardown_start_seq/1,
- [
- t_calculate_start_seq_uuid_mismatch(),
- t_calculate_start_seq_is_owner(),
- t_calculate_start_seq_not_owner(),
- t_calculate_start_seq_raw(),
- t_calculate_start_seq_epoch_mismatch()
- ]
- }
- }.
-
-setup_start_seq_all() ->
- meck:new(couch_db_engine, [passthrough]),
- meck:expect(couch_db_engine, get_uuid, fun(_) -> <<"foo">> end),
- ok = meck:expect(couch_log, warning, 2, ok),
- Epochs = [
- {node2, 10},
- {node1, 1}
- ],
- meck:expect(couch_db_engine, get_epochs, fun(_) -> Epochs end).
-
-teardown_start_seq_all(_) ->
- meck:unload().
-
-setup_start_seq() ->
- meck:reset([
- couch_db_engine,
- couch_log
- ]).
-
-teardown_start_seq(_) ->
- ok.
-
-t_calculate_start_seq_uuid_mismatch() ->
- ?_test(begin
- Db = test_util:fake_db([]),
- Seq = calculate_start_seq(Db, node2, {15, <<"baz">>}),
- ?assertEqual(0, Seq)
- end).
-
-t_calculate_start_seq_is_owner() ->
- ?_test(begin
- Db = test_util:fake_db([]),
- Seq = calculate_start_seq(Db, node2, {15, <<"foo">>}),
- ?assertEqual(15, Seq)
- end).
-
-t_calculate_start_seq_not_owner() ->
- ?_test(begin
- Db = test_util:fake_db([]),
- Seq = calculate_start_seq(Db, node1, {15, <<"foo">>}),
- ?assertEqual(0, Seq)
- end).
-
-t_calculate_start_seq_raw() ->
- ?_test(begin
- Db = test_util:fake_db([]),
- Seq = calculate_start_seq(Db, node1, 13),
- ?assertEqual(13, Seq)
- end).
-
-t_calculate_start_seq_epoch_mismatch() ->
- ?_test(begin
- Db = test_util:fake_db([]),
- SeqIn = {replace, not_this_node, get_uuid(Db), 42},
- Seq = calculate_start_seq(Db, node1, SeqIn),
- ?assertEqual(0, Seq)
- end).
-
-is_owner_test() ->
- ?assertNot(is_owner(foo, 1, [])),
- ?assertNot(is_owner(foo, 1, [{foo, 1}])),
- ?assert(is_owner(foo, 2, [{foo, 1}])),
- ?assert(is_owner(foo, 50, [{bar, 100}, {foo, 1}])),
- ?assert(is_owner(foo, 50, [{baz, 200}, {bar, 100}, {foo, 1}])),
- ?assert(is_owner(bar, 150, [{baz, 200}, {bar, 100}, {foo, 1}])),
- ?assertError(duplicate_epoch, validate_epochs([{foo, 1}, {bar, 1}])),
- ?assertError(epoch_order, validate_epochs([{foo, 100}, {bar, 200}])).
-
-to_binary(DbName) when is_list(DbName) ->
- ?l2b(DbName);
-to_binary(DbName) when is_binary(DbName) ->
- DbName.
-
-test_name({Expected, DbName}) ->
- lists:flatten(io_lib:format("~p -> ~p", [DbName, Expected]));
-test_name(DbName) ->
- lists:flatten(io_lib:format("~p", [DbName])).
-
-generate_cases_with_shards(DbName) ->
- DbNameWithShard = add_shard(DbName),
- DbNameWithShardAndExtension = add_shard(DbName) ++ ".couch",
- Cases = [
- DbName, ?l2b(DbName),
- DbNameWithShard, ?l2b(DbNameWithShard),
- DbNameWithShardAndExtension, ?l2b(DbNameWithShardAndExtension)
- ],
- [{DbName, Case} || Case <- Cases].
-
-add_shard(DbName) ->
- "shards/00000000-3fffffff/" ++ DbName ++ ".1415960794".
-
-generate_cases(DbName) ->
- [{DbName, DbName}, {DbName, ?l2b(DbName)}].
-
--endif.
diff --git a/src/couch/src/couch_db_engine.erl b/src/couch/src/couch_db_engine.erl
deleted file mode 100644
index 9adc9929d..000000000
--- a/src/couch/src/couch_db_engine.erl
+++ /dev/null
@@ -1,1105 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_engine).
-
-
--include("couch_db.hrl").
--include("couch_db_int.hrl").
-
-
--type filepath() :: iolist().
--type docid() :: binary().
--type rev() :: {non_neg_integer(), binary()}.
--type revs() :: [rev()].
--type json() :: any().
--type uuid() :: binary().
--type purge_seq() :: non_neg_integer().
-
--type doc_pair() :: {
- #full_doc_info{} | not_found,
- #full_doc_info{} | not_found
- }.
-
--type doc_pairs() :: [doc_pair()].
-
--type db_open_options() :: [
- create
- ].
-
--type delete_options() :: [
- {context, delete | compaction} |
- sync
- ].
-
--type purge_info() :: {purge_seq(), uuid(), docid(), revs()}.
--type epochs() :: [{Node::atom(), UpdateSeq::non_neg_integer()}].
--type size_info() :: [{Name::atom(), Size::non_neg_integer()}].
--type partition_info() :: [
- {partition, Partition::binary()} |
- {doc_count, DocCount::non_neg_integer()} |
- {doc_del_count, DocDelCount::non_neg_integer()} |
- {sizes, size_info()}
-].
-
--type write_stream_options() :: [
- {buffer_size, Size::pos_integer()} |
- {encoding, atom()} |
- {compression_level, non_neg_integer()}
- ].
-
--type doc_fold_options() :: [
- {start_key, Key::any()} |
- {end_key, Key::any()} |
- {end_key_gt, Key::any()} |
- {dir, fwd | rev} |
- include_reductions |
- include_deleted
- ].
-
--type changes_fold_options() :: [
- {dir, fwd | rev}
- ].
-
--type purge_fold_options() :: [
- {start_key, Key::any()} |
- {end_key, Key::any()} |
- {end_key_gt, Key::any()} |
- {dir, fwd | rev}
- ].
-
--type db_handle() :: any().
-
--type doc_fold_fun() :: fun((#full_doc_info{}, UserAcc::any()) ->
- {ok, NewUserAcc::any()} |
- {stop, NewUserAcc::any()}).
-
--type local_doc_fold_fun() :: fun((#doc{}, UserAcc::any()) ->
- {ok, NewUserAcc::any()} |
- {stop, NewUserAcc::any()}).
-
--type changes_fold_fun() :: fun((#doc_info{}, UserAcc::any()) ->
- {ok, NewUserAcc::any()} |
- {stop, NewUserAcc::any()}).
-
--type purge_fold_fun() :: fun((purge_info(), UserAcc::any()) ->
- {ok, NewUserAcc::any()} |
- {stop, NewUserAcc::any()}).
-
-
-% This is called by couch_server to determine which
-% engine should be used for the given database. DbPath
-% is calculated based on the DbName and the configured
-% extension for a given engine. The first engine to
-% return true is the engine that will be used for the
-% database.
--callback exists(DbPath::filepath()) -> boolean().
-
-
-% This is called by couch_server to delete a database. It
-% is called from inside the couch_server process which
-% means that the storage engine does not have to guarantee
-% its own consistency checks when executing in this
-% context. However, since this is executed in the context
-% of couch_server, it should return relatively quickly.
--callback delete(
- RootDir::filepath(),
- DbPath::filepath(),
- DelOpts::delete_options()) ->
- ok | {error, Reason::atom()}.
-
-
-% This function can be called from multiple contexts. It
-% will either be called just before a call to delete/3 above
-% or when a compaction is cancelled which executes in the
-% context of a couch_db_updater process. It is intended to
-% remove any temporary files used during compaction that
-% may be used to recover from a failed compaction swap.
--callback delete_compaction_files(
- RootDir::filepath(),
- DbPath::filepath(),
- DelOpts::delete_options()) ->
- ok.
-
-
-% This is called from the couch_db_updater:init/1 context. As
-% such this means that it is guaranteed to only have one process
-% executing for a given DbPath argument (ie, opening a given
-% database is guaranteed to only happen in a single process).
-% However, multiple processes may be trying to open different
-% databases concurrently, so if a database requires a shared
-% resource, that resource will need concurrency control at the
-% storage engine layer.
-%
-% The returned DbHandle should be a term that can be freely
-% copied between processes and accessed concurrently. However,
-% it is guaranteed that the handle will only ever be mutated
-% in a single threaded context (i.e., within the couch_db_updater
-% process).
--callback init(DbPath::filepath(), db_open_options()) ->
- {ok, DbHandle::db_handle()}.
-
-
-% This is called in the context of couch_db_updater:terminate/2
-% and as such has the same properties as init/2. It's guaranteed
-% to be consistent for a given database but may be called by many
-% databases concurrently.
--callback terminate(Reason::any(), DbHandle::db_handle()) -> Ignored::any().
-
-
-% This is called in the context of couch_db_updater:handle_call/3
-% for any message that is unknown. It can be used to handle messages
-% from asynchronous processes like the engine's compactor if it has one.
--callback handle_db_updater_call(Msg::any(), DbHandle::db_handle()) ->
- {reply, Resp::any(), NewDbHandle::db_handle()} |
- {stop, Reason::any(), Resp::any(), NewDbHandle::db_handle()}.
-
-
-% This is called in the context of couch_db_updater:handle_info/2
-% and has the same properties as handle_call/3.
--callback handle_db_updater_info(Msg::any(), DbHandle::db_handle()) ->
- {noreply, NewDbHandle::db_handle()} |
- {noreply, NewDbHandle::db_handle(), Timeout::timeout()} |
- {stop, Reason::any(), NewDbHandle::db_handle()}.
-
-
-% These functions are called by any process opening or closing
-% a database. As such they need to be able to handle being
-% called concurrently. For example, the legacy engine uses these
-% to add monitors to the main engine process.
--callback incref(DbHandle::db_handle()) -> {ok, NewDbHandle::db_handle()}.
--callback decref(DbHandle::db_handle()) -> ok.
--callback monitored_by(DbHandle::db_handle()) -> [pid()].
-
-
-% This is called in the context of couch_db_updater:handle_info/2
-% and should return the timestamp of the last activity of
-% the database. If a storage engine has no notion of activity, or the
-% value would be hard to report, it's ok to just return the
-% result of os:timestamp/0, as this will merely prevent idle
-% databases from being closed automatically.
--callback last_activity(DbHandle::db_handle()) -> erlang:timestamp().
-
-
-% All of the get_* functions may be called from many
-% processes concurrently.
-
-% The database should make a note of the update sequence when it
-% was last compacted. If the database doesn't need compacting it
-% can just hard code a return value of 0.
--callback get_compacted_seq(DbHandle::db_handle()) ->
- CompactedSeq::non_neg_integer().
-
-
-% The number of documents in the database which have all leaf
-% revisions marked as deleted.
--callback get_del_doc_count(DbHandle::db_handle()) ->
- DelDocCount::non_neg_integer().
-
-
-% This number is reported in the database info properties and
-% as such can be any JSON value.
--callback get_disk_version(DbHandle::db_handle()) -> Version::json().
-
-
-% The number of documents in the database that have one or more
-% leaf revisions not marked as deleted.
--callback get_doc_count(DbHandle::db_handle()) -> DocCount::non_neg_integer().
-
-
-% The epochs track which node owned the database starting at
-% a given update sequence. Each time a database is opened it
-% should look at the epochs. If the most recent entry is not
-% for the current node it should add an entry that will be
-% written the next time a write is performed. An entry is
-% simply a {node(), CurrentUpdateSeq} tuple.
--callback get_epochs(DbHandle::db_handle()) -> Epochs::epochs().
-
-
-% Get the current purge sequence known to the engine. This
-% value should be updated during calls to purge_docs.
--callback get_purge_seq(DbHandle::db_handle()) -> purge_seq().
-
-
-% Get the oldest purge sequence known to the engine
--callback get_oldest_purge_seq(DbHandle::db_handle()) -> purge_seq().
-
-
-% Get the purge infos limit. This should just return the last
-% value that was passed to set_purge_infos_limit/2.
--callback get_purge_infos_limit(DbHandle::db_handle()) -> pos_integer().
-
-
-% Get the revision limit. This should just return the last
-% value that was passed to set_revs_limit/2.
--callback get_revs_limit(DbHandle::db_handle()) -> RevsLimit::pos_integer().
-
-
-% Get the current security properties. This should just return
-% the last value that was passed to set_security/2.
--callback get_security(DbHandle::db_handle()) -> SecProps::any().
-
-
-% Get the current properties.
--callback get_props(DbHandle::db_handle()) -> Props::[any()].
-
-
-% This information is displayed in the database info properties. It
-% should just be a list of {Name::atom(), Size::non_neg_integer()}
-% tuples that will then be combined across shards. Currently,
-% various modules expect there to at least be values for:
-%
-% file - Number of bytes on disk
-%
-% active - Theoretical minimum number of bytes to store this db on disk
-% which is used to guide decisions on compaction
-%
-% external - Number of bytes that would be required to represent the
-% contents outside of the database (for capacity and backup
-% planning)
--callback get_size_info(DbHandle::db_handle()) -> SizeInfo::size_info().
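-
-% An illustrative (hypothetical) return value would be:
-%
-%   [{file, 1048576}, {active, 524288}, {external, 262144}]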
-
-
-% This returns the information for the given partition:
-% the partition name, doc count, deleted doc count and two sizes:
-%
-% active - Theoretical minimum number of bytes to store this partition on disk
-%
-% external - Number of bytes that would be required to represent the
-% contents of this partition outside of the database
--callback get_partition_info(DbHandle::db_handle(), Partition::binary()) ->
- partition_info().
-
-
-% The current update sequence of the database. The update
-% sequence should be incremented for every revision added to
-% the database.
--callback get_update_seq(DbHandle::db_handle()) -> UpdateSeq::non_neg_integer().
-
-
-% Whenever a database is created it should generate a
-% persistent UUID for identification in case the shard should
-% ever need to be moved between nodes in a cluster.
--callback get_uuid(DbHandle::db_handle()) -> UUID::binary().
-
-
-% These functions are only called by couch_db_updater and
-% as such are guaranteed to be single threaded calls. The
-% database should simply store these values somewhere so
-% they can be returned by the corresponding get_* calls.
-
--callback set_revs_limit(DbHandle::db_handle(), RevsLimit::pos_integer()) ->
- {ok, NewDbHandle::db_handle()}.
-
-
--callback set_purge_infos_limit(DbHandle::db_handle(), Limit::pos_integer()) ->
- {ok, NewDbHandle::db_handle()}.
-
-
--callback set_security(DbHandle::db_handle(), SecProps::any()) ->
- {ok, NewDbHandle::db_handle()}.
-
-
-% This function is only called by couch_db_updater and
-% as such is guaranteed to be a single threaded call. The
-% database should simply store the provided property list
-% unaltered.
-
--callback set_props(DbHandle::db_handle(), Props::any()) ->
- {ok, NewDbHandle::db_handle()}.
-
-
-% Set the current update sequence of the database. The intention is to use this
-% when copying a database such that the destination update sequence should
-% match exactly the source update sequence.
--callback set_update_seq(
- DbHandle::db_handle(),
- UpdateSeq::non_neg_integer()) ->
- {ok, NewDbHandle::db_handle()}.
-
-
-% This function will be called by many processes concurrently.
-% It should return a #full_doc_info{} record or not_found for
-% every provided DocId in the order those DocId's appear in
-% the input.
-%
-% Traditionally this function will only return documents that
-% were present in the database when the DbHandle was retrieved
-% from couch_server. It is currently unknown what would break
-% if a storage engine deviated from that property.
--callback open_docs(DbHandle::db_handle(), DocIds::[docid()]) ->
- [#full_doc_info{} | not_found].
-
-
-% This function will be called by many processes concurrently.
-% It should return a #doc{} record or not_found for every
-% provided DocId in the order they appear in the input.
-%
-% The same caveats around database snapshots from open_docs
-% apply to this function (although this function is called
-% rather less frequently so it may not be as big of an
-% issue).
--callback open_local_docs(DbHandle::db_handle(), DocIds::[docid()]) ->
- [#doc{} | not_found].
-
-
-% This function will be called from many contexts concurrently.
-% The provided RawDoc is a #doc{} record that has its body
-% value set to the body value returned from write_doc_body/2.
-%
-% This API exists so that storage engines can store document
-% bodies externally from the #full_doc_info{} record (which
-% is the traditional approach and is recommended).
--callback read_doc_body(DbHandle::db_handle(), RawDoc::doc()) ->
- doc().
-
-
-% This function will be called from many contexts concurrently.
-% If the storage engine has a purge_info() record for any of the
-% provided UUIDs, those purge_info() records should be returned. The
-% resulting list should have the same length as the input list of
-% UUIDs.
--callback load_purge_infos(DbHandle::db_handle(), [uuid()]) ->
- [purge_info() | not_found].
-
-
-% This function is called concurrently by any client process
-% that is writing a document. It should accept a #doc{}
-% record and return a #doc{} record with a mutated body it
-% wishes to have written to disk by write_doc_body/2.
-%
-% This API exists so that storage engines can compress
-% document bodies in parallel by client processes rather
-% than forcing all compression to occur single threaded
-% in the context of the couch_db_updater process.
--callback serialize_doc(DbHandle::db_handle(), Doc::doc()) ->
- doc().
-
-
-% This function is called in the context of a couch_db_updater
-% which means it is single threaded for the given DbHandle.
-%
-% The returned #doc{} record should have its Body set to a value
-% that will be stored in the #full_doc_info{} record's revision
-% tree leaves which is passed to read_doc_body/2 above when
-% a client wishes to read a document.
-%
-% The BytesWritten return value is used to determine the number
-% of active bytes in the database, which in turn is used to
-% decide when to compact this database.
--callback write_doc_body(DbHandle::db_handle(), Doc::doc()) ->
- {ok, FlushedDoc::doc(), BytesWritten::non_neg_integer()}.
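-% A hedged sketch of the intended write path (variable names are
-% illustrative, not part of the API):
-%
-%     Doc1 = serialize_doc(Db, Doc0),                       % client process
-%     {ok, Doc2, BytesWritten} = write_doc_body(Db, Doc1),  % updater process
-%
-% Doc2#doc.body is then what ends up in the rev tree leaf and is handed
-% back to read_doc_body/2 when a client later reads the document.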
-
-
-% This function is called from the context of couch_db_updater
-% and as such is guaranteed single threaded for the given
-% DbHandle.
-%
-% This is probably the most complicated function in the entire
-% API due to a few subtle behavioral requirements imposed by
-% CouchDB's storage model.
-%
-% The Pairs argument is a list of pairs (2-tuples) of
-% #full_doc_info{} records. The first element of the pair is
-% the #full_doc_info{} that exists on disk. The second element
-% is the new version that should be written to disk. There are
-% two basic cases that should be followed:
-%
-% 1. {not_found, #full_doc_info{}} - A new document was created
-% 2. {#full_doc_info{}, #full_doc_info{}} - A document was updated
-%
-% The cases are fairly straightforward as long as entries that
-% move within the update sequence are accounted for properly.
-%
-% The LocalDocs variable is applied separately. It's important to
-% note for new storage engine authors that these documents are
-% separate because they should *not* be included as part of the
-% changes index for the database.
-%
-% Traditionally an invocation of write_doc_infos should be all
-% or nothing in that if an error occurs (or the VM dies)
-% then the database doesn't retain any of the changes. However
-% as long as a storage engine maintains consistency this should
-% not be an issue as it has never been a guarantee and the
-% batches are non-deterministic (from the point of view of the
-% client).
--callback write_doc_infos(
- DbHandle::db_handle(),
- Pairs::doc_pairs(),
- LocalDocs::[#doc{}]) ->
- {ok, NewDbHandle::db_handle()}.
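-% A minimal sketch (an illustration, not taken from any particular
-% engine) of the two pair shapes an updater might pass in:
-%
-%     Created = {not_found, NewFDI},
-%     Updated = {OldFDI, OldFDI#full_doc_info{
-%         update_seq = NewSeq,
-%         rev_tree = NewRevTree
-%     }},
-%     {ok, St1} = write_doc_infos(St0, [Created, Updated], LocalDocs).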
-
-
-% This function is called from the context of couch_db_updater
-% and as such is guaranteed single threaded for the given
-% DbHandle.
-%
-% Each doc_pair() is a 2-tuple of #full_doc_info{} records. The
-% first element of the pair is the #full_doc_info{} that exists
-% on disk. The second element is the new version that should be
-% written to disk. There are three basic cases that should be considered:
-%
-% 1. {#full_doc_info{}, #full_doc_info{}} - A document was partially purged
-% 2. {#full_doc_info{}, not_found} - A document was completely purged
-% 3. {not_found, not_found} - A no-op purge
-%
-% In case 1, non-tail-append engines may have to remove revisions
-% specifically rather than rely on compaction to remove them. Also
-% note that the new #full_doc_info{} will have a different update_seq
-% that will need to be reflected in the changes feed.
-%
-% In case 2 the document was purged completely, which means it
-% needs to be removed from the database, including its entry in
-% the update sequence.
-%
-% In case 3 we just need to store the purge_info() to know that it
-% was processed even though it produced no changes to the database.
-%
-% The purge_info() tuples contain the purge_seq, uuid, docid and
-% revisions that were requested to be purged. This should be persisted
-% in such a way that we can efficiently load purge_info() by its UUID
-% as well as iterate over purge_info() entries in order of their PurgeSeq.
--callback purge_docs(DbHandle::db_handle(), [doc_pair()], [purge_info()]) ->
- {ok, NewDbHandle::db_handle()}.
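-% A hedged illustration of the purge_info() shape described above (the
-% values are made up):
-%
-%     PurgeInfos = [{PurgeSeq, UUID, <<"docid">>, [{1, RevHash}]}],
-%     {ok, St1} = purge_docs(St0, DocPairs, PurgeInfos).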
-
-
-% This function should be called from a single threaded context and
-% should be used to copy purge infos from one database to another
-% when copying a database.
--callback copy_purge_infos(DbHandle::db_handle(), [purge_info()]) ->
- {ok, NewDbHandle::db_handle()}.
-
-
-% This function is called in the context of couch_db_updater and
-% as such is single threaded for any given DbHandle.
-%
-% This call is made periodically to ensure that the database has
-% stored all updates on stable storage. (ie, here is where you fsync).
--callback commit_data(DbHandle::db_handle()) ->
- {ok, NewDbHandle::db_handle()}.
-
-
-% This function is called by multiple processes concurrently.
-%
-% This function along with open_read_stream are part of the
-% attachments API. For the time being I'm leaving these mostly
-% undocumented. There are implementations of this in both the
-% legacy btree engine as well as the alternative engine
-% implementations for the curious, however this is a part of the
-% API for which I'd like feedback.
-%
-% Currently an engine can elect to not implement these APIs
-% by throwing the atom not_supported.
--callback open_write_stream(
- DbHandle::db_handle(),
- Options::write_stream_options()) ->
- {ok, pid()}.
-
-
-% See the documentation for open_write_stream
--callback open_read_stream(DbHandle::db_handle(), StreamDiskInfo::any()) ->
- {ok, {Module::atom(), ReadStreamState::any()}}.
-
-
-% See the documentation for open_write_stream
--callback is_active_stream(DbHandle::db_handle(), ReadStreamState::any()) ->
- boolean().
-
-
-% This function is called by many processes concurrently.
-%
-% This function is called to fold over the documents in
-% the database sorted by the raw byte collation order of
-% the document id. For each document id, the supplied user
-% function should be invoked with the first argument set
-% to the #full_doc_info{} record and the second argument
-% set to the current user supplied accumulator. The return
-% value of the user function is a 2-tuple of {Go, NewUserAcc}.
-% The NewUserAcc value should then replace the current
-% user accumulator. If Go is the atom ok, iteration over
-% documents should continue. If Go is the atom stop, then
-% iteration should halt and the return value should be
-% {ok, NewUserAcc}.
-%
-% Possible options to this function include:
-%
-% 1. start_key - Start iteration at the provided key or
-% just after it if the key doesn't exist
-% 2. end_key - Stop iteration just after the provided key
-% 3. end_key_gt - Stop iteration prior to visiting the provided
-% key
-% 4. dir - The atom fwd or rev. This is to be able to iterate
-% over documents in reverse order. The logic for comparing
-% start_key, end_key, and end_key_gt is then reversed (ie,
-% when rev, start_key should be greater than end_key if the
-% user wishes to see results)
-% 5. include_reductions - This is a hack for _all_docs since
-% it currently relies on reductions to count an offset. This
-% is a terrible hack that will need to be addressed by the
-% API in the future. If this option is present the supplied
-% user function expects three arguments, where the first
-% argument is a #full_doc_info{} record, the second argument
-% is the current list of reductions to the left of the current
-% document, and the third argument is the current user
-% accumulator. The return value from the user function is
-% unaffected. However the final return value of the function
-% should include the final total reductions as the second
-% element of a 3-tuple. Like I said, this is a hack.
-% 6. include_deleted - By default deleted documents are not
-% included in fold_docs calls. However in some special
-% cases we do want to see them (as of now, just in couch_changes
-% during the design document changes optimization)
-%
-% Historically, if a process called this function repeatedly it
-% would see the same results returned even if there were concurrent
-% updates happening. However there doesn't seem to be any instance of
-% that actually happening so a storage engine that includes new results
-% between invocations shouldn't have any issues.
--callback fold_docs(
- DbHandle::db_handle(),
- UserFold::doc_fold_fun(),
- UserAcc::any(),
- doc_fold_options()) ->
- {ok, LastUserAcc::any()}.
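-% A minimal sketch (an assumption, not part of the behaviour definition)
-% of a user fold fun that collects ids and stops after the third document:
-%
-%     UserFun = fun(#full_doc_info{id = Id}, {N, Ids}) ->
-%         case N + 1 of
-%             3 -> {stop, {3, [Id | Ids]}};
-%             M -> {ok, {M, [Id | Ids]}}
-%         end
-%     end,
-%     {ok, {_, Ids}} = fold_docs(Db, UserFun, {0, []}, [{dir, fwd}]).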
-
-
-% This function may be called by many processes concurrently.
-%
-% This should behave exactly the same as fold_docs/4 except that it
-% should only return local documents and the first argument to the
-% user function is a #doc{} record, not a #full_doc_info{}.
--callback fold_local_docs(
- DbHandle::db_handle(),
- UserFold::local_doc_fold_fun(),
- UserAcc::any(),
- doc_fold_options()) ->
- {ok, LastUserAcc::any()}.
-
-
-% This function may be called by many processes concurrently.
-%
-% This function is called to fold over the documents (not local
-% documents) in order of their most recent update. Each document
-% in the database should have exactly one entry in this sequence.
-% If a document is updated during a call to this function it should
-% not be included twice as that will probably lead to Very Bad Things.
-%
-% This should behave similarly to fold_docs/4 in that the supplied
-% user function should be invoked with a #full_doc_info{} record
-% as the first argument and the current user accumulator as the
-% second argument. The same semantics for the return value from the
-% user function should be handled as in fold_docs/4.
-%
-% The StartSeq parameter indicates where the fold should start
-% *after*. As in, if a change with a value of StartSeq exists in the
-% database it should not be included in the fold.
-%
-% The only option currently supported by the API is the `dir`
-% option that should behave the same as for fold_docs.
--callback fold_changes(
- DbHandle::db_handle(),
- StartSeq::non_neg_integer(),
- UserFold::changes_fold_fun(),
- UserAcc::any(),
- changes_fold_options()) ->
- {ok, LastUserAcc::any()}.
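-% A short hedged example: resuming a changes fold from a previously seen
-% sequence and collecting {Seq, Id} pairs:
-%
-%     ChangesFun = fun(#full_doc_info{id = Id, update_seq = Seq}, Acc) ->
-%         {ok, [{Seq, Id} | Acc]}
-%     end,
-%     {ok, Changes} = fold_changes(Db, SinceSeq, ChangesFun, [], []).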
-
-
-% This function may be called by many processes concurrently.
-%
-% This function is called to fold over purge requests in order of
-% their purge_seq, oldest first (increasing purge_seq order).
-%
-% The StartPurgeSeq parameter indicates where the fold should start *after*.
--callback fold_purge_infos(
- DbHandle::db_handle(),
- StartPurgeSeq::purge_seq(),
- UserFold::purge_fold_fun(),
- UserAcc::any(),
- purge_fold_options()) ->
- {ok, LastUserAcc::any()}.
-
-
-% This function may be called by many processes concurrently.
-%
-% This function is called to count the number of documents changed
-% since the given UpdateSeq (ie, not including the possible change
-% at exactly UpdateSeq). It is currently only used internally to
-% provide a status update in a replication's _active_tasks entry
-% to indicate how many documents are left to be processed.
-%
-% This is a fairly difficult thing to support in engines that don't
-% behave exactly like a tree with efficient support for counting rows
-% between keys. As such, returning 0 or even just the difference
-% between the current update sequence and the given UpdateSeq is
-% possibly the best some storage engines can provide. This may lead
-% to some confusion when interpreting the
-% _active_tasks entry if the storage engine isn't accounted for by the
-% client.
--callback count_changes_since(
- DbHandle::db_handle(),
- UpdateSeq::non_neg_integer()) ->
- TotalChanges::non_neg_integer().
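-% One coarse fallback an engine without efficient range counting could
-% return, per the note above (a sketch, not a requirement of the API):
-%
-%     count_changes_since(St, UpdateSeq) ->
-%         get_update_seq(St) - UpdateSeq.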
-
-
-% This function is called in the context of couch_db_updater and as
-% such is guaranteed to be single threaded for the given DbHandle.
-%
-% If a storage engine requires compaction this is a trigger to start
-% it off. However a storage engine can do whatever it wants here. As
-% this is fairly engine specific there's not a lot of guidance that is
-% generally applicable.
-%
-% When compaction is finished the compactor should use
-% gen_server:cast/2 to send a {compact_done, CompactEngine, CompactInfo}
-% message to the Parent pid provided. Currently CompactEngine
-% must be the same engine that started the compaction and CompactInfo
-% is an arbitrary term that's passed to finish_compaction/4.
--callback start_compaction(
- DbHandle::db_handle(),
- DbName::binary(),
- Options::db_open_options(),
- Parent::pid()) ->
- {ok, NewDbHandle::db_handle(), CompactorPid::pid()}.
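-% A hedged sketch of the completion message the compactor process is
-% expected to send back (the CompactInfo shape is engine specific):
-%
-%     gen_server:cast(Parent, {compact_done, CompactEngine, CompactInfo}).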
-
-
-% This function is called in the context of couch_db_updater and as
-% such is guaranteed to be single threaded for the given DbHandle.
-%
-% Same as for start_compaction, this will be extremely specific to
-% any given storage engine.
-%
-% The split in the API here is so that if the storage engine needs
-% to update the DbHandle state of the couch_db_updater it can, since
-% finish_compaction/4 is called in the context of the couch_db_updater.
--callback finish_compaction(
- OldDbHandle::db_handle(),
- DbName::binary(),
- Options::db_open_options(),
- CompactInfo::any()) ->
- {ok, CompactedDbHandle::db_handle(), CompactorPid::pid() | undefined}.
-
-
--export([
- exists/2,
- delete/4,
- delete_compaction_files/4,
-
- init/3,
- terminate/2,
- handle_db_updater_call/3,
- handle_db_updater_info/2,
-
- incref/1,
- decref/1,
- monitored_by/1,
-
- last_activity/1,
-
- get_engine/1,
- get_compacted_seq/1,
- get_del_doc_count/1,
- get_disk_version/1,
- get_doc_count/1,
- get_epochs/1,
- get_purge_seq/1,
- get_oldest_purge_seq/1,
- get_purge_infos_limit/1,
- get_revs_limit/1,
- get_security/1,
- get_props/1,
- get_size_info/1,
- get_partition_info/2,
- get_update_seq/1,
- get_uuid/1,
-
- set_revs_limit/2,
- set_security/2,
- set_purge_infos_limit/2,
- set_props/2,
-
- set_update_seq/2,
-
- open_docs/2,
- open_local_docs/2,
- read_doc_body/2,
- load_purge_infos/2,
-
- serialize_doc/2,
- write_doc_body/2,
- write_doc_infos/3,
- purge_docs/3,
- copy_purge_infos/2,
- commit_data/1,
-
- open_write_stream/2,
- open_read_stream/2,
- is_active_stream/2,
-
- fold_docs/4,
- fold_local_docs/4,
- fold_changes/5,
- fold_purge_infos/5,
- count_changes_since/2,
-
- start_compaction/1,
- finish_compaction/2,
- trigger_on_compact/1
-]).
-
-
-exists(Engine, DbPath) ->
- Engine:exists(DbPath).
-
-
-delete(Engine, RootDir, DbPath, DelOpts) when is_list(DelOpts) ->
- Engine:delete(RootDir, DbPath, DelOpts).
-
-
-delete_compaction_files(Engine, RootDir, DbPath, DelOpts)
- when is_list(DelOpts) ->
- Engine:delete_compaction_files(RootDir, DbPath, DelOpts).
-
-
-init(Engine, DbPath, Options) ->
- case Engine:init(DbPath, Options) of
- {ok, EngineState} ->
- {ok, {Engine, EngineState}};
- Error ->
- throw(Error)
- end.
-
-
-terminate(Reason, #db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:terminate(Reason, EngineState).
-
-
-handle_db_updater_call(Msg, _From, #db{} = Db) ->
- #db{
- engine = {Engine, EngineState}
- } = Db,
- case Engine:handle_db_updater_call(Msg, EngineState) of
- {reply, Resp, NewState} ->
- {reply, Resp, Db#db{engine = {Engine, NewState}}};
- {stop, Reason, Resp, NewState} ->
- {stop, Reason, Resp, Db#db{engine = {Engine, NewState}}}
- end.
-
-
-handle_db_updater_info(Msg, #db{} = Db) ->
- #db{
- name = Name,
- engine = {Engine, EngineState}
- } = Db,
- case Engine:handle_db_updater_info(Msg, EngineState) of
- {noreply, NewState} ->
- {noreply, Db#db{engine = {Engine, NewState}}};
- {noreply, NewState, Timeout} ->
- {noreply, Db#db{engine = {Engine, NewState}}, Timeout};
- {stop, Reason, NewState} ->
- couch_log:error("DB ~s shutting down: ~p", [Name, Msg]),
- {stop, Reason, Db#db{engine = {Engine, NewState}}}
- end.
-
-
-incref(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- {ok, NewState} = Engine:incref(EngineState),
- {ok, Db#db{engine = {Engine, NewState}}}.
-
-
-decref(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:decref(EngineState).
-
-
-monitored_by(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:monitored_by(EngineState).
-
-
-last_activity(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:last_activity(EngineState).
-
-
-get_engine(#db{} = Db) ->
- #db{engine = {Engine, _}} = Db,
- Engine.
-
-
-get_compacted_seq(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_compacted_seq(EngineState).
-
-
-get_del_doc_count(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_del_doc_count(EngineState).
-
-
-get_disk_version(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_disk_version(EngineState).
-
-
-get_doc_count(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_doc_count(EngineState).
-
-
-get_epochs(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_epochs(EngineState).
-
-
-get_purge_seq(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_purge_seq(EngineState).
-
-
-get_oldest_purge_seq(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_oldest_purge_seq(EngineState).
-
-
-get_purge_infos_limit(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_purge_infos_limit(EngineState).
-
-
-get_revs_limit(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_revs_limit(EngineState).
-
-
-get_security(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_security(EngineState).
-
-
-get_props(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_props(EngineState).
-
-
-get_size_info(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_size_info(EngineState).
-
-
-get_partition_info(#db{} = Db, Partition) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_partition_info(EngineState, Partition).
-
-
-get_update_seq(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_update_seq(EngineState).
-
-get_uuid(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:get_uuid(EngineState).
-
-
-set_revs_limit(#db{} = Db, RevsLimit) ->
- #db{engine = {Engine, EngineState}} = Db,
- {ok, NewSt} = Engine:set_revs_limit(EngineState, RevsLimit),
- {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-set_purge_infos_limit(#db{} = Db, PurgedDocsLimit) ->
- #db{engine = {Engine, EngineState}} = Db,
- {ok, NewSt} = Engine:set_purge_infos_limit(EngineState, PurgedDocsLimit),
- {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-set_security(#db{} = Db, SecProps) ->
- #db{engine = {Engine, EngineState}} = Db,
- {ok, NewSt} = Engine:set_security(EngineState, SecProps),
- {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-set_props(#db{} = Db, Props) ->
- #db{engine = {Engine, EngineState}} = Db,
- {ok, NewSt} = Engine:set_props(EngineState, Props),
- {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-set_update_seq(#db{} = Db, UpdateSeq) ->
- #db{engine = {Engine, EngineState}} = Db,
- {ok, NewSt} = Engine:set_update_seq(EngineState, UpdateSeq),
- {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-open_docs(#db{} = Db, DocIds) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:open_docs(EngineState, DocIds).
-
-
-open_local_docs(#db{} = Db, DocIds) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:open_local_docs(EngineState, DocIds).
-
-
-read_doc_body(#db{} = Db, RawDoc) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:read_doc_body(EngineState, RawDoc).
-
-
-load_purge_infos(#db{} = Db, UUIDs) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:load_purge_infos(EngineState, UUIDs).
-
-
-serialize_doc(#db{} = Db, #doc{} = Doc) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:serialize_doc(EngineState, Doc).
-
-
-write_doc_body(#db{} = Db, #doc{} = Doc) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:write_doc_body(EngineState, Doc).
-
-
-write_doc_infos(#db{} = Db, DocUpdates, LocalDocs) ->
- #db{engine = {Engine, EngineState}} = Db,
- {ok, NewSt} = Engine:write_doc_infos(EngineState, DocUpdates, LocalDocs),
- {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-purge_docs(#db{} = Db, DocUpdates, Purges) ->
- #db{engine = {Engine, EngineState}} = Db,
- {ok, NewSt} = Engine:purge_docs(
- EngineState, DocUpdates, Purges),
- {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-copy_purge_infos(#db{} = Db, Purges) ->
- #db{engine = {Engine, EngineState}} = Db,
- {ok, NewSt} = Engine:copy_purge_infos(
- EngineState, Purges),
- {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-commit_data(#db{} = Db) ->
- #db{engine = {Engine, EngineState}} = Db,
- {ok, NewSt} = Engine:commit_data(EngineState),
- {ok, Db#db{engine = {Engine, NewSt}}}.
-
-
-open_write_stream(#db{} = Db, Options) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:open_write_stream(EngineState, Options).
-
-
-open_read_stream(#db{} = Db, StreamDiskInfo) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:open_read_stream(EngineState, StreamDiskInfo).
-
-
-is_active_stream(#db{} = Db, ReadStreamState) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:is_active_stream(EngineState, ReadStreamState).
-
-
-fold_docs(#db{} = Db, UserFun, UserAcc, Options) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:fold_docs(EngineState, UserFun, UserAcc, Options).
-
-
-fold_local_docs(#db{} = Db, UserFun, UserAcc, Options) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:fold_local_docs(EngineState, UserFun, UserAcc, Options).
-
-
-fold_changes(#db{} = Db, StartSeq, UserFun, UserAcc, Options) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:fold_changes(EngineState, StartSeq, UserFun, UserAcc, Options).
-
-
-fold_purge_infos(#db{} = Db, StartPurgeSeq, UserFun, UserAcc, Options) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:fold_purge_infos(
- EngineState, StartPurgeSeq, UserFun, UserAcc, Options).
-
-
-count_changes_since(#db{} = Db, StartSeq) ->
- #db{engine = {Engine, EngineState}} = Db,
- Engine:count_changes_since(EngineState, StartSeq).
-
-
-start_compaction(#db{} = Db) ->
- #db{
- engine = {Engine, EngineState},
- name = DbName,
- options = Options
- } = Db,
- {ok, NewEngineState, Pid} = Engine:start_compaction(
- EngineState, DbName, Options, self()),
- {ok, Db#db{
- engine = {Engine, NewEngineState},
- compactor_pid = Pid
- }}.
-
-
-finish_compaction(Db, CompactInfo) ->
- #db{
- engine = {Engine, St},
- name = DbName,
- options = Options
- } = Db,
- NewDb = case Engine:finish_compaction(St, DbName, Options, CompactInfo) of
- {ok, NewState, undefined} ->
- couch_event:notify(DbName, compacted),
- Db#db{
- engine = {Engine, NewState},
- compactor_pid = nil
- };
- {ok, NewState, CompactorPid} when is_pid(CompactorPid) ->
- Db#db{
- engine = {Engine, NewState},
- compactor_pid = CompactorPid
- }
- end,
- ok = gen_server:call(couch_server, {db_updated, NewDb}, infinity),
- {ok, NewDb}.
-
-
-trigger_on_compact(DbName) ->
- {ok, DDocs} = get_ddocs(DbName),
- couch_db_plugin:on_compact(DbName, DDocs).
-
-
-get_ddocs(<<"shards/", _/binary>> = DbName) ->
- {_, Ref} = spawn_monitor(fun() ->
- exit(fabric:design_docs(mem3:dbname(DbName)))
- end),
- receive
- {'DOWN', Ref, _, _, {ok, JsonDDocs}} ->
- {ok, lists:map(fun(JsonDDoc) ->
- couch_doc:from_json_obj(JsonDDoc)
- end, JsonDDocs)};
- {'DOWN', Ref, _, _, Else} ->
- Else
- end;
-get_ddocs(DbName) ->
- couch_util:with_db(DbName, fun(Db) ->
- FoldFun = fun(FDI, Acc) ->
- {ok, Doc} = couch_db:open_doc_int(Db, FDI, []),
- {ok, [Doc | Acc]}
- end,
- {ok, Docs} = couch_db:fold_design_docs(Db, FoldFun, [], []),
- {ok, lists:reverse(Docs)}
- end).
diff --git a/src/couch/src/couch_db_epi.erl b/src/couch/src/couch_db_epi.erl
index 21879f683..bfd435ac8 100644
--- a/src/couch/src/couch_db_epi.erl
+++ b/src/couch/src/couch_db_epi.erl
@@ -35,7 +35,6 @@ providers() ->
services() ->
[
- {couch_db, couch_db_plugin},
{feature_flags, couch_flags}
].
diff --git a/src/couch/src/couch_db_header.erl b/src/couch/src/couch_db_header.erl
deleted file mode 100644
index 355364f9b..000000000
--- a/src/couch/src/couch_db_header.erl
+++ /dev/null
@@ -1,405 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_header).
-
-
--export([
- new/0,
- from/1,
- is_header/1,
- upgrade/1,
- set/2
-]).
-
--export([
- disk_version/1,
- update_seq/1,
- id_tree_state/1,
- seq_tree_state/1,
- latest/1,
- local_tree_state/1,
- purge_seq/1,
- purged_docs/1,
- security_ptr/1,
- revs_limit/1,
- uuid/1,
- epochs/1,
- compacted_seq/1
-]).
-
-
-% This should be updated anytime a header change happens that requires more
-% than filling in new defaults.
-%
-% As long as the changes are limited to new header fields (with inline
-% defaults) added to the end of the record, then there is no need to increment
-% the disk revision number.
-%
-% If the disk revision is incremented, then new upgrade logic will need to be
-% added to couch_db_updater:init_db.
-
--define(LATEST_DISK_VERSION, 6).
-
--record(db_header, {
- disk_version = ?LATEST_DISK_VERSION,
- update_seq = 0,
- unused = 0,
- id_tree_state = nil,
- seq_tree_state = nil,
- local_tree_state = nil,
- purge_seq = 0,
- purged_docs = nil,
- security_ptr = nil,
- revs_limit = 1000,
- uuid,
- epochs,
- compacted_seq
-}).
-
-
-new() ->
- #db_header{
- uuid = couch_uuids:random(),
- epochs = [{node(), 0}]
- }.
-
-
-from(Header0) ->
- Header = upgrade(Header0),
- #db_header{
- uuid = Header#db_header.uuid,
- epochs = Header#db_header.epochs,
- compacted_seq = Header#db_header.compacted_seq
- }.
-
-
-is_header(Header) ->
- try
- upgrade(Header),
- true
- catch _:_ ->
- false
- end.
-
-
-upgrade(Header) ->
- Funs = [
- fun upgrade_tuple/1,
- fun upgrade_disk_version/1,
- fun upgrade_uuid/1,
- fun upgrade_epochs/1,
- fun upgrade_compacted_seq/1
- ],
- lists:foldl(fun(F, HdrAcc) ->
- F(HdrAcc)
- end, Header, Funs).
-
-
-set(Header0, Fields) ->
- % A subtlety here is that if a database was open during
-% the release upgrade that added uuids and epochs, then
- % this dynamic upgrade also assigns a uuid and epoch.
- Header = upgrade(Header0),
- lists:foldl(fun({Field, Value}, HdrAcc) ->
- set_field(HdrAcc, Field, Value)
- end, Header, Fields).
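-% For example (field values are illustrative), bumping a couple of header
-% fields at once:
-%
-%     NewHeader = set(Header, [{update_seq, Seq}, {security_ptr, Ptr}]).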
-
-
-disk_version(Header) ->
- get_field(Header, disk_version).
-
-
-update_seq(Header) ->
- get_field(Header, update_seq).
-
-
-id_tree_state(Header) ->
- get_field(Header, id_tree_state).
-
-
-seq_tree_state(Header) ->
- get_field(Header, seq_tree_state).
-
-
-local_tree_state(Header) ->
- get_field(Header, local_tree_state).
-
-
-purge_seq(Header) ->
- get_field(Header, purge_seq).
-
-
-purged_docs(Header) ->
- get_field(Header, purged_docs).
-
-
-security_ptr(Header) ->
- get_field(Header, security_ptr).
-
-
-revs_limit(Header) ->
- get_field(Header, revs_limit).
-
-
-uuid(Header) ->
- get_field(Header, uuid).
-
-
-epochs(Header) ->
- get_field(Header, epochs).
-
-
-compacted_seq(Header) ->
- get_field(Header, compacted_seq).
-
-
-get_field(Header, Field) ->
- Idx = index(Field),
- case Idx > tuple_size(Header) of
- true -> undefined;
- false -> element(index(Field), Header)
- end.
-
-
-set_field(Header, Field, Value) ->
- setelement(index(Field), Header, Value).
-
-
-index(Field) ->
- couch_util:get_value(Field, indexes()).
-
-
-indexes() ->
- Fields = record_info(fields, db_header),
- Indexes = lists:seq(2, record_info(size, db_header)),
- lists:zip(Fields, Indexes).
-
-
-upgrade_tuple(Old) when is_record(Old, db_header) ->
- Old;
-upgrade_tuple(Old) when is_tuple(Old) ->
- NewSize = record_info(size, db_header),
- if tuple_size(Old) < NewSize -> ok; true ->
- erlang:error({invalid_header_size, Old})
- end,
- {_, New} = lists:foldl(fun(Val, {Idx, Hdr}) ->
- {Idx+1, setelement(Idx, Hdr, Val)}
- end, {1, #db_header{}}, tuple_to_list(Old)),
- if is_record(New, db_header) -> ok; true ->
- erlang:error({invalid_header_extension, {Old, New}})
- end,
- New.
-
--define(OLD_DISK_VERSION_ERROR,
- "Database files from versions smaller than 0.10.0 are no longer supported").
-
-upgrade_disk_version(#db_header{}=Header) ->
- case element(2, Header) of
- 1 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
- 2 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
- 3 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
- 4 -> Header#db_header{security_ptr = nil}; % [0.10 - 0.11)
- 5 -> Header; % pre 1.2
- ?LATEST_DISK_VERSION -> Header;
- _ ->
- Reason = "Incorrect disk header version",
- throw({database_disk_version_error, Reason})
- end.
-
-
-upgrade_uuid(#db_header{}=Header) ->
- case Header#db_header.uuid of
- undefined ->
- % Upgrading this old db file to a newer
- % on disk format that includes a UUID.
- Header#db_header{uuid=couch_uuids:random()};
- _ ->
- Header
- end.
-
-
-upgrade_epochs(#db_header{}=Header) ->
- NewEpochs = case Header#db_header.epochs of
- undefined ->
-% This node is taking over ownership of a shard with
-% an old version of the couch file. Before epochs there
- % was always an implicit assumption that a file was
- % owned since eternity by the node it was on. This
- % just codifies that assumption.
- [{node(), 0}];
- [{Node, _} | _] = Epochs0 when Node == node() ->
- % Current node is the current owner of this db
- Epochs0;
- Epochs1 ->
- % This node is taking over ownership of this db
- % and marking the update sequence where it happened.
- [{node(), Header#db_header.update_seq} | Epochs1]
- end,
-% It's possible for a node to open a db and claim
- % ownership but never make a write to the db. This
- % removes nodes that claimed ownership but never
- % changed the database.
- DedupedEpochs = remove_dup_epochs(NewEpochs),
- Header#db_header{epochs=DedupedEpochs}.
-
-
-% This is slightly relying on the update_seqs being sorted
-% in epochs due to how we only ever push things onto the
-% front. Although if we ever had a case where the update_seq
-% is not monotonically increasing I don't know that we'd
-% want to remove dupes (by calling a sort on the input to this
-% function). So for now we don't sort but are relying on the
-% idea that epochs is always sorted.
-remove_dup_epochs([_]=Epochs) ->
- Epochs;
-remove_dup_epochs([{N1, S}, {_N2, S}]) ->
- % Seqs match, keep the most recent owner
- [{N1, S}];
-remove_dup_epochs([_, _]=Epochs) ->
- % Seqs don't match.
- Epochs;
-remove_dup_epochs([{N1, S}, {_N2, S} | Rest]) ->
- % Seqs match, keep the most recent owner
- remove_dup_epochs([{N1, S} | Rest]);
-remove_dup_epochs([{N1, S1}, {N2, S2} | Rest]) ->
- % Seqs don't match, recurse to check others
- [{N1, S1} | remove_dup_epochs([{N2, S2} | Rest])].
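-% For example, [{a, 3}, {b, 3}, {c, 0}] collapses to [{a, 3}, {c, 0}]:
-% node b claimed ownership at seq 3 but never wrote before node a took
-% over at the same seq, so b's entry is dropped.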
-
-
-upgrade_compacted_seq(#db_header{}=Header) ->
- case Header#db_header.compacted_seq of
- undefined ->
- Header#db_header{compacted_seq=0};
- _ ->
- Header
- end.
-
-latest(?LATEST_DISK_VERSION) ->
- true;
-latest(N) when is_integer(N), N < ?LATEST_DISK_VERSION ->
- false;
-latest(_Else) ->
- undefined.
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-mk_header(Vsn) ->
- {
- db_header, % record name
- Vsn, % disk version
- 100, % update_seq
- 0, % unused
- foo, % id_tree_state
- bar, % seq_tree_state
- bam, % local_tree_state
- 1, % purge_seq
- baz, % purged_docs
- bang, % security_ptr
- 999 % revs_limit
- }.
-
-
-upgrade_v3_test() ->
- Vsn3Header = mk_header(3),
- NewHeader = upgrade_tuple(Vsn3Header),
-
- % Tuple upgrades don't change
- ?assert(is_record(NewHeader, db_header)),
- ?assertEqual(3, disk_version(NewHeader)),
- ?assertEqual(100, update_seq(NewHeader)),
- ?assertEqual(foo, id_tree_state(NewHeader)),
- ?assertEqual(bar, seq_tree_state(NewHeader)),
- ?assertEqual(bam, local_tree_state(NewHeader)),
- ?assertEqual(1, purge_seq(NewHeader)),
- ?assertEqual(baz, purged_docs(NewHeader)),
- ?assertEqual(bang, security_ptr(NewHeader)),
- ?assertEqual(999, revs_limit(NewHeader)),
- ?assertEqual(undefined, uuid(NewHeader)),
- ?assertEqual(undefined, epochs(NewHeader)),
-
- ?assertThrow({database_disk_version_error, _},
- upgrade_disk_version(NewHeader)).
-
-
-upgrade_v5_test() ->
- Vsn5Header = mk_header(5),
- NewHeader = upgrade_disk_version(upgrade_tuple(Vsn5Header)),
-
- ?assert(is_record(NewHeader, db_header)),
- ?assertEqual(5, disk_version(NewHeader)),
-
- % Security ptr isn't changed for v5 headers
- ?assertEqual(bang, security_ptr(NewHeader)).
-
-
-upgrade_uuid_test() ->
- Vsn5Header = mk_header(5),
-
- % Upgraded headers get a new UUID
- NewHeader = upgrade_uuid(upgrade_disk_version(upgrade_tuple(Vsn5Header))),
- ?assertMatch(<<_:32/binary>>, uuid(NewHeader)),
-
- % Headers with a UUID don't have their UUID changed
- NewNewHeader = upgrade_uuid(upgrade_disk_version(upgrade_tuple(NewHeader))),
- ?assertEqual(uuid(NewHeader), uuid(NewNewHeader)),
-
- % Derived empty headers maintain the same UUID
- ResetHeader = from(NewNewHeader),
- ?assertEqual(uuid(NewHeader), uuid(ResetHeader)).
-
-
-upgrade_epochs_test() ->
- Vsn5Header = mk_header(5),
-
- % Upgraded headers get a default epochs set
- NewHeader = upgrade(Vsn5Header),
- ?assertEqual([{node(), 0}], epochs(NewHeader)),
-
- % Fake an old entry in epochs
- FakeFields = [
- {update_seq, 20},
- {epochs, [{'someothernode@someotherhost', 0}]}
- ],
- NotOwnedHeader = set(NewHeader, FakeFields),
-
- OwnedEpochs = [
- {node(), 20},
- {'someothernode@someotherhost', 0}
- ],
-
- % Upgrading a header not owned by the local node updates
- % the epochs appropriately.
- NowOwnedHeader = upgrade(NotOwnedHeader),
- ?assertEqual(OwnedEpochs, epochs(NowOwnedHeader)),
-
- % Headers with epochs stay the same after upgrades
- NewNewHeader = upgrade(NowOwnedHeader),
- ?assertEqual(OwnedEpochs, epochs(NewNewHeader)),
-
- % Getting a reset header maintains the epoch data
- ResetHeader = from(NewNewHeader),
- ?assertEqual(OwnedEpochs, epochs(ResetHeader)).
-
-
-get_uuid_from_old_header_test() ->
- Vsn5Header = mk_header(5),
- ?assertEqual(undefined, uuid(Vsn5Header)).
-
-
-get_epochs_from_old_header_test() ->
- Vsn5Header = mk_header(5),
- ?assertEqual(undefined, epochs(Vsn5Header)).
-
-
--endif.
diff --git a/src/couch/src/couch_db_int.hrl b/src/couch/src/couch_db_int.hrl
deleted file mode 100644
index 7da0ce5df..000000000
--- a/src/couch/src/couch_db_int.hrl
+++ /dev/null
@@ -1,76 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
--record(db, {
- vsn = 1,
- name,
- filepath,
-
- engine = {couch_bt_engine, undefined},
-
- main_pid = nil,
- compactor_pid = nil,
-
- committed_update_seq,
-
- instance_start_time, % number of microsecs since jan 1 1970 as a binary string
-
- user_ctx = #user_ctx{},
- security = [],
- validate_doc_funs = undefined,
-
- before_doc_update = nil, % nil | fun(Doc, Db) -> NewDoc
- after_doc_read = nil, % nil | fun(Doc, Db) -> NewDoc
-
- % feature removed in 3.x, but field kept to avoid changing db record size
- % and breaking rolling cluster upgrade
- waiting_delayed_commit_deprecated,
-
- options = [],
- compression
-}).
-
-
--define(OLD_DB_REC, {
- db,
- _, % MainPid
- _, % CompactorPid
- _, % InstanceStartTime
- _, % Fd
- _, % FdMonitor
- _, % Header
- _, % CommittedUpdateSeq
- _, % IdTree
- _, % SeqTree
- _, % LocalTree
- _, % UpdateSeq
- _, % Name
- _, % FilePath
- _, % ValidateDocFuns
- _, % Security
- _, % SecurityPtr
- _, % UserCtx
- _, % WaitingDelayedCommit
- _, % RevsLimit
- _, % FsyncOptions
- _, % Options
- _, % Compression
- _, % BeforeDocUpdate
- _ % AfterDocRead
-}).
-
-
--define(OLD_DB_NAME(Db), element(2, Db)).
--define(OLD_DB_MAIN_PID(Db), element(13, Db)).
--define(OLD_DB_USER_CTX(Db), element(18, Db)).
--define(OLD_DB_SECURITY(Db), element(16, Db)).
diff --git a/src/couch/src/couch_db_plugin.erl b/src/couch/src/couch_db_plugin.erl
deleted file mode 100644
index c3684c6e3..000000000
--- a/src/couch/src/couch_db_plugin.erl
+++ /dev/null
@@ -1,96 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_plugin).
-
--export([
- validate_dbname/3,
- before_doc_update/3,
- after_doc_read/2,
- validate_docid/1,
- check_is_admin/1,
- is_valid_purge_client/2,
- on_compact/2,
- on_delete/2
-]).
-
--define(SERVICE_ID, couch_db).
-
--include_lib("couch/include/couch_db.hrl").
-
-%% ------------------------------------------------------------------
-%% API Function Definitions
-%% ------------------------------------------------------------------
-
-validate_dbname(DbName, Normalized, Default) ->
- maybe_handle(validate_dbname, [DbName, Normalized], Default).
-
-before_doc_update(Db, Doc0, UpdateType) ->
- Fun = couch_db:get_before_doc_update_fun(Db),
- case with_pipe(before_doc_update, [Doc0, Db, UpdateType]) of
- [Doc1, _Db, UpdateType1] when is_function(Fun) ->
- Fun(Doc1, Db, UpdateType1);
- [Doc1, _Db, _UpdateType] ->
- Doc1
- end.
-
-after_doc_read(Db, Doc0) ->
- Fun = couch_db:get_after_doc_read_fun(Db),
- case with_pipe(after_doc_read, [Doc0, Db]) of
- [Doc1, _Db] when is_function(Fun) -> Fun(Doc1, Db);
- [Doc1, _Db] -> Doc1
- end.
-
-validate_docid(Id) ->
- Handle = couch_epi:get_handle(?SERVICE_ID),
- %% callbacks return true only if they specifically allow the given Id
- couch_epi:any(Handle, ?SERVICE_ID, validate_docid, [Id], []).
-
-check_is_admin(Db) ->
- Handle = couch_epi:get_handle(?SERVICE_ID),
- %% callbacks return true only if they specifically grant admin access to the given Db
- couch_epi:any(Handle, ?SERVICE_ID, check_is_admin, [Db], []).
-
-is_valid_purge_client(DbName, Props) ->
- Handle = couch_epi:get_handle(?SERVICE_ID),
- %% callbacks return true only if they specifically allow the given purge client
- couch_epi:any(Handle, ?SERVICE_ID, is_valid_purge_client, [DbName, Props], []).
-
-on_compact(DbName, DDocs) ->
- Handle = couch_epi:get_handle(?SERVICE_ID),
- couch_epi:apply(Handle, ?SERVICE_ID, on_compact, [DbName, DDocs], []).
-
-on_delete(DbName, Options) ->
- Handle = couch_epi:get_handle(?SERVICE_ID),
- couch_epi:apply(Handle, ?SERVICE_ID, on_delete, [DbName, Options], []).
-
-%% ------------------------------------------------------------------
-%% Internal Function Definitions
-%% ------------------------------------------------------------------
-
-with_pipe(Func, Args) ->
- do_apply(Func, Args, [pipe]).
-
-do_apply(Func, Args, Opts) ->
- Handle = couch_epi:get_handle(?SERVICE_ID),
- couch_epi:apply(Handle, ?SERVICE_ID, Func, Args, Opts).
-
-maybe_handle(Func, Args, Default) ->
- Handle = couch_epi:get_handle(?SERVICE_ID),
- case couch_epi:decide(Handle, ?SERVICE_ID, Func, Args, []) of
- no_decision when is_function(Default) ->
- apply(Default, Args);
- no_decision ->
- Default;
- {decided, Result} ->
- Result
- end.
diff --git a/src/couch/src/couch_db_split.erl b/src/couch/src/couch_db_split.erl
deleted file mode 100644
index 3a1f98d3e..000000000
--- a/src/couch/src/couch_db_split.erl
+++ /dev/null
@@ -1,503 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_split).
-
-
--export([
- split/3,
- copy_local_docs/3,
- cleanup_target/2
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
-
-
--define(DEFAULT_BUFFER_SIZE, 16777216).
-
-
--record(state, {
- source_db,
- source_uuid,
- targets,
- pickfun,
- max_buffer_size = ?DEFAULT_BUFFER_SIZE,
- hashfun
-}).
-
--record(target, {
- db,
- uuid,
- buffer = [],
- buffer_size = 0
-}).
-
--record(racc, {
- id,
- source_db,
- target_db,
- active = 0,
- external = 0,
- atts = []
-}).
-
-
-% Public API
-
-split(Source, #{} = Targets, PickFun) when
- map_size(Targets) >= 2, is_function(PickFun, 3) ->
- case couch_db:open_int(Source, [?ADMIN_CTX]) of
- {ok, SourceDb} ->
- Engine = get_engine(SourceDb),
- Partitioned = couch_db:is_partitioned(SourceDb),
- HashFun = mem3_hash:get_hash_fun(couch_db:name(SourceDb)),
- try
- split(SourceDb, Partitioned, Engine, Targets, PickFun, HashFun)
- catch
- throw:{target_create_error, DbName, Error, TargetDbs} ->
- cleanup_targets(TargetDbs, Engine),
- {error, {target_create_error, DbName, Error}}
- after
- couch_db:close(SourceDb)
- end;
- {not_found, _} ->
- {error, missing_source}
- end.
-
-
-copy_local_docs(Source, #{} = Targets0, PickFun) when
- is_binary(Source), is_function(PickFun, 3) ->
- case couch_db:open_int(Source, [?ADMIN_CTX]) of
- {ok, SourceDb} ->
- try
- Targets = maps:map(fun(_, DbName) ->
- {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- #target{db = Db, uuid = couch_db:get_uuid(Db)}
- end, Targets0),
- SourceName = couch_db:name(SourceDb),
- try
- State = #state{
- source_db = SourceDb,
- source_uuid = couch_db:get_uuid(SourceDb),
- targets = Targets,
- pickfun = PickFun,
- hashfun = mem3_hash:get_hash_fun(SourceName)
- },
- copy_local_docs(State),
- ok
- after
- maps:map(fun(_, #target{db = Db} = T) ->
- couch_db:close(Db),
- T#target{db = undefined}
- end, Targets)
- end
- after
- couch_db:close(SourceDb)
- end;
- {not_found, _} ->
- {error, missing_source}
- end.
-
-
-cleanup_target(Source, Target) when is_binary(Source), is_binary(Target) ->
- case couch_db:open_int(Source, [?ADMIN_CTX]) of
- {ok, SourceDb} ->
- try
- delete_target(Target, get_engine(SourceDb))
- after
- couch_db:close(SourceDb)
- end;
- {not_found, _} ->
- {error, missing_source}
- end.
-
-
-% Private Functions
-
-split(SourceDb, Partitioned, Engine, Targets0, PickFun, {M, F, A} = HashFun) ->
- Targets = maps:fold(fun(Key, DbName, Map) ->
- case couch_db:validate_dbname(DbName) of
- ok ->
- ok;
- {error, E} ->
- throw({target_create_error, DbName, E, Map})
- end,
- case couch_server:lock(DbName, <<"shard splitting">>) of
- ok ->
- ok;
- {error, Err} ->
- throw({target_create_error, DbName, Err, Map})
- end,
- {ok, Filepath} = couch_server:get_engine_path(DbName, Engine),
- Opts = [create, ?ADMIN_CTX] ++ case Partitioned of
- true -> [{props, [{partitioned, true}, {hash, [M, F, A]}]}];
- false -> []
- end,
- case couch_db:start_link(Engine, DbName, Filepath, Opts) of
- {ok, Db} ->
- Map#{Key => #target{db = Db}};
- {error, Error} ->
- throw({target_create_error, DbName, Error, Map})
- end
- end, #{}, Targets0),
- Seq = couch_db:get_update_seq(SourceDb),
- State1 = #state{
- source_db = SourceDb,
- targets = Targets,
- pickfun = PickFun,
- hashfun = HashFun,
- max_buffer_size = get_max_buffer_size()
- },
- State2 = copy_docs(State1),
- State3 = copy_checkpoints(State2),
- State4 = copy_meta(State3),
- State5 = copy_purge_info(State4),
- State6 = set_targets_update_seq(State5),
- stop_targets(State6#state.targets),
- {ok, Seq}.
-
-
-cleanup_targets(#{} = Targets, Engine) ->
- maps:map(fun(_, #target{db = Db} = T) ->
- ok = stop_target_db(Db),
- DbName = couch_db:name(Db),
- delete_target(DbName, Engine),
- couch_server:unlock(DbName),
- T
- end, Targets).
-
-
-stop_targets(#{} = Targets) ->
- maps:map(fun(_, #target{db = Db} = T) ->
- {ok, Db1} = couch_db_engine:commit_data(Db),
- ok = stop_target_db(Db1),
- T
- end, Targets).
-
-
-stop_target_db(Db) ->
- couch_db:close(Db),
- Pid = couch_db:get_pid(Db),
- catch unlink(Pid),
- catch exit(Pid, kill),
- couch_server:unlock(couch_db:name(Db)),
- ok.
-
-
-delete_target(DbName, Engine) ->
- RootDir = config:get("couchdb", "database_dir", "."),
- {ok, Filepath} = couch_server:get_engine_path(DbName, Engine),
- DelOpt = [{context, compaction}, sync],
- couch_db_engine:delete(Engine, RootDir, Filepath, DelOpt).
-
-
-pick_target(DocId, #state{} = State, #{} = Targets) ->
- #state{pickfun = PickFun, hashfun = HashFun} = State,
- Key = PickFun(DocId, maps:keys(Targets), HashFun),
- {Key, maps:get(Key, Targets)}.
-
-
-set_targets_update_seq(#state{targets = Targets} = State) ->
- Seq = couch_db:get_update_seq(State#state.source_db),
- Targets1 = maps:map(fun(_, #target{db = Db} = Target) ->
- {ok, Db1} = couch_db_engine:set_update_seq(Db, Seq),
- Target#target{db = Db1}
- end, Targets),
- State#state{targets = Targets1}.
-
-
-copy_checkpoints(#state{} = State) ->
- #state{source_db = Db, source_uuid = SrcUUID, targets = Targets} = State,
- FoldFun = fun(#doc{id = Id} = Doc, Acc) ->
- UpdatedAcc = case Id of
- <<?LOCAL_DOC_PREFIX, "shard-sync-", _/binary>> ->
- % Transform mem3 internal replicator checkpoints to avoid
- % rewinding the changes feed when it sees the new shards
- maps:map(fun(_, #target{uuid = TgtUUID, buffer = Docs} = T) ->
- Doc1 = update_checkpoint_doc(SrcUUID, TgtUUID, Doc),
- T#target{buffer = [Doc1 | Docs]}
- end, Acc);
- <<?LOCAL_DOC_PREFIX, "purge-", _/binary>> ->
- % Copy purge checkpoints to all shards
- maps:map(fun(_, #target{buffer = Docs} = T) ->
- T#target{buffer = [Doc | Docs]}
- end, Acc);
- <<?LOCAL_DOC_PREFIX, _/binary>> ->
- % Skip copying these; they will be copied during the
- % local docs top off right before the shards are switched
- Acc
- end,
- {ok, UpdatedAcc}
- end,
- {ok, Targets1} = couch_db_engine:fold_local_docs(Db, FoldFun, Targets, []),
- Targets2 = maps:map(fun(_, #target{db = TDb, buffer = Docs} = T) ->
- case Docs of
- [] ->
- T;
- [_ | _] ->
- Docs1 = lists:reverse(Docs),
- {ok, TDb1} = couch_db_engine:write_doc_infos(TDb, [], Docs1),
- {ok, TDb2} = couch_db_engine:commit_data(TDb1),
- T#target{db = TDb2, buffer = []}
- end
- end, Targets1),
- State#state{targets = Targets2}.
-
-
-update_checkpoint_doc(Old, New, #doc{body = {Props}} = Doc) ->
- NewProps = case couch_util:get_value(<<"target_uuid">>, Props) of
- Old ->
- replace_kv(Props, {<<"target_uuid">>, Old, New});
- Other when is_binary(Other) ->
- replace_kv(Props, {<<"source_uuid">>, Old, New})
- end,
- NewId = update_checkpoint_id(Doc#doc.id, Old, New),
- Doc#doc{id = NewId, body = {NewProps}}.
-
-
-update_checkpoint_id(Id, Old, New) ->
- OldHash = mem3_rep:local_id_hash(Old),
- NewHash = mem3_rep:local_id_hash(New),
- binary:replace(Id, OldHash, NewHash).
-
-
-replace_kv({[]}, _) ->
- {[]};
-replace_kv({KVs}, Replacement) ->
- {[replace_kv(KV, Replacement) || KV <- KVs]};
-replace_kv([], _) ->
- [];
-replace_kv(List, Replacement) when is_list(List) ->
- [replace_kv(V, Replacement) || V <- List];
-replace_kv({K, V}, {K, V, NewV}) ->
- {K, NewV};
-replace_kv({K, V}, Replacement) ->
- {K, replace_kv(V, Replacement)};
-replace_kv(V, _) ->
- V.
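-% For example, replace_kv({[{<<"target_uuid">>, Old}]},
-% {<<"target_uuid">>, Old, New}) returns {[{<<"target_uuid">>, New}]}.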
-
-
-copy_meta(#state{source_db = SourceDb, targets = Targets} = State) ->
- RevsLimit = couch_db:get_revs_limit(SourceDb),
- {SecProps} = couch_db:get_security(SourceDb),
- PurgeLimit = couch_db:get_purge_infos_limit(SourceDb),
- Targets1 = maps:map(fun(_, #target{db = Db} = T) ->
- {ok, Db1} = couch_db_engine:set_revs_limit(Db, RevsLimit),
- {ok, Db2} = couch_db_engine:set_security(Db1, SecProps),
- {ok, Db3} = couch_db_engine:set_purge_infos_limit(Db2, PurgeLimit),
- T#target{db = Db3}
- end, Targets),
- State#state{targets = Targets1}.
-
-
-copy_purge_info(#state{source_db = Db} = State) ->
- {ok, NewState} = couch_db:fold_purge_infos(Db, 0, fun purge_cb/2, State),
- Targets = maps:map(fun(_, #target{} = T) ->
- commit_purge_infos(T)
- end, NewState#state.targets),
- NewState#state{targets = Targets}.
-
-
-acc_and_flush(Item, #target{}= Target, MaxBuffer, FlushCb) ->
- #target{buffer = Buffer, buffer_size = BSize} = Target,
- BSize1 = BSize + ?term_size(Item),
- Target1 = Target#target{buffer = [Item | Buffer], buffer_size = BSize1},
- case BSize1 > MaxBuffer of
- true -> FlushCb(Target1);
- false -> Target1
- end.
-
-
-purge_cb({_PSeq, _UUID, Id, _Revs} = PI, #state{targets = Targets} = State) ->
- {Key, Target} = pick_target(Id, State, Targets),
- MaxBuffer = State#state.max_buffer_size,
- Target1 = acc_and_flush(PI, Target, MaxBuffer, fun commit_purge_infos/1),
- {ok, State#state{targets = Targets#{Key => Target1}}}.
-
-
-commit_purge_infos(#target{buffer = [], db = Db} = Target) ->
- Target#target{db = Db};
-
-commit_purge_infos(#target{buffer = PIs0, db = Db} = Target) ->
- PIs = lists:reverse(PIs0),
- {ok, Db1} = couch_db_engine:copy_purge_infos(Db, PIs),
- {ok, Db2} = couch_db_engine:commit_data(Db1),
- Target#target{buffer = [], buffer_size = 0, db = Db2}.
-
-
-copy_docs(#state{source_db = Db} = State) ->
- {ok, NewState} = couch_db:fold_changes(Db, 0, fun changes_cb/2, State),
- CommitTargets = maps:map(fun(_, #target{} = T) ->
- commit_docs(T)
- end, NewState#state.targets),
- NewState#state{targets = CommitTargets}.
-
-
-% Backwards compatibility clause. Seq trees used to hold #doc_infos at one time
-changes_cb(#doc_info{id = Id}, #state{source_db = Db} = State) ->
- [FDI = #full_doc_info{}] = couch_db_engine:open_docs(Db, [Id]),
- changes_cb(FDI, State);
-
-changes_cb(#full_doc_info{id = Id} = FDI, #state{} = State) ->
- #state{source_db = SourceDb, targets = Targets} = State,
- {Key, Target} = pick_target(Id, State, Targets),
- FDI1 = process_fdi(FDI, SourceDb, Target#target.db),
- MaxBuffer = State#state.max_buffer_size,
- Target1 = acc_and_flush(FDI1, Target, MaxBuffer, fun commit_docs/1),
- {ok, State#state{targets = Targets#{Key => Target1}}}.
-
-
-commit_docs(#target{buffer = [], db = Db} = Target) ->
- Target#target{db = Db};
-
-commit_docs(#target{buffer = FDIs, db = Db} = Target) ->
- Pairs = [{not_found, FDI} || FDI <- lists:reverse(FDIs)],
- {ok, Db1} = couch_db_engine:write_doc_infos(Db, Pairs, []),
- {ok, Db2} = couch_db_engine:commit_data(Db1),
- Target#target{buffer = [], buffer_size = 0, db = Db2}.
-
-
-process_fdi(FDI, SourceDb, TargetDb) ->
- #full_doc_info{id = Id, rev_tree = RTree} = FDI,
- Acc = #racc{id = Id, source_db = SourceDb, target_db = TargetDb},
- {NewRTree, NewAcc} = couch_key_tree:mapfold(fun revtree_cb/4, Acc, RTree),
- {Active, External} = total_sizes(NewAcc),
- FDI#full_doc_info{
- rev_tree = NewRTree,
- sizes = #size_info{active = Active, external = External}
- }.
-
-
-revtree_cb(_Rev, _Leaf, branch, Acc) ->
- {[], Acc};
-
-revtree_cb({Pos, RevId}, Leaf, leaf, Acc) ->
- #racc{id = Id, source_db = SourceDb, target_db = TargetDb} = Acc,
- #leaf{deleted = Deleted, ptr = Ptr, sizes = LeafSizes} = Leaf,
- Doc0 = #doc{
- id = Id,
- revs = {Pos, [RevId]},
- deleted = Deleted,
- body = Ptr
- },
- Doc1 = couch_db_engine:read_doc_body(SourceDb, Doc0),
- #doc{body = Body, atts = AttInfos0} = Doc1,
- External = case LeafSizes#size_info.external of
- 0 when is_binary(Body) ->
- couch_compress:uncompressed_size(Body);
- 0 ->
- couch_ejson_size:encoded_size(Body);
- N -> N
- end,
- AttInfos = if not is_binary(AttInfos0) -> AttInfos0; true ->
- couch_compress:decompress(AttInfos0)
- end,
- Atts = [process_attachment(Att, SourceDb, TargetDb) || Att <- AttInfos],
- Doc2 = Doc1#doc{atts = Atts},
- Doc3 = couch_db_engine:serialize_doc(TargetDb, Doc2),
- {ok, Doc4, Active} = couch_db_engine:write_doc_body(TargetDb, Doc3),
- % element(3,...) and element(4,...) are the stream pointer and size,
- % respectively (see couch_att.erl). They are numeric for compatibility
- % with older formats.
- AttSizes = [{element(3, A), element(4, A)} || A <- Atts],
- NewLeaf = Leaf#leaf{
- ptr = Doc4#doc.body,
- sizes = #size_info{active = Active, external = External},
- atts = AttSizes
- },
- {NewLeaf, add_sizes(Active, External, AttSizes, Acc)}.
-
-
-% This is copied almost verbatim from the compactor
-process_attachment({Name, Type, BinSp, AttLen, RevPos, ExpectedMd5}, SourceDb,
- TargetDb) ->
- % 010 upgrade code
- {ok, SrcStream} = couch_db_engine:open_read_stream(SourceDb, BinSp),
- {ok, DstStream} = couch_db_engine:open_write_stream(TargetDb, []),
- ok = couch_stream:copy(SrcStream, DstStream),
- {NewStream, AttLen, AttLen, ActualMd5, _IdentityMd5} =
- couch_stream:close(DstStream),
- {ok, NewBinSp} = couch_stream:to_disk_term(NewStream),
- couch_util:check_md5(ExpectedMd5, ActualMd5),
- {Name, Type, NewBinSp, AttLen, AttLen, RevPos, ExpectedMd5, identity};
-
-process_attachment({Name, Type, BinSp, AttLen, DiskLen, RevPos, ExpectedMd5,
- Enc1}, SourceDb, TargetDb) ->
- {ok, SrcStream} = couch_db_engine:open_read_stream(SourceDb, BinSp),
- {ok, DstStream} = couch_db_engine:open_write_stream(TargetDb, []),
- ok = couch_stream:copy(SrcStream, DstStream),
- {NewStream, AttLen, _, ActualMd5, _IdentityMd5} =
- couch_stream:close(DstStream),
- {ok, NewBinSp} = couch_stream:to_disk_term(NewStream),
- couch_util:check_md5(ExpectedMd5, ActualMd5),
- Enc = case Enc1 of
- true -> gzip; % 0110 upgrade code
- false -> identity; % 0110 upgrade code
- _ -> Enc1
- end,
- {Name, Type, NewBinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc}.
-
-
-get_engine(Db) ->
- {ok, DbInfoProps} = couch_db:get_db_info(Db),
- proplists:get_value(engine, DbInfoProps).
-
-
-add_sizes(Active, External, Atts, #racc{} = Acc) ->
- #racc{active = ActiveAcc, external = ExternalAcc, atts = AttsAcc} = Acc,
- NewActiveAcc = ActiveAcc + Active,
- NewExternalAcc = ExternalAcc + External,
- NewAttsAcc = lists:umerge(Atts, AttsAcc),
- Acc#racc{
- active = NewActiveAcc,
- external = NewExternalAcc,
- atts = NewAttsAcc
- }.
-
-
-total_sizes(#racc{active = Active, external = External, atts = Atts}) ->
- TotalAtts = lists:foldl(fun({_, S}, A) -> S + A end, 0, Atts),
- {Active + TotalAtts, External + TotalAtts}.
-
-
-get_max_buffer_size() ->
- config:get_integer("reshard", "split_buffer_size", ?DEFAULT_BUFFER_SIZE).
-
-
-copy_local_docs(#state{source_db = Db, targets = Targets} = State) ->
- FoldFun = fun(#doc{id = Id} = Doc, Acc) ->
- UpdatedAcc = case Id of
- <<?LOCAL_DOC_PREFIX, "shard-sync-", _/binary>> ->
- Acc;
- <<?LOCAL_DOC_PREFIX, "purge-", _/binary>> ->
- Acc;
- <<?LOCAL_DOC_PREFIX, _/binary>> ->
- % Users' and replicator app's checkpoints go to their
- % respective shards based on the general hashing algorithm
- {Key, Target} = pick_target(Id, State, Acc),
- #target{buffer = Docs} = Target,
- Acc#{Key => Target#target{buffer = [Doc | Docs]}}
- end,
- {ok, UpdatedAcc}
- end,
- {ok, Targets1} = couch_db:fold_local_docs(Db, FoldFun, Targets, []),
- Targets2 = maps:map(fun(_, #target{db = TDb, buffer = Docs} = T) ->
- case Docs of
- [] ->
- T;
- [_ | _] ->
- Docs1 = lists:reverse(Docs),
- {ok, _} = couch_db:update_docs(TDb, Docs1),
- T#target{buffer = []}
- end
- end, Targets1),
- State#state{targets = Targets2}.
diff --git a/src/couch/src/couch_db_updater.erl b/src/couch/src/couch_db_updater.erl
deleted file mode 100644
index 1ca804c05..000000000
--- a/src/couch/src/couch_db_updater.erl
+++ /dev/null
@@ -1,955 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_updater).
--behaviour(gen_server).
--vsn(1).
-
--export([add_sizes/3, upgrade_sizes/1]).
--export([init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,handle_info/2]).
-
--include_lib("couch/include/couch_db.hrl").
--include("couch_db_int.hrl").
-
--define(IDLE_LIMIT_DEFAULT, 61000).
--define(DEFAULT_MAX_PARTITION_SIZE, 16#280000000). % 10 GiB
-
-
--record(merge_acc, {
- revs_limit,
- merge_conflicts,
- add_infos = [],
- rem_seqs = [],
- cur_seq,
- full_partitions = []
-}).
-
-
-init({Engine, DbName, FilePath, Options0}) ->
- erlang:put(io_priority, {db_update, DbName}),
- update_idle_limit_from_config(),
- DefaultSecObj = default_security_object(DbName),
- Options = [{default_security_object, DefaultSecObj} | Options0],
- try
- {ok, EngineState} = couch_db_engine:init(Engine, FilePath, Options),
- Db = init_db(DbName, FilePath, EngineState, Options),
- case lists:member(sys_db, Options) of
- false ->
- couch_stats_process_tracker:track([couchdb, open_databases]);
- true ->
- ok
- end,
- % Don't load validation funs here because the fabric query is
- % liable to race conditions. Instead see
- % couch_db:validate_doc_update, which loads them lazily.
- NewDb = Db#db{main_pid = self()},
- proc_lib:init_ack({ok, NewDb}),
- gen_server:enter_loop(?MODULE, [], NewDb, idle_limit())
- catch
- throw:InitError ->
- proc_lib:init_ack(InitError)
- end.
-
-
-terminate(Reason, Db) ->
- couch_util:shutdown_sync(Db#db.compactor_pid),
- couch_db_engine:terminate(Reason, Db),
- ok.
-
-handle_call(get_db, _From, Db) ->
- {reply, {ok, Db}, Db, idle_limit()};
-handle_call(start_compact, _From, Db) ->
- {noreply, NewDb, _Timeout} = handle_cast(start_compact, Db),
- {reply, {ok, NewDb#db.compactor_pid}, NewDb, idle_limit()};
-handle_call(compactor_pid, _From, #db{compactor_pid = Pid} = Db) ->
- {reply, Pid, Db, idle_limit()};
-handle_call(cancel_compact, _From, #db{compactor_pid = nil} = Db) ->
- {reply, ok, Db, idle_limit()};
-handle_call(cancel_compact, _From, #db{compactor_pid = Pid} = Db) ->
- unlink(Pid),
- exit(Pid, kill),
- couch_server:delete_compaction_files(Db#db.name),
- Db2 = Db#db{compactor_pid = nil},
- ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
- {reply, ok, Db2, idle_limit()};
-
-handle_call({set_security, NewSec}, _From, #db{} = Db) ->
- {ok, NewDb} = couch_db_engine:set_security(Db, NewSec),
- NewSecDb = commit_data(NewDb#db{
- security = NewSec
- }),
- ok = gen_server:call(couch_server, {db_updated, NewSecDb}, infinity),
- {reply, ok, NewSecDb, idle_limit()};
-
-handle_call({set_revs_limit, Limit}, _From, Db) ->
- {ok, Db2} = couch_db_engine:set_revs_limit(Db, Limit),
- Db3 = commit_data(Db2),
- ok = gen_server:call(couch_server, {db_updated, Db3}, infinity),
- {reply, ok, Db3, idle_limit()};
-
-handle_call({set_purge_infos_limit, Limit}, _From, Db) ->
- {ok, Db2} = couch_db_engine:set_purge_infos_limit(Db, Limit),
- ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
- {reply, ok, Db2, idle_limit()};
-
-handle_call({purge_docs, [], _}, _From, Db) ->
- {reply, {ok, []}, Db, idle_limit()};
-
-handle_call({purge_docs, PurgeReqs0, Options}, _From, Db) ->
- % Filter out any previously applied updates during
- % internal replication
- IsRepl = lists:member(replicated_changes, Options),
- PurgeReqs = if not IsRepl -> PurgeReqs0; true ->
- UUIDs = [UUID || {UUID, _Id, _Revs} <- PurgeReqs0],
- PurgeInfos = couch_db_engine:load_purge_infos(Db, UUIDs),
- lists:flatmap(fun
- ({not_found, PReq}) -> [PReq];
- ({{_, _, _, _}, _}) -> []
- end, lists:zip(PurgeInfos, PurgeReqs0))
- end,
- {ok, NewDb, Replies} = purge_docs(Db, PurgeReqs),
- {reply, {ok, Replies}, NewDb, idle_limit()};
-
-handle_call(Msg, From, Db) ->
- case couch_db_engine:handle_db_updater_call(Msg, From, Db) of
- {reply, Resp, NewDb} ->
- {reply, Resp, NewDb, idle_limit()};
- Else ->
- Else
- end.
-
-
-handle_cast({load_validation_funs, ValidationFuns}, Db) ->
- Db2 = Db#db{validate_doc_funs = ValidationFuns},
- ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
- {noreply, Db2, idle_limit()};
-handle_cast(start_compact, Db) ->
- case Db#db.compactor_pid of
- nil ->
- % For now we only support compacting to the same
- % storage engine. After the first round of patches
- % we'll add a field that sets the target engine
- % type to compact to with a new copy compactor.
- UpdateSeq = couch_db_engine:get_update_seq(Db),
- Args = [Db#db.name, UpdateSeq],
- couch_log:info("Starting compaction for db \"~s\" at ~p", Args),
- {ok, Db2} = couch_db_engine:start_compaction(Db),
- ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
- {noreply, Db2, idle_limit()};
- _ ->
-            % a compaction is already running, this is a no-op
- {noreply, Db, idle_limit()}
- end;
-handle_cast({compact_done, _Engine, CompactInfo}, #db{} = OldDb) ->
- {ok, NewDb} = couch_db_engine:finish_compaction(OldDb, CompactInfo),
- {noreply, NewDb};
-
-handle_cast(wakeup, Db) ->
- {noreply, Db, idle_limit()};
-
-handle_cast(Msg, #db{name = Name} = Db) ->
- couch_log:error("Database `~s` updater received unexpected cast: ~p",
- [Name, Msg]),
- {stop, Msg, Db}.
-
-
-handle_info({update_docs, Client, GroupedDocs, NonRepDocs, MergeConflicts},
- Db) ->
- GroupedDocs2 = sort_and_tag_grouped_docs(Client, GroupedDocs),
- if NonRepDocs == [] ->
- {GroupedDocs3, Clients} = collect_updates(GroupedDocs2,
- [Client], MergeConflicts);
- true ->
- GroupedDocs3 = GroupedDocs2,
- Clients = [Client]
- end,
- NonRepDocs2 = [{Client, NRDoc} || NRDoc <- NonRepDocs],
- try update_docs_int(Db, GroupedDocs3, NonRepDocs2, MergeConflicts) of
- {ok, Db2, UpdatedDDocIds} ->
- ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
- case {couch_db:get_update_seq(Db), couch_db:get_update_seq(Db2)} of
- {Seq, Seq} -> ok;
- _ -> couch_event:notify(Db2#db.name, updated)
- end,
- if NonRepDocs2 /= [] ->
- couch_event:notify(Db2#db.name, local_updated);
- true -> ok
- end,
- [catch(ClientPid ! {done, self()}) || ClientPid <- Clients],
- Db3 = case length(UpdatedDDocIds) > 0 of
- true ->
- % Ken and ddoc_cache are the only things that
- % use the unspecified ddoc_updated message. We
- % should update them to use the new message per
- % ddoc.
- lists:foreach(fun(DDocId) ->
- couch_event:notify(Db2#db.name, {ddoc_updated, DDocId})
- end, UpdatedDDocIds),
- couch_event:notify(Db2#db.name, ddoc_updated),
- ddoc_cache:refresh(Db2#db.name, UpdatedDDocIds),
- refresh_validate_doc_funs(Db2);
- false ->
- Db2
- end,
- {noreply, Db3, hibernate_if_no_idle_limit()}
- catch
- throw: retry ->
- [catch(ClientPid ! {retry, self()}) || ClientPid <- Clients],
- {noreply, Db, hibernate_if_no_idle_limit()}
- end;
-handle_info({'EXIT', _Pid, normal}, Db) ->
- {noreply, Db, idle_limit()};
-handle_info({'EXIT', _Pid, Reason}, Db) ->
- {stop, Reason, Db};
-handle_info(timeout, #db{name=DbName} = Db) ->
- IdleLimitMSec = update_idle_limit_from_config(),
- case couch_db:is_idle(Db) of
- true ->
- LastActivity = couch_db_engine:last_activity(Db),
- DtMSec = timer:now_diff(os:timestamp(), LastActivity) div 1000,
- MSecSinceLastActivity = max(0, DtMSec),
- case MSecSinceLastActivity > IdleLimitMSec of
- true ->
- ok = couch_server:close_db_if_idle(DbName);
- false ->
- ok
- end;
- false ->
- ok
- end,
- % Send a message to wake up and then hibernate. Hibernation here is done to
- % force a thorough garbage collection.
- gen_server:cast(self(), wakeup),
- {noreply, Db, hibernate};
-
-handle_info(Msg, Db) ->
- case couch_db_engine:handle_db_updater_info(Msg, Db) of
- {noreply, NewDb} ->
- {noreply, NewDb, idle_limit()};
- Else ->
- Else
- end.
-
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-sort_and_tag_grouped_docs(Client, GroupedDocs) ->
- % These groups should already be sorted but sometimes clients misbehave.
- % The merge_updates function will fail and the database can end up with
- % duplicate documents if the incoming groups are not sorted, so as a sanity
- % check we sort them again here. See COUCHDB-2735.
- Cmp = fun([#doc{id=A}|_], [#doc{id=B}|_]) -> A < B end,
- lists:map(fun(DocGroup) ->
- [{Client, maybe_tag_doc(D)} || D <- DocGroup]
- end, lists:sort(Cmp, GroupedDocs)).
-
-maybe_tag_doc(#doc{id=Id, revs={Pos,[_Rev|PrevRevs]}, meta=Meta0}=Doc) ->
- case lists:keymember(ref, 1, Meta0) of
- true ->
- Doc;
- false ->
- Key = {Id, {Pos-1, PrevRevs}},
- Doc#doc{meta=[{ref, Key} | Meta0]}
- end.
-
-merge_updates([[{_,#doc{id=X}}|_]=A|RestA], [[{_,#doc{id=X}}|_]=B|RestB]) ->
- [A++B | merge_updates(RestA, RestB)];
-merge_updates([[{_,#doc{id=X}}|_]|_]=A, [[{_,#doc{id=Y}}|_]|_]=B) when X < Y ->
- [hd(A) | merge_updates(tl(A), B)];
-merge_updates([[{_,#doc{id=X}}|_]|_]=A, [[{_,#doc{id=Y}}|_]|_]=B) when X > Y ->
- [hd(B) | merge_updates(A, tl(B))];
-merge_updates([], RestB) ->
- RestB;
-merge_updates(RestA, []) ->
- RestA.
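For illustration (the client tags c1/c2 and the abbreviated doc records are made up), two sorted lists of update groups merge by doc id, with groups that share an id concatenated:

    DocA = #doc{id = <<"a">>},
    DocB = #doc{id = <<"b">>},
    merge_updates([[{c1, DocA}]], [[{c2, DocA}], [{c2, DocB}]])
    %=> [[{c1, DocA}, {c2, DocA}], [{c2, DocB}]]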
-
-collect_updates(GroupedDocsAcc, ClientsAcc, MergeConflicts) ->
- receive
- % Only collect updates with the same MergeConflicts flag and without
- % local docs. It's easier to just avoid multiple _local doc
- % updaters than deal with their possible conflicts, and local docs
-        % writes are relatively rare. Can be optimized later if really needed.
- {update_docs, Client, GroupedDocs, [], MergeConflicts} ->
- GroupedDocs2 = sort_and_tag_grouped_docs(Client, GroupedDocs),
- GroupedDocsAcc2 =
- merge_updates(GroupedDocsAcc, GroupedDocs2),
- collect_updates(GroupedDocsAcc2, [Client | ClientsAcc],
- MergeConflicts)
- after 0 ->
- {GroupedDocsAcc, ClientsAcc}
- end.
-
-
-init_db(DbName, FilePath, EngineState, Options) ->
- % convert start time tuple to microsecs and store as a binary string
- {MegaSecs, Secs, MicroSecs} = os:timestamp(),
- StartTime = ?l2b(io_lib:format("~p",
- [(MegaSecs*1000000*1000000) + (Secs*1000000) + MicroSecs])),
-
- BDU = couch_util:get_value(before_doc_update, Options, nil),
- ADR = couch_util:get_value(after_doc_read, Options, nil),
-
- NonCreateOpts = [Opt || Opt <- Options, Opt /= create],
-
- InitDb = #db{
- name = DbName,
- filepath = FilePath,
- engine = EngineState,
- instance_start_time = StartTime,
- options = NonCreateOpts,
- before_doc_update = BDU,
- after_doc_read = ADR
- },
-
- DbProps = couch_db_engine:get_props(InitDb),
-
- InitDb#db{
- committed_update_seq = couch_db_engine:get_update_seq(InitDb),
- security = couch_db_engine:get_security(InitDb),
- options = lists:keystore(props, 1, NonCreateOpts, {props, DbProps})
- }.
-
-
-refresh_validate_doc_funs(#db{name = <<"shards/", _/binary>> = Name} = Db) ->
- spawn(fabric, reset_validation_funs, [mem3:dbname(Name)]),
- Db#db{validate_doc_funs = undefined};
-refresh_validate_doc_funs(Db0) ->
- Db = Db0#db{user_ctx=?ADMIN_USER},
- {ok, DesignDocs} = couch_db:get_design_docs(Db),
- ProcessDocFuns = lists:flatmap(
- fun(DesignDocInfo) ->
- {ok, DesignDoc} = couch_db:open_doc_int(
- Db, DesignDocInfo, [ejson_body]),
- case couch_doc:get_validate_doc_fun(DesignDoc) of
- nil -> [];
- Fun -> [Fun]
- end
- end, DesignDocs),
- Db#db{validate_doc_funs=ProcessDocFuns}.
-
-% rev tree functions
-
-flush_trees(_Db, [], AccFlushedTrees) ->
- {ok, lists:reverse(AccFlushedTrees)};
-flush_trees(#db{} = Db,
- [InfoUnflushed | RestUnflushed], AccFlushed) ->
- #full_doc_info{update_seq=UpdateSeq, rev_tree=Unflushed} = InfoUnflushed,
- {Flushed, FinalAcc} = couch_key_tree:mapfold(
- fun(_Rev, Value, Type, SizesAcc) ->
- case Value of
- % This node is a document summary that needs to be
- % flushed to disk.
- #doc{} = Doc ->
- check_doc_atts(Db, Doc),
- ExternalSize = get_meta_body_size(Value#doc.meta),
- {size_info, AttSizeInfo} =
- lists:keyfind(size_info, 1, Doc#doc.meta),
- {ok, NewDoc, WrittenSize} =
- couch_db_engine:write_doc_body(Db, Doc),
- Leaf = #leaf{
- deleted = Doc#doc.deleted,
- ptr = NewDoc#doc.body,
- seq = UpdateSeq,
- sizes = #size_info{
- active = WrittenSize,
- external = ExternalSize
- },
- atts = AttSizeInfo
- },
- {Leaf, add_sizes(Type, Leaf, SizesAcc)};
- #leaf{} ->
- {Value, add_sizes(Type, Value, SizesAcc)};
- _ ->
- {Value, SizesAcc}
- end
- end, {0, 0, []}, Unflushed),
- {FinalAS, FinalES, FinalAtts} = FinalAcc,
- TotalAttSize = lists:foldl(fun({_, S}, A) -> S + A end, 0, FinalAtts),
- NewInfo = InfoUnflushed#full_doc_info{
- rev_tree = Flushed,
- sizes = #size_info{
- active = FinalAS + TotalAttSize,
- external = FinalES + TotalAttSize
- }
- },
- flush_trees(Db, RestUnflushed, [NewInfo | AccFlushed]).
-
-
-check_doc_atts(Db, Doc) ->
- {atts_stream, Stream} = lists:keyfind(atts_stream, 1, Doc#doc.meta),
- % Make sure that the attachments were written to the currently
- % active attachment stream. If compaction swaps during a write
- % request we may have to rewrite our attachment bodies.
- if Stream == nil -> ok; true ->
- case couch_db:is_active_stream(Db, Stream) of
- true ->
- ok;
- false ->
- % Stream where the attachments were written to is
- % no longer the current attachment stream. This
- % can happen when a database is switched at
- % compaction time.
- couch_log:debug("Stream where the attachments were"
- " written has changed."
- " Possibly retrying.", []),
- throw(retry)
- end
- end.
-
-
-add_sizes(Type, #leaf{sizes=Sizes, atts=AttSizes}, Acc) ->
- % Maybe upgrade from disk_size only
- #size_info{
- active = ActiveSize,
- external = ExternalSize
- } = upgrade_sizes(Sizes),
- {ASAcc, ESAcc, AttsAcc} = Acc,
- NewASAcc = ActiveSize + ASAcc,
- NewESAcc = ESAcc + if Type == leaf -> ExternalSize; true -> 0 end,
- NewAttsAcc = lists:umerge(AttSizes, AttsAcc),
- {NewASAcc, NewESAcc, NewAttsAcc}.
-
-
-upgrade_sizes(#size_info{}=SI) ->
- SI;
-upgrade_sizes({D, E}) ->
- #size_info{active=D, external=E};
-upgrade_sizes(S) when is_integer(S) ->
- #size_info{active=S, external=0}.
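For illustration (the numbers are made up), the two legacy size formats upgrade as follows:

    upgrade_sizes({400, 350})  %=> #size_info{active = 400, external = 350}
    upgrade_sizes(1024)        %=> #size_info{active = 1024, external = 0}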
-
-
-send_result(Client, Doc, NewResult) ->
- % used to send a result to the client
- catch(Client ! {result, self(), {doc_tag(Doc), NewResult}}).
-
-doc_tag(#doc{meta=Meta}) ->
- case lists:keyfind(ref, 1, Meta) of
- {ref, Ref} -> Ref;
- false -> throw(no_doc_tag);
- Else -> throw({invalid_doc_tag, Else})
- end.
-
-merge_rev_trees([], [], Acc) ->
- {ok, Acc#merge_acc{
- add_infos = lists:reverse(Acc#merge_acc.add_infos)
- }};
-merge_rev_trees([NewDocs | RestDocsList], [OldDocInfo | RestOldInfo], Acc) ->
- #merge_acc{
- revs_limit = Limit,
- merge_conflicts = MergeConflicts,
- full_partitions = FullPartitions
- } = Acc,
-
- % Track doc ids so we can debug large revision trees
- erlang:put(last_id_merged, OldDocInfo#full_doc_info.id),
- NewDocInfo0 = lists:foldl(fun({Client, NewDoc}, OldInfoAcc) ->
- NewInfo = merge_rev_tree(OldInfoAcc, NewDoc, Client, MergeConflicts),
- case is_overflowed(NewInfo, OldInfoAcc, FullPartitions) of
- true when not MergeConflicts ->
- DocId = NewInfo#full_doc_info.id,
- send_result(Client, NewDoc, {partition_overflow, DocId}),
- OldInfoAcc;
- _ ->
- NewInfo
- end
- end, OldDocInfo, NewDocs),
- NewDocInfo1 = maybe_stem_full_doc_info(NewDocInfo0, Limit),
- % When MergeConflicts is false, we updated #full_doc_info.deleted on every
- % iteration of merge_rev_tree. However, merge_rev_tree does not update
- % #full_doc_info.deleted when MergeConflicts is true, since we don't need
- % to know whether the doc is deleted between iterations. Since we still
- % need to know if the doc is deleted after the merge happens, we have to
- % set it here.
- NewDocInfo2 = case MergeConflicts of
- true ->
- NewDocInfo1#full_doc_info{
- deleted = couch_doc:is_deleted(NewDocInfo1)
- };
- false ->
- NewDocInfo1
- end,
- if NewDocInfo2 == OldDocInfo ->
- % nothing changed
- merge_rev_trees(RestDocsList, RestOldInfo, Acc);
- true ->
-        % We have updated the document, give it a new update_seq. It's
- % important to note that the update_seq on OldDocInfo should
- % be identical to the value on NewDocInfo1.
- OldSeq = OldDocInfo#full_doc_info.update_seq,
- NewDocInfo3 = NewDocInfo2#full_doc_info{
- update_seq = Acc#merge_acc.cur_seq + 1
- },
- RemoveSeqs = case OldSeq of
- 0 -> Acc#merge_acc.rem_seqs;
- _ -> [OldSeq | Acc#merge_acc.rem_seqs]
- end,
- NewAcc = Acc#merge_acc{
- add_infos = [NewDocInfo3 | Acc#merge_acc.add_infos],
- rem_seqs = RemoveSeqs,
- cur_seq = Acc#merge_acc.cur_seq + 1
- },
- merge_rev_trees(RestDocsList, RestOldInfo, NewAcc)
- end.
-
-merge_rev_tree(OldInfo, NewDoc, Client, false)
- when OldInfo#full_doc_info.deleted ->
- % We're recreating a document that was previously
- % deleted. To check that this is a recreation from
- % the root we assert that the new document has a
- % revision depth of 1 (this is to avoid recreating a
- % doc from a previous internal revision) and is also
- % not deleted. To avoid expanding the revision tree
- % unnecessarily we create a new revision based on
- % the winning deleted revision.
-
- {RevDepth, _} = NewDoc#doc.revs,
- NewDeleted = NewDoc#doc.deleted,
- case RevDepth == 1 andalso not NewDeleted of
- true ->
- % Update the new doc based on revisions in OldInfo
- #doc_info{revs=[WinningRev | _]} = couch_doc:to_doc_info(OldInfo),
- #rev_info{rev={OldPos, OldRev}} = WinningRev,
- Body = case couch_util:get_value(comp_body, NewDoc#doc.meta) of
- CompBody when is_binary(CompBody) ->
- couch_compress:decompress(CompBody);
- _ ->
- NewDoc#doc.body
- end,
- RevIdDoc = NewDoc#doc{
- revs = {OldPos, [OldRev]},
- body = Body
- },
- NewRevId = couch_db:new_revid(RevIdDoc),
- NewDoc2 = NewDoc#doc{revs={OldPos + 1, [NewRevId, OldRev]}},
-
- % Merge our modified new doc into the tree
- #full_doc_info{rev_tree=OldTree} = OldInfo,
- NewTree0 = couch_doc:to_path(NewDoc2),
- case couch_key_tree:merge(OldTree, NewTree0) of
- {NewTree1, new_leaf} ->
- % We changed the revision id so inform the caller
- send_result(Client, NewDoc, {ok, {OldPos+1, NewRevId}}),
- OldInfo#full_doc_info{
- rev_tree = NewTree1,
- deleted = false
- };
- _ ->
- throw(doc_recreation_failed)
- end;
- _ ->
- send_result(Client, NewDoc, conflict),
- OldInfo
- end;
-merge_rev_tree(OldInfo, NewDoc, Client, false) ->
- % We're attempting to merge a new revision into an
- % undeleted document. To not be a conflict we require
- % that the merge results in extending a branch.
-
- OldTree = OldInfo#full_doc_info.rev_tree,
- NewTree0 = couch_doc:to_path(NewDoc),
- NewDeleted = NewDoc#doc.deleted,
- case couch_key_tree:merge(OldTree, NewTree0) of
- {NewTree, new_leaf} when not NewDeleted ->
- OldInfo#full_doc_info{
- rev_tree = NewTree,
- deleted = false
- };
- {NewTree, new_leaf} when NewDeleted ->
- % We have to check if we just deleted this
- % document completely or if it was a conflict
- % resolution.
- OldInfo#full_doc_info{
- rev_tree = NewTree,
- deleted = couch_doc:is_deleted(NewTree)
- };
- _ ->
- send_result(Client, NewDoc, conflict),
- OldInfo
- end;
-merge_rev_tree(OldInfo, NewDoc, _Client, true) ->
- % We're merging in revisions without caring about
- % conflicts. Most likely this is a replication update.
- OldTree = OldInfo#full_doc_info.rev_tree,
- NewTree0 = couch_doc:to_path(NewDoc),
- {NewTree, _} = couch_key_tree:merge(OldTree, NewTree0),
- OldInfo#full_doc_info{rev_tree = NewTree}.
-
-is_overflowed(_New, _Old, []) ->
- false;
-is_overflowed(Old, Old, _FullPartitions) ->
- false;
-is_overflowed(New, Old, FullPartitions) ->
- case New#full_doc_info.id of
- <<"_design/", _/binary>> ->
- false;
- DDocId ->
- Partition = couch_partition:from_docid(DDocId),
- case lists:member(Partition, FullPartitions) of
- true ->
- estimate_size(New) > estimate_size(Old);
- false ->
- false
- end
- end.
-
-maybe_stem_full_doc_info(#full_doc_info{rev_tree = Tree} = Info, Limit) ->
- case config:get_boolean("couchdb", "stem_interactive_updates", true) of
- true ->
- Stemmed = couch_key_tree:stem(Tree, Limit),
- Info#full_doc_info{rev_tree = Stemmed};
- false ->
- Info
- end.
-
-update_docs_int(Db, DocsList, LocalDocs, MergeConflicts) ->
- UpdateSeq = couch_db_engine:get_update_seq(Db),
- RevsLimit = couch_db_engine:get_revs_limit(Db),
-
- Ids = [Id || [{_Client, #doc{id=Id}}|_] <- DocsList],
-    % look up the old documents, if they exist.
- OldDocLookups = couch_db_engine:open_docs(Db, Ids),
- OldDocInfos = lists:zipwith(fun
- (_Id, #full_doc_info{} = FDI) ->
- FDI;
- (Id, not_found) ->
- #full_doc_info{id=Id}
- end, Ids, OldDocLookups),
-
- %% Get the list of full partitions
- FullPartitions = case couch_db:is_partitioned(Db) of
- true ->
- case max_partition_size() of
- N when N =< 0 ->
- [];
- Max ->
- Partitions = lists:usort(lists:flatmap(fun(Id) ->
- case couch_partition:extract(Id) of
- undefined -> [];
- {Partition, _} -> [Partition]
- end
- end, Ids)),
- [P || P <- Partitions, partition_size(Db, P) >= Max]
- end;
- false ->
- []
- end,
-
- % Merge the new docs into the revision trees.
- AccIn = #merge_acc{
- revs_limit = RevsLimit,
- merge_conflicts = MergeConflicts,
- add_infos = [],
- rem_seqs = [],
- cur_seq = UpdateSeq,
- full_partitions = FullPartitions
- },
- {ok, AccOut} = merge_rev_trees(DocsList, OldDocInfos, AccIn),
- #merge_acc{
- add_infos = NewFullDocInfos,
- rem_seqs = RemSeqs
- } = AccOut,
-
- % Write out the document summaries (the bodies are stored in the nodes of
- % the trees, the attachments are already written to disk)
- {ok, IndexFDIs} = flush_trees(Db, NewFullDocInfos, []),
- Pairs = pair_write_info(OldDocLookups, IndexFDIs),
- LocalDocs2 = update_local_doc_revs(LocalDocs),
-
- {ok, Db1} = couch_db_engine:write_doc_infos(Db, Pairs, LocalDocs2),
-
- WriteCount = length(IndexFDIs),
- couch_stats:increment_counter([couchdb, document_inserts],
- WriteCount - length(RemSeqs)),
- couch_stats:increment_counter([couchdb, document_writes], WriteCount),
- couch_stats:increment_counter(
- [couchdb, local_document_writes],
- length(LocalDocs2)
- ),
-
- % Check if we just updated any design documents, and update the validation
- % funs if we did.
- UpdatedDDocIds = lists:flatmap(fun
- (<<"_design/", _/binary>> = Id) -> [Id];
- (_) -> []
- end, Ids),
-
- {ok, commit_data(Db1), UpdatedDDocIds}.
-
-
-update_local_doc_revs(Docs) ->
- lists:foldl(fun({Client, Doc}, Acc) ->
- case increment_local_doc_revs(Doc) of
- {ok, #doc{revs = {0, [NewRev]}} = NewDoc} ->
- send_result(Client, Doc, {ok, {0, integer_to_binary(NewRev)}}),
- [NewDoc | Acc];
- {error, Error} ->
- send_result(Client, Doc, {error, Error}),
- Acc
- end
- end, [], Docs).
-
-
-increment_local_doc_revs(#doc{deleted = true} = Doc) ->
- {ok, Doc#doc{revs = {0, [0]}}};
-increment_local_doc_revs(#doc{revs = {0, []}} = Doc) ->
- {ok, Doc#doc{revs = {0, [1]}}};
-increment_local_doc_revs(#doc{revs = {0, [RevStr | _]}} = Doc) ->
- try
- PrevRev = binary_to_integer(RevStr),
- {ok, Doc#doc{revs = {0, [PrevRev + 1]}}}
- catch error:badarg ->
- {error, <<"Invalid rev format">>}
- end;
-increment_local_doc_revs(#doc{}) ->
- {error, <<"Invalid rev format">>}.
-
-max_partition_size() ->
- config:get_integer("couchdb", "max_partition_size",
- ?DEFAULT_MAX_PARTITION_SIZE).
-
-partition_size(Db, Partition) ->
- {ok, Info} = couch_db:get_partition_info(Db, Partition),
- Sizes = couch_util:get_value(sizes, Info),
- couch_util:get_value(external, Sizes).
-
-estimate_size(#full_doc_info{} = FDI) ->
- #full_doc_info{rev_tree = RevTree} = FDI,
- Fun = fun
- (_Rev, Value, leaf, SizesAcc) ->
- case Value of
- #doc{} = Doc ->
- ExternalSize = get_meta_body_size(Value#doc.meta),
- {size_info, AttSizeInfo} =
- lists:keyfind(size_info, 1, Doc#doc.meta),
- Leaf = #leaf{
- sizes = #size_info{
- external = ExternalSize
- },
- atts = AttSizeInfo
- },
- add_sizes(leaf, Leaf, SizesAcc);
- #leaf{} ->
- add_sizes(leaf, Value, SizesAcc)
- end;
- (_Rev, _Value, branch, SizesAcc) ->
- SizesAcc
- end,
- {_, FinalES, FinalAtts} = couch_key_tree:fold(Fun, {0, 0, []}, RevTree),
- TotalAttSize = lists:foldl(fun({_, S}, A) -> S + A end, 0, FinalAtts),
- FinalES + TotalAttSize.
-
-purge_docs(Db, []) ->
- {ok, Db, []};
-
-purge_docs(Db, PurgeReqs) ->
- Ids = lists:usort(lists:map(fun({_UUID, Id, _Revs}) -> Id end, PurgeReqs)),
- FDIs = couch_db_engine:open_docs(Db, Ids),
- USeq = couch_db_engine:get_update_seq(Db),
-
- IdFDIs = lists:zip(Ids, FDIs),
- {NewIdFDIs, Replies} = apply_purge_reqs(PurgeReqs, IdFDIs, USeq, []),
-
- Pairs = lists:flatmap(fun({DocId, OldFDI}) ->
- {DocId, NewFDI} = lists:keyfind(DocId, 1, NewIdFDIs),
- case {OldFDI, NewFDI} of
- {not_found, not_found} ->
- [];
- {#full_doc_info{} = A, #full_doc_info{} = A} ->
- [];
- {#full_doc_info{}, _} ->
- [{OldFDI, NewFDI}]
- end
- end, IdFDIs),
-
- PSeq = couch_db_engine:get_purge_seq(Db),
- {RevPInfos, _} = lists:foldl(fun({UUID, DocId, Revs}, {PIAcc, PSeqAcc}) ->
- Info = {PSeqAcc + 1, UUID, DocId, Revs},
- {[Info | PIAcc], PSeqAcc + 1}
- end, {[], PSeq}, PurgeReqs),
- PInfos = lists:reverse(RevPInfos),
-
- {ok, Db1} = couch_db_engine:purge_docs(Db, Pairs, PInfos),
- Db2 = commit_data(Db1),
- ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
- couch_event:notify(Db2#db.name, updated),
- {ok, Db2, Replies}.
-
-
-apply_purge_reqs([], IdFDIs, _USeq, Replies) ->
- {IdFDIs, lists:reverse(Replies)};
-
-apply_purge_reqs([Req | RestReqs], IdFDIs, USeq, Replies) ->
- {_UUID, DocId, Revs} = Req,
- {value, {_, FDI0}, RestIdFDIs} = lists:keytake(DocId, 1, IdFDIs),
- {NewFDI, RemovedRevs, NewUSeq} = case FDI0 of
- #full_doc_info{rev_tree = Tree} ->
- case couch_key_tree:remove_leafs(Tree, Revs) of
- {_, []} ->
- % No change
- {FDI0, [], USeq};
- {[], Removed} ->
- % Completely purged
- {not_found, Removed, USeq};
- {NewTree, Removed} ->
-                % It's possible to purge the #leaf{} that contains
- % the update_seq where this doc sits in the
- % update_seq sequence. Rather than do a bunch of
- % complicated checks we just re-label every #leaf{}
- % and reinsert it into the update_seq sequence.
- {NewTree2, NewUpdateSeq} = couch_key_tree:mapfold(fun
- (_RevId, Leaf, leaf, SeqAcc) ->
- {Leaf#leaf{seq = SeqAcc + 1},
- SeqAcc + 1};
- (_RevId, Value, _Type, SeqAcc) ->
- {Value, SeqAcc}
- end, USeq, NewTree),
-
- FDI1 = FDI0#full_doc_info{
- update_seq = NewUpdateSeq,
- rev_tree = NewTree2
- },
- {FDI1, Removed, NewUpdateSeq}
- end;
- not_found ->
- % Not found means nothing to change
- {not_found, [], USeq}
- end,
- NewIdFDIs = [{DocId, NewFDI} | RestIdFDIs],
- NewReplies = [{ok, RemovedRevs} | Replies],
- apply_purge_reqs(RestReqs, NewIdFDIs, NewUSeq, NewReplies).
-
-
-commit_data(Db) ->
- {ok, Db1} = couch_db_engine:commit_data(Db),
- Db1#db{
- committed_update_seq = couch_db_engine:get_update_seq(Db)
- }.
-
-
-pair_write_info(Old, New) ->
- lists:map(fun(FDI) ->
- case lists:keyfind(FDI#full_doc_info.id, #full_doc_info.id, Old) of
- #full_doc_info{} = OldFDI -> {OldFDI, FDI};
- false -> {not_found, FDI}
- end
- end, New).
-
-
-get_meta_body_size(Meta) ->
- {ejson_size, ExternalSize} = lists:keyfind(ejson_size, 1, Meta),
- ExternalSize.
-
-
-default_security_object(<<"shards/", _/binary>>) ->
- case config:get("couchdb", "default_security", "admin_only") of
- "admin_only" ->
- [{<<"members">>,{[{<<"roles">>,[<<"_admin">>]}]}},
- {<<"admins">>,{[{<<"roles">>,[<<"_admin">>]}]}}];
- Everyone when Everyone == "everyone"; Everyone == "admin_local" ->
- []
- end;
-default_security_object(_DbName) ->
- case config:get("couchdb", "default_security", "admin_only") of
- Admin when Admin == "admin_only"; Admin == "admin_local" ->
- [{<<"members">>,{[{<<"roles">>,[<<"_admin">>]}]}},
- {<<"admins">>,{[{<<"roles">>,[<<"_admin">>]}]}}];
- "everyone" ->
- []
- end.
-
-% These functions rely on using the process dictionary. This is
-% usually frowned upon; however, in this case it is done to avoid
-% changing to a different server state record. Once PSE (Pluggable
-% Storage Engine) code lands this should be moved to the #db{} record.
-update_idle_limit_from_config() ->
- Default = integer_to_list(?IDLE_LIMIT_DEFAULT),
- IdleLimit = case config:get("couchdb", "idle_check_timeout", Default) of
- "infinity" ->
- infinity;
- Milliseconds ->
- list_to_integer(Milliseconds)
- end,
- put(idle_limit, IdleLimit),
- IdleLimit.
-
-idle_limit() ->
- get(idle_limit).
-
-hibernate_if_no_idle_limit() ->
- case idle_limit() of
- infinity ->
- hibernate;
- Timeout when is_integer(Timeout) ->
- Timeout
- end.
-
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-
-update_local_doc_revs_test_() ->
- {inparallel, [
- {"Test local doc with valid rev", fun t_good_local_doc/0},
- {"Test local doc with invalid rev", fun t_bad_local_doc/0},
- {"Test deleted local doc", fun t_dead_local_doc/0}
- ]}.
-
-
-t_good_local_doc() ->
- Doc = #doc{
- id = <<"_local/alice">>,
- revs = {0, [<<"1">>]},
- meta = [{ref, make_ref()}]
- },
- [NewDoc] = update_local_doc_revs([{self(), Doc}]),
- ?assertEqual({0, [2]}, NewDoc#doc.revs),
- {ok, Result} = receive_result(Doc),
- ?assertEqual({ok,{0,<<"2">>}}, Result).
-
-
-t_bad_local_doc() ->
- lists:foreach(fun(BadRevs) ->
- Doc = #doc{
- id = <<"_local/alice">>,
- revs = BadRevs,
- meta = [{ref, make_ref()}]
- },
- NewDocs = update_local_doc_revs([{self(), Doc}]),
- ?assertEqual([], NewDocs),
- {ok, Result} = receive_result(Doc),
- ?assertEqual({error,<<"Invalid rev format">>}, Result)
- end, [{0, [<<"a">>]}, {1, [<<"1">>]}]).
-
-
-t_dead_local_doc() ->
- Doc = #doc{
- id = <<"_local/alice">>,
- revs = {0, [<<"122">>]},
- deleted = true,
- meta = [{ref, make_ref()}]
- },
- [NewDoc] = update_local_doc_revs([{self(), Doc}]),
- ?assertEqual({0, [0]}, NewDoc#doc.revs),
- {ok, Result} = receive_result(Doc),
- ?assertEqual({ok,{0,<<"0">>}}, Result).
-
-
-receive_result(#doc{meta = Meta}) ->
- Ref = couch_util:get_value(ref, Meta),
- receive
- {result, _, {Ref, Result}} -> {ok, Result}
- end.
-
--endif.
diff --git a/src/couch/src/couch_debug.erl b/src/couch/src/couch_debug.erl
index 290d095bf..80feb615c 100644
--- a/src/couch/src/couch_debug.erl
+++ b/src/couch/src/couch_debug.erl
@@ -370,8 +370,6 @@ fold_tree(Tree, Acc, Fun) ->
linked_processes_info(Pid, Info) ->
link_tree(Pid, Info, fun(P, Props) -> {process_name(P), Props} end).
-print_linked_processes(couch_index_server) ->
- print_couch_index_server_processes();
print_linked_processes(Name) when is_atom(Name) ->
case whereis(Name) of
undefined -> {error, {unknown, Name}};
@@ -386,42 +384,6 @@ print_linked_processes(Pid) when is_pid(Pid) ->
Tree = linked_processes_info(Pid, Info),
print_tree(Tree, TableSpec).
-id("couch_file:init" ++ _, Pid, _Props) ->
- case couch_file:process_info(Pid) of
- {{file_descriptor, prim_file, {Port, Fd}}, FilePath} ->
- term2str([
- term2str(Fd), ":",
- term2str(Port), ":",
- shorten_path(FilePath)]);
- undefined ->
- ""
- end;
-id(_IdStr, _Pid, _Props) ->
- "".
-
-print_couch_index_server_processes() ->
- Info = [reductions, message_queue_len, memory],
- TableSpec = [
- {50, left, name}, {12, centre, reductions},
- {19, centre, message_queue_len}, {14, centre, memory}, {id}
- ],
-
- Tree = link_tree(whereis(couch_index_server), Info, fun(P, Props) ->
- IdStr = process_name(P),
- {IdStr, [{id, id(IdStr, P, Props)} | Props]}
- end),
- print_tree(Tree, TableSpec).
-
-shorten_path(Path) ->
- ViewDir = list_to_binary(config:get("couchdb", "view_index_dir")),
- DatabaseDir = list_to_binary(config:get("couchdb", "database_dir")),
- File = list_to_binary(Path),
- Len = max(
- binary:longest_common_prefix([File, DatabaseDir]),
- binary:longest_common_prefix([File, ViewDir])
- ),
- <<_:Len/binary, Rest/binary>> = File,
- binary_to_list(Rest).
%% Pretty print functions
diff --git a/src/couch/src/couch_doc.erl b/src/couch/src/couch_doc.erl
index 7224921d4..4d0a13d14 100644
--- a/src/couch/src/couch_doc.erl
+++ b/src/couch/src/couch_doc.erl
@@ -16,14 +16,13 @@
-export([from_json_obj/1, from_json_obj_validate/1]).
-export([from_json_obj/2, from_json_obj_validate/2]).
-export([to_json_obj/2, has_stubs/1, merge_stubs/2]).
--export([validate_docid/1, validate_docid/2, get_validate_doc_fun/1]).
+-export([get_validate_doc_fun/1]).
-export([doc_from_multi_part_stream/2, doc_from_multi_part_stream/3]).
-export([doc_from_multi_part_stream/4]).
-export([doc_to_multi_part_stream/5, len_doc_to_multi_part_stream/4]).
-export([restart_open_doc_revs/3]).
-export([to_path/1]).
--export([with_ejson_body/1]).
-export([is_deleted/1]).
@@ -115,7 +114,7 @@ to_json_attachments(Atts, OutputData, Follows, ShowEnc) ->
[{<<"_attachments">>, {Props}}].
to_json_obj(Doc, Options) ->
- doc_to_json_obj(with_ejson_body(Doc), Options).
+ doc_to_json_obj(Doc, Options).
doc_to_json_obj(#doc{id=Id,deleted=Del,body=Body,revs={Start, RevIds},
meta=Meta}=Doc,Options)->
@@ -198,58 +197,12 @@ parse_revs(_) ->
throw({bad_request, "Invalid list of revisions"}).
-validate_docid(DocId, DbName) ->
- case DbName =:= ?l2b(config:get("mem3", "shards_db", "_dbs")) andalso
- couch_db:is_system_db_name(DocId) of
- true ->
- ok;
- false ->
- validate_docid(DocId)
- end.
-
-validate_docid(<<"">>) ->
- throw({illegal_docid, <<"Document id must not be empty">>});
-validate_docid(<<"_design/">>) ->
- throw({illegal_docid, <<"Illegal document id `_design/`">>});
-validate_docid(<<"_local/">>) ->
- throw({illegal_docid, <<"Illegal document id `_local/`">>});
-validate_docid(Id) when is_binary(Id) ->
- MaxLen = case config:get("couchdb", "max_document_id_length", "infinity") of
- "infinity" -> infinity;
- IntegerVal -> list_to_integer(IntegerVal)
- end,
- case MaxLen > 0 andalso byte_size(Id) > MaxLen of
- true -> throw({illegal_docid, <<"Document id is too long">>});
- false -> ok
- end,
- case couch_util:validate_utf8(Id) of
- false -> throw({illegal_docid, <<"Document id must be valid UTF-8">>});
- true -> ok
- end,
- case Id of
- <<"_design/", _/binary>> -> ok;
- <<"_local/", _/binary>> -> ok;
- <<"_", _/binary>> ->
- case couch_db_plugin:validate_docid(Id) of
- true ->
- ok;
- false ->
- throw(
- {illegal_docid,
- <<"Only reserved document ids may start with underscore.">>})
- end;
- _Else -> ok
- end;
-validate_docid(Id) ->
- couch_log:debug("Document id is not a string: ~p", [Id]),
- throw({illegal_docid, <<"Document id must be a string">>}).
-
transfer_fields([], #doc{body=Fields}=Doc, _) ->
% convert fields back to json object
Doc#doc{body={lists:reverse(Fields)}};
transfer_fields([{<<"_id">>, Id} | Rest], Doc, DbName) ->
- validate_docid(Id, DbName),
+ fabric2_db:validate_docid(Id),
transfer_fields(Rest, Doc#doc{id=Id}, DbName);
transfer_fields([{<<"_rev">>, Rev} | Rest], #doc{revs={0, []}}=Doc, DbName) ->
@@ -518,9 +471,3 @@ flush_parser_messages(Ref) ->
after 0 ->
ok
end.
-
-
-with_ejson_body(#doc{body = Body} = Doc) when is_binary(Body) ->
- Doc#doc{body = couch_compress:decompress(Body)};
-with_ejson_body(#doc{body = {_}} = Doc) ->
- Doc.
diff --git a/src/couch/src/couch_drv.erl b/src/couch/src/couch_drv.erl
index f2ff2ac24..002facd48 100644
--- a/src/couch/src/couch_drv.erl
+++ b/src/couch/src/couch_drv.erl
@@ -19,6 +19,7 @@
-export([start_link/0]).
-include_lib("couch/include/couch_db.hrl").
+-include_lib("kernel/include/logger.hrl").
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
@@ -29,6 +30,7 @@ init([]) ->
ok ->
{ok, nil};
{error, already_loaded} ->
+ ?LOG_INFO(#{what => reload_couch_icu_driver}),
couch_log:info("~p reloading couch_icu_driver", [?MODULE]),
ok = erl_ddll:reload(LibDir, "couch_icu_driver"),
{ok, nil};
diff --git a/src/couch/src/couch_emsort.erl b/src/couch/src/couch_emsort.erl
deleted file mode 100644
index 2a25a2322..000000000
--- a/src/couch/src/couch_emsort.erl
+++ /dev/null
@@ -1,318 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_emsort).
-
-% This is an implementation of an external N-way merge sort. Its primary
-% purpose is to be used during database compaction as an optimization for
-% managing the docid btree.
-%
-% Trunk currently writes the docid btree as it's compacting the database, but
-% this is quite inefficient as it's written out of order in the general case
-% as writes are ordered by update_seq.
-%
-% The general design of this module is a very standard merge sort with one
-% caveat due to append only files. This is described in more detail in the
-% sorting phase.
-%
-% The basic algorithm is in two halves. The first half stores KV pairs to disk
-% which is then followed by the actual sorting phase that streams KV's back
-% to the client using a fold-like function. After some basic definitions we'll
-% describe both phases.
-%
-% Key/Value pairs (aka, KV pairs, or KVs) are simply lists of two-tuples with
-% a key as the first element and an arbitrary value as the second. The key of
-% this pair is what is used to determine the sort order based on native Erlang
-% term comparison.
-%
-% Internally, KVs are stored as lists with a max size defined by
-% #ems.chain_chunk. These lists are then chained together on disk using disk
-% offsets as a poor man's linked list. The basic format of a list looks like
-% {KVs, DiskOffset} where DiskOffset is either the atom nil which means "end
-% of the list" or an integer that is a file position offset that is the
-% location of another {KVs, DiskOffset} term. The head of each list is
-% referred to with a single DiskOffset. The set of terms that extend from
-% this initial DiskOffset to the last {KVs, nil} term is referred to in the
-% code as a chain. Two important facts are that one call to couch_emsort:add/2
-% creates a single chain, and that a chain is always sorted on disk (though it's
-% possible to be sorted in descending order which will be discussed later).
-%
-% The second major internal structure is the back bone. This is a list of
-% chains that has a quite similar structure to chains but contains different
-% data types and has no guarantee on ordering. The back bone is merely the
-% list of all head DiskOffsets. The backbone has a structure similar to the
-% {DiskOffsets, DiskOffset} form that we use for chains, except that DiskOffsets is
-% a list of integers that refer to the heads of chains. The maximum size of
-% DiskOffsets is defined by #ems.bb_chunk. It is important to note that the
-% backbone has no defined ordering. The other thing of note is that the RAM
-% bounds are loosely defined as:
-%
-% #ems.bb_chunk * #ems.chain_chunk * avg_size(KV).
-%
-% Build Phase
-% -----------
-%
-% As mentioned, each call to couch_emsort:add/2 creates a chain from the
-% list of KVs that are passed in. This list is first sorted and then the
-% chain is created by foldr-ing (note: r) across the list to build the
-% chain on disk. It is important to note that the final chain is then
-% sorted in ascending order on disk.
-%
-%
-% Sort Phase
-% ----------
-%
-% The sort phase is where the merge sort kicks in. This is generally your
-% average merge sort with a caveat for append only storage. First the
-% general outline.
-%
-% The general outline for this sort is that it iteratively merges chains
-% in the backbone until less than #ems.bb_chunk chains exist. At this
-% point it switches to the last merge sort phase where it just streams
-% the sorted KVs back to the client using a fold function.
-%
-% The general chain merging is a pretty standard merge sort. You load up
-% the initial KVs from each phase, pick the next one in sort order and
-% then when you run out of KVs you're left with a single DiskOffset for
-% the head of a single chain that represents the merge. These new
-% DiskOffsets are used to build the new back bone.
-%
-% The one caveat here is that we're using append only storage. This is
-% important because once we make a pass we've effectively reversed the
-% sort order of each chain. I.e., the first merge results in chains that
-% are ordered in descending order. Since one pass reverses the list,
-% the trick is that each phase does two passes. The first phase picks
-% the smallest KV to write next and the second phase picks the largest.
-% In this manner each time we do a back bone merge we end up with chains
-% that are always sorted in an ascending order.
-%
-% The one downfall is that in the interest of simplicity the sorting is
-% restricted to Erlang's native term sorting. A possible extension would
-% be to allow two comparison functions to be used, but this module is
-% currently only used for docid sorting which is hardcoded to be raw
-% Erlang ordering.
-%
-% Diagram
-% -------
-%
-% If it helps, this is a general diagram of the internal structures. A
-% couple points to note since this is ASCII art. The BB pointers across
-% the top are lists of chains going down. Each BBN item is one of the
-% {DiskOffsets, DiskOffset} structures discussed earlier. Going down,
-% the CMN nodes are actually representing #ems.bb_chunk chains in parallel
-% going off the back bone. It is important and not represented in this
-% diagram that within these groups the chains don't have to be the same
-% length. That's just a limitation of my ASCII artistic abilities.
-%
-% The BBN* node is marked with a * to denote that it is the only state
-% that we store when writing headers to disk as it has pointers that
-% lead us to all data in the tree.
-%
-% BB1 <- BB2 <- BB3 <- BBN*
-% | | | |
-% v v v v
-% CA1 CB1 CC1 CD1
-% | | |
-% v v v
-% CA2 CC2 CD2
-% | |
-% v v
-% CA3 CD3
-%
-
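As a quick orientation, here is a minimal, hedged sketch of how the API exported below is typically driven; Fd is a couch_file descriptor, and the sort_example/2 and drain/2 helper names are invented purely for illustration:

sort_example(Fd, KVLists) ->
    {ok, Ems0} = couch_emsort:open(Fd),
    Ems1 = lists:foldl(fun(KVs, EmsAcc) ->
        % each add/2 call writes one sorted chain to disk
        {ok, EmsAcc1} = couch_emsort:add(EmsAcc, KVs),
        EmsAcc1
    end, Ems0, KVLists),
    % merge the chains, then stream KVs back in ascending key order
    {ok, Iter} = couch_emsort:sort(Ems1),
    drain(couch_emsort:next(Iter), []).

drain(finished, Acc) ->
    lists:reverse(Acc);
drain({ok, KV, Iter}, Acc) ->
    drain(couch_emsort:next(Iter), [KV | Acc]).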
--export([open/1, open/2, get_fd/1, get_state/1]).
--export([add/2, merge/1, sort/1, iter/1, next/1]).
-
-
--record(ems, {
- fd,
- root,
- bb_chunk = 10,
- chain_chunk = 100
-}).
-
-
-open(Fd) ->
- {ok, #ems{fd=Fd}}.
-
-
-open(Fd, Options) ->
- {ok, set_options(#ems{fd=Fd}, Options)}.
-
-
-set_options(Ems, []) ->
- Ems;
-set_options(Ems, [{root, Root} | Rest]) ->
- set_options(Ems#ems{root=Root}, Rest);
-set_options(Ems, [{chain_chunk, Count} | Rest]) when is_integer(Count) ->
- set_options(Ems#ems{chain_chunk=Count}, Rest);
-set_options(Ems, [{back_bone_chunk, Count} | Rest]) when is_integer(Count) ->
- set_options(Ems#ems{bb_chunk=Count}, Rest).
-
-
-get_fd(#ems{fd=Fd}) ->
- Fd.
-
-
-get_state(#ems{root=Root}) ->
- Root.
-
-
-add(Ems, []) ->
- {ok, Ems};
-add(Ems, KVs) ->
- Pos = write_kvs(Ems, KVs),
- {ok, add_bb_pos(Ems, Pos)}.
-
-
-sort(#ems{}=Ems) ->
- {ok, Ems1} = merge(Ems),
- iter(Ems1).
-
-
-merge(#ems{root=undefined}=Ems) ->
- {ok, Ems};
-merge(#ems{}=Ems) ->
- {ok, decimate(Ems)}.
-
-
-iter(#ems{root=undefined}=Ems) ->
- {ok, {Ems, []}};
-iter(#ems{root={BB, nil}}=Ems) ->
- Chains = init_chains(Ems, small, BB),
- {ok, {Ems, Chains}};
-iter(#ems{root={_, _}}) ->
- {error, not_merged}.
-
-
-next({_Ems, []}) ->
- finished;
-next({Ems, Chains}) ->
- {KV, RestChains} = choose_kv(small, Ems, Chains),
- {ok, KV, {Ems, RestChains}}.
-
-
-add_bb_pos(#ems{root=undefined}=Ems, Pos) ->
- Ems#ems{root={[Pos], nil}};
-add_bb_pos(#ems{root={BB, Prev}}=Ems, Pos) ->
- {NewBB, NewPrev} = append_item(Ems, {BB, Prev}, Pos, Ems#ems.bb_chunk),
- Ems#ems{root={NewBB, NewPrev}}.
-
-
-write_kvs(Ems, KVs) ->
-    % Write the list of KVs to disk in sorted order in chunks
-    % of 100. Also make sure that the order is such that they
-    % can be streamed in ascending order.
- {LastKVs, LastPos} =
- lists:foldr(fun(KV, Acc) ->
- append_item(Ems, Acc, KV, Ems#ems.chain_chunk)
- end, {[], nil}, lists:sort(KVs)),
- {ok, Final, _} = couch_file:append_term(Ems#ems.fd, {LastKVs, LastPos}),
- Final.
-
-
-decimate(#ems{root={_BB, nil}}=Ems) ->
- % We have less than bb_chunk backbone pointers so we're
- % good to start streaming KV's back to the client.
- Ems;
-decimate(#ems{root={BB, NextBB}}=Ems) ->
- % To make sure we have a bounded amount of data in RAM
- % at any given point we first need to decimate the data
- % by performing the first couple iterations of a merge
- % sort writing the intermediate results back to disk.
-
- % The first pass gives us a sort with pointers linked from
- % largest to smallest.
- {RevBB, RevNextBB} = merge_back_bone(Ems, small, BB, NextBB),
-
- % We have to run a second pass so that links are pointed
- % back from smallest to largest.
- {FwdBB, FwdNextBB} = merge_back_bone(Ems, big, RevBB, RevNextBB),
-
-    % Continue decimating until we have an acceptable bound on
- % the number of keys to use.
- decimate(Ems#ems{root={FwdBB, FwdNextBB}}).
-
-
-merge_back_bone(Ems, Choose, BB, NextBB) ->
- BBPos = merge_chains(Ems, Choose, BB),
- merge_rest_back_bone(Ems, Choose, NextBB, {[BBPos], nil}).
-
-
-merge_rest_back_bone(_Ems, _Choose, nil, Acc) ->
- Acc;
-merge_rest_back_bone(Ems, Choose, BBPos, Acc) ->
- {ok, {BB, NextBB}} = couch_file:pread_term(Ems#ems.fd, BBPos),
- NewPos = merge_chains(Ems, Choose, BB),
- {NewBB, NewPrev} = append_item(Ems, Acc, NewPos, Ems#ems.bb_chunk),
- merge_rest_back_bone(Ems, Choose, NextBB, {NewBB, NewPrev}).
-
-
-merge_chains(Ems, Choose, BB) ->
- Chains = init_chains(Ems, Choose, BB),
- merge_chains(Ems, Choose, Chains, {[], nil}).
-
-
-merge_chains(Ems, _Choose, [], ChainAcc) ->
- {ok, CPos, _} = couch_file:append_term(Ems#ems.fd, ChainAcc),
- CPos;
-merge_chains(#ems{chain_chunk=CC}=Ems, Choose, Chains, Acc) ->
- {KV, RestChains} = choose_kv(Choose, Ems, Chains),
- {NewKVs, NewPrev} = append_item(Ems, Acc, KV, CC),
- merge_chains(Ems, Choose, RestChains, {NewKVs, NewPrev}).
-
-
-init_chains(Ems, Choose, BB) ->
- Chains = lists:map(fun(CPos) ->
- {ok, {KVs, NextKVs}} = couch_file:pread_term(Ems#ems.fd, CPos),
- {KVs, NextKVs}
- end, BB),
- order_chains(Choose, Chains).
-
-
-order_chains(small, Chains) -> lists:sort(Chains);
-order_chains(big, Chains) -> lists:reverse(lists:sort(Chains)).
-
-
-choose_kv(_Choose, _Ems, [{[KV], nil} | Rest]) ->
- {KV, Rest};
-choose_kv(Choose, Ems, [{[KV], Pos} | RestChains]) ->
- {ok, Chain} = couch_file:pread_term(Ems#ems.fd, Pos),
- case Choose of
- small -> {KV, ins_small_chain(RestChains, Chain, [])};
- big -> {KV, ins_big_chain(RestChains, Chain, [])}
- end;
-choose_kv(Choose, _Ems, [{[KV | RestKVs], Prev} | RestChains]) ->
- case Choose of
- small -> {KV, ins_small_chain(RestChains, {RestKVs, Prev}, [])};
- big -> {KV, ins_big_chain(RestChains, {RestKVs, Prev}, [])}
- end.
-
-
-ins_small_chain([{[{K1,_}|_],_}=C1|Rest], {[{K2,_}|_],_}=C2, Acc) when K1<K2 ->
- ins_small_chain(Rest, C2, [C1 | Acc]);
-ins_small_chain(Rest, Chain, Acc) ->
- lists:reverse(Acc, [Chain | Rest]).
-
-
-ins_big_chain([{[{K1,_}|_],_}=C1|Rest], {[{K2,_}|_],_}=C2, Acc) when K1>K2 ->
- ins_big_chain(Rest, C2, [C1 | Acc]);
-ins_big_chain(Rest, Chain, Acc) ->
- lists:reverse(Acc, [Chain | Rest]).
-
-
-append_item(Ems, {List, Prev}, Pos, Size) when length(List) >= Size ->
- {ok, PrevList, _} = couch_file:append_term(Ems#ems.fd, {List, Prev}),
- {[Pos], PrevList};
-append_item(_Ems, {List, Prev}, Pos, _Size) ->
- {[Pos | List], Prev}.
-
diff --git a/src/couch/src/couch_event_sup.erl b/src/couch/src/couch_event_sup.erl
deleted file mode 100644
index 32f1b9b68..000000000
--- a/src/couch/src/couch_event_sup.erl
+++ /dev/null
@@ -1,74 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% The purpose of this module is to allow event handlers to participate in Erlang
-%% supervisor trees. It provides a monitorable process that crashes if the event
-%% handler fails. The process, when shut down, deregisters the event handler.
-
--module(couch_event_sup).
--behaviour(gen_server).
--vsn(1).
-
--include_lib("couch/include/couch_db.hrl").
-
--export([start_link/3,start_link/4, stop/1]).
--export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2,code_change/3]).
-
-%
-% Instead of calling the
-% ok = gen_event:add_sup_handler(error_logger, my_log, Args)
-%
-% do this:
-% {ok, LinkedPid} = couch_event_sup:start_link(error_logger, my_log, Args)
-%
-% The benefit is the event is now part of the process tree, and can be
-% started, restarted and shutdown consistently like the rest of the server
-% components.
-%
-% And now if the "event" crashes, the supervisor is notified and can restart
-% the event handler.
-%
-% Use this form for a named process:
-% {ok, LinkedPid} = couch_event_sup:start_link({local, my_log}, error_logger, my_log, Args)
-%
-
-start_link(EventMgr, EventHandler, Args) ->
- gen_server:start_link(couch_event_sup, {EventMgr, EventHandler, Args}, []).
-
-start_link(ServerName, EventMgr, EventHandler, Args) ->
- gen_server:start_link(ServerName, couch_event_sup, {EventMgr, EventHandler, Args}, []).
-
-stop(Pid) ->
- gen_server:call(Pid, stop).
-
-init({EventMgr, EventHandler, Args}) ->
- case gen_event:add_sup_handler(EventMgr, EventHandler, Args) of
- ok ->
- {ok, {EventMgr, EventHandler}};
- {stop, Error} ->
- {stop, Error}
- end.
-
-terminate(_Reason, _State) ->
- ok.
-
-handle_call(stop, _From, State) ->
- {stop, normal, ok, State}.
-
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info({gen_event_EXIT, _Handler, Reason}, State) ->
- {stop, Reason, State}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
diff --git a/src/couch/src/couch_file.erl b/src/couch/src/couch_file.erl
deleted file mode 100644
index 6db23eaa3..000000000
--- a/src/couch/src/couch_file.erl
+++ /dev/null
@@ -1,804 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_file).
--behaviour(gen_server).
--vsn(2).
-
--include_lib("couch/include/couch_db.hrl").
-
-
--define(INITIAL_WAIT, 60000).
--define(MONITOR_CHECK, 10000).
--define(SIZE_BLOCK, 16#1000). % 4 KiB
--define(IS_OLD_STATE(S), is_pid(S#file.db_monitor)).
--define(PREFIX_SIZE, 5).
--define(DEFAULT_READ_COUNT, 1024).
-
--type block_id() :: non_neg_integer().
--type location() :: non_neg_integer().
--type header_size() :: non_neg_integer().
-
--record(file, {
- fd,
- is_sys,
- eof = 0,
- db_monitor,
- pread_limit = 0
-}).
-
-% public API
--export([open/1, open/2, close/1, bytes/1, sync/1, truncate/2, set_db_pid/2]).
--export([pread_term/2, pread_iolist/2, pread_binary/2]).
--export([append_binary/2, append_binary_md5/2]).
--export([append_raw_chunk/2, assemble_file_chunk/1, assemble_file_chunk/2]).
--export([append_term/2, append_term/3, append_term_md5/2, append_term_md5/3]).
--export([write_header/2, read_header/1]).
--export([delete/2, delete/3, nuke_dir/2, init_delete_dir/1]).
--export([last_read/1]).
-
-% gen_server callbacks
--export([init/1, terminate/2, code_change/3, format_status/2]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
-%% helper functions
--export([process_info/1]).
-
-%%----------------------------------------------------------------------
-%% Args: Valid Options are [create] and [create,overwrite].
-%% Files are opened in read/write mode.
-%% Returns: On success, {ok, Fd}
-%% or {error, Reason} if the file could not be opened.
-%%----------------------------------------------------------------------
-
-open(Filepath) ->
- open(Filepath, []).
-
-open(Filepath, Options) ->
- case gen_server:start_link(couch_file,
- {Filepath, Options, self(), Ref = make_ref()}, []) of
- {ok, Fd} ->
- {ok, Fd};
- ignore ->
- % get the error
- receive
- {Ref, Pid, {error, Reason} = Error} ->
- case process_info(self(), trap_exit) of
- {trap_exit, true} -> receive {'EXIT', Pid, _} -> ok end;
- {trap_exit, false} -> ok
- end,
- case {lists:member(nologifmissing, Options), Reason} of
- {true, enoent} -> ok;
- _ ->
- couch_log:error("Could not open file ~s: ~s",
- [Filepath, file:format_error(Reason)])
- end,
- Error
- end;
- Error ->
- % We can't say much here, because it could be any kind of error.
- % Just let it bubble and an encapsulating subcomponent can perhaps
- % be more informative. It will likely appear in the SASL log, anyway.
- Error
- end.
-
-
-set_db_pid(Fd, Pid) ->
- gen_server:call(Fd, {set_db_pid, Pid}).
-
-
-%%----------------------------------------------------------------------
-%% Purpose: To append an Erlang term to the end of the file.
-%% Args: Erlang term to serialize and append to the file.
-%% Returns: {ok, Pos, NumBytesWritten} where Pos is the file offset to
-%% the beginning of the serialized term. Use pread_term to read the term
-%% back.
-%% or {error, Reason}.
-%%----------------------------------------------------------------------
-
-append_term(Fd, Term) ->
- append_term(Fd, Term, []).
-
-append_term(Fd, Term, Options) ->
- Comp = couch_util:get_value(compression, Options, ?DEFAULT_COMPRESSION),
- append_binary(Fd, couch_compress:compress(Term, Comp)).
-
-append_term_md5(Fd, Term) ->
- append_term_md5(Fd, Term, []).
-
-append_term_md5(Fd, Term, Options) ->
- Comp = couch_util:get_value(compression, Options, ?DEFAULT_COMPRESSION),
- append_binary_md5(Fd, couch_compress:compress(Term, Comp)).
-
-%%----------------------------------------------------------------------
-%% Purpose: To append an Erlang binary to the end of the file.
-%% Args: Erlang term to serialize and append to the file.
-%% Returns: {ok, Pos, NumBytesWritten} where Pos is the file offset to the
-%% beginning of the serialized term. Use pread_term to read the term back.
-%% or {error, Reason}.
-%%----------------------------------------------------------------------
-
-append_binary(Fd, Bin) ->
- ioq:call(Fd, {append_bin, assemble_file_chunk(Bin)}, erlang:get(io_priority)).
-
-append_binary_md5(Fd, Bin) ->
- ioq:call(Fd,
- {append_bin, assemble_file_chunk(Bin, couch_hash:md5_hash(Bin))},
- erlang:get(io_priority)).
-
-append_raw_chunk(Fd, Chunk) ->
- ioq:call(Fd, {append_bin, Chunk}, erlang:get(io_priority)).
-
-
-assemble_file_chunk(Bin) ->
- [<<0:1/integer, (iolist_size(Bin)):31/integer>>, Bin].
-
-assemble_file_chunk(Bin, Md5) ->
- [<<1:1/integer, (iolist_size(Bin)):31/integer>>, Md5, Bin].
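For example (the payload is made up), a 5-byte binary framed without an MD5 prefix serializes to a 4-byte length word with the high bit clear, followed by the data:

    iolist_to_binary(assemble_file_chunk(<<"hello">>))
    %=> <<0,0,0,5,"hello">>   i.e. <<0:1, 5:31, "hello">>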
-
-%%----------------------------------------------------------------------
-%% Purpose: Reads a term from a file that was written with append_term
-%% Args: Pos, the offset into the file where the term is serialized.
-%% Returns: {ok, Term}
-%% or {error, Reason}.
-%%----------------------------------------------------------------------
-
-
-pread_term(Fd, Pos) ->
- {ok, Bin} = pread_binary(Fd, Pos),
- {ok, couch_compress:decompress(Bin)}.
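As a hedged usage sketch (the path and term are invented; error handling and supervision are omitted), a term appended with append_term/2 can be read back with pread_term/2 using the returned offset:

append_read_example() ->
    {ok, Fd} = couch_file:open("/tmp/example_data.couch", [create, overwrite]),
    {ok, Pos, _NumBytesWritten} = couch_file:append_term(Fd, {[{<<"a">>, 1}]}),
    ok = couch_file:sync(Fd),
    {ok, Term} = couch_file:pread_term(Fd, Pos),
    couch_file:close(Fd),
    Term.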
-
-
-%%----------------------------------------------------------------------
-%% Purpose: Reads a binary from a file that was written with append_binary
-%% Args: Pos, the offset into the file where the term is serialized.
-%% Returns: {ok, Term}
-%% or {error, Reason}.
-%%----------------------------------------------------------------------
-
-pread_binary(Fd, Pos) ->
- {ok, L} = pread_iolist(Fd, Pos),
- {ok, iolist_to_binary(L)}.
-
-
-pread_iolist(Fd, Pos) ->
- case ioq:call(Fd, {pread_iolist, Pos}, erlang:get(io_priority)) of
- {ok, IoList, <<>>} ->
- {ok, IoList};
- {ok, IoList, Md5} ->
- case couch_hash:md5_hash(IoList) of
- Md5 ->
- {ok, IoList};
- _ ->
- couch_log:emergency("File corruption in ~p at position ~B",
- [Fd, Pos]),
- exit({file_corruption, <<"file corruption">>})
- end;
- Error ->
- Error
- end.
-
-%%----------------------------------------------------------------------
-%% Purpose: The length of a file, in bytes.
-%% Returns: {ok, Bytes}
-%% or {error, Reason}.
-%%----------------------------------------------------------------------
-
-% length in bytes
-bytes(Fd) ->
- gen_server:call(Fd, bytes, infinity).
-
-%%----------------------------------------------------------------------
-%% Purpose: Truncate a file to the number of bytes.
-%% Returns: ok
-%% or {error, Reason}.
-%%----------------------------------------------------------------------
-
-truncate(Fd, Pos) ->
- gen_server:call(Fd, {truncate, Pos}, infinity).
-
-%%----------------------------------------------------------------------
-%% Purpose: Ensure all bytes written to the file are flushed to disk.
-%% Returns: ok
-%% or {error, Reason}.
-%%----------------------------------------------------------------------
-
-sync(Filepath) when is_list(Filepath) ->
- case file:open(Filepath, [append, raw]) of
- {ok, Fd} ->
- try
- case file:sync(Fd) of
- ok ->
- ok;
- {error, Reason} ->
- erlang:error({fsync_error, Reason})
- end
- after
- ok = file:close(Fd)
- end;
- {error, Error} ->
- erlang:error(Error)
- end;
-sync(Fd) ->
- case gen_server:call(Fd, sync, infinity) of
- ok ->
- ok;
- {error, Reason} ->
- erlang:error({fsync_error, Reason})
- end.
-
-%%----------------------------------------------------------------------
-%% Purpose: Close the file.
-%% Returns: ok
-%%----------------------------------------------------------------------
-close(Fd) ->
- gen_server:call(Fd, close, infinity).
-
-
-delete(RootDir, Filepath) ->
- delete(RootDir, Filepath, []).
-
-delete(RootDir, FullFilePath, Options) ->
- EnableRecovery = config:get_boolean("couchdb",
- "enable_database_recovery", false),
- Async = not lists:member(sync, Options),
- Context = couch_util:get_value(context, Options, compaction),
- case Context =:= delete andalso EnableRecovery of
- true ->
- rename_file(FullFilePath);
- false ->
- DeleteAfterRename = config:get_boolean("couchdb",
- "delete_after_rename", true),
- delete_file(RootDir, FullFilePath, Async, DeleteAfterRename)
- end.
-
-delete_file(RootDir, Filepath, Async, DeleteAfterRename) ->
- DelFile = filename:join([RootDir,".delete", ?b2l(couch_uuids:random())]),
- case file:rename(Filepath, DelFile) of
- ok when DeleteAfterRename ->
- if (Async) ->
- spawn(file, delete, [DelFile]),
- ok;
- true ->
- file:delete(DelFile)
- end;
- Else ->
- Else
- end.
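Net effect of delete/3 and delete_file/4 above, as a sketch of the branches (annotation, not part of the patch):

%% context =:= delete, enable_database_recovery = true
%%     -> rename_file/1: the file is kept next to the original under a
%%        timestamped *.deleted.* name (see deleted_filename/1 below).
%% otherwise, delete_after_rename = true (the default)
%%     -> the file is renamed into RootDir/.delete/<uuid> and then removed,
%%        asynchronously unless the sync option was passed.
%% otherwise (delete_after_rename = false)
%%     -> the file is only renamed into RootDir/.delete/ and left for
%%        init_delete_dir/1 (further down) to sweep.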
-
-rename_file(Original) ->
- DeletedFileName = deleted_filename(Original),
- Now = calendar:local_time(),
- case file:rename(Original, DeletedFileName) of
- ok -> file:change_time(DeletedFileName, Now);
- Else -> Else
- end.
-
-deleted_filename(Original) ->
- {{Y, Mon, D}, {H, Min, S}} = calendar:universal_time(),
- Suffix = lists:flatten(
- io_lib:format(".~w~2.10.0B~2.10.0B."
- ++ "~2.10.0B~2.10.0B~2.10.0B.deleted"
- ++ filename:extension(Original), [Y, Mon, D, H, Min, S])),
- filename:rootname(Original) ++ Suffix.
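An illustrative input/output pair for deleted_filename/1, with a made-up timestamp (the eunit fixtures at the end of this module exercise the same shape):

%% deleted_filename("/srv/data/shards/00000000-1fffffff/db.1458336317.couch")
%%     -> "/srv/data/shards/00000000-1fffffff/db.1458336317.20210315.101530.deleted.couch"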
-
-nuke_dir(RootDelDir, Dir) ->
- EnableRecovery = config:get_boolean("couchdb",
- "enable_database_recovery", false),
- case EnableRecovery of
- true ->
- rename_file(Dir);
- false ->
- delete_dir(RootDelDir, Dir)
- end.
-
-delete_dir(RootDelDir, Dir) ->
- DeleteAfterRename = config:get_boolean("couchdb",
- "delete_after_rename", true),
- FoldFun = fun(File) ->
- Path = Dir ++ "/" ++ File,
- case filelib:is_dir(Path) of
- true ->
- ok = nuke_dir(RootDelDir, Path),
- file:del_dir(Path);
- false ->
- delete_file(RootDelDir, Path, false, DeleteAfterRename)
- end
- end,
- case file:list_dir(Dir) of
- {ok, Files} ->
- lists:foreach(FoldFun, Files),
- ok = file:del_dir(Dir);
- {error, enoent} ->
- ok
- end.
-
-
-init_delete_dir(RootDir) ->
- Dir = filename:join(RootDir,".delete"),
- % note: ensure_dir requires an actual filename component, which is the
- % reason for "foo".
- filelib:ensure_dir(filename:join(Dir,"foo")),
- spawn(fun() ->
- filelib:fold_files(Dir, ".*", true,
- fun(Filename, _) ->
- ok = file:delete(Filename)
- end, ok)
- end),
- ok.
-
-
-read_header(Fd) ->
- case ioq:call(Fd, find_header, erlang:get(io_priority)) of
- {ok, Bin} ->
- {ok, binary_to_term(Bin)};
- Else ->
- Else
- end.
-
-write_header(Fd, Data) ->
- Bin = term_to_binary(Data),
- Md5 = couch_hash:md5_hash(Bin),
- % now we assemble the final header binary and write to disk
- FinalBin = <<Md5/binary, Bin/binary>>,
- ioq:call(Fd, {write_header, FinalBin}, erlang:get(io_priority)).
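For reference, a sketch of the bytes that end up on disk for a header written above; the padding and the marker byte are added by the {write_header, _} clause of handle_call/3 further down:

%% <<0:PadBits,        % zero padding up to the next ?SIZE_BLOCK boundary
%%   1,                % header marker byte
%%   Size:32/integer,  % byte_size(Md5) + byte_size(TermBin)
%%   Md5:16/binary,    % couch_hash:md5_hash(TermBin)
%%   TermBin/binary>>  % term_to_binary(Data), block prefixes interleaved by make_blocks/2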
-
-
-init_status_error(ReturnPid, Ref, Error) ->
- ReturnPid ! {Ref, self(), Error},
- ignore.
-
-
-last_read(Fd) when is_pid(Fd) ->
- Now = os:timestamp(),
- couch_util:process_dict_get(Fd, read_timestamp, Now).
-
-
-% server functions
-
-init({Filepath, Options, ReturnPid, Ref}) ->
- OpenOptions = file_open_options(Options),
- Limit = get_pread_limit(),
- IsSys = lists:member(sys_db, Options),
- update_read_timestamp(),
- case lists:member(create, Options) of
- true ->
- filelib:ensure_dir(Filepath),
- case file:open(Filepath, OpenOptions) of
- {ok, Fd} ->
- %% Save Fd in process dictionary for debugging purposes
- put(couch_file_fd, {Fd, Filepath}),
- {ok, Length} = file:position(Fd, eof),
- case Length > 0 of
- true ->
- % this means the file already exists and has data.
- % FYI: We don't differentiate between empty files and non-existent
- % files here.
- case lists:member(overwrite, Options) of
- true ->
- {ok, 0} = file:position(Fd, 0),
- ok = file:truncate(Fd),
- ok = file:sync(Fd),
- maybe_track_open_os_files(Options),
- erlang:send_after(?INITIAL_WAIT, self(), maybe_close),
- {ok, #file{fd=Fd, is_sys=IsSys, pread_limit=Limit}};
- false ->
- ok = file:close(Fd),
- init_status_error(ReturnPid, Ref, {error, eexist})
- end;
- false ->
- maybe_track_open_os_files(Options),
- erlang:send_after(?INITIAL_WAIT, self(), maybe_close),
- {ok, #file{fd=Fd, is_sys=IsSys, pread_limit=Limit}}
- end;
- Error ->
- init_status_error(ReturnPid, Ref, Error)
- end;
- false ->
- % open in read mode first, so we don't create the file if it doesn't exist.
- case file:open(Filepath, [read, raw]) of
- {ok, Fd_Read} ->
- case file:open(Filepath, OpenOptions) of
- {ok, Fd} ->
- %% Save Fd in process dictionary for debugging purposes
- put(couch_file_fd, {Fd, Filepath}),
- ok = file:close(Fd_Read),
- maybe_track_open_os_files(Options),
- {ok, Eof} = file:position(Fd, eof),
- erlang:send_after(?INITIAL_WAIT, self(), maybe_close),
- {ok, #file{fd=Fd, eof=Eof, is_sys=IsSys, pread_limit=Limit}};
- Error ->
- init_status_error(ReturnPid, Ref, Error)
- end;
- Error ->
- init_status_error(ReturnPid, Ref, Error)
- end
- end.
-
-file_open_options(Options) ->
- [read, raw, binary] ++ case lists:member(read_only, Options) of
- true ->
- [];
- false ->
- [append]
- end.
-
-maybe_track_open_os_files(Options) ->
- case not lists:member(sys_db, Options) of
- true ->
- couch_stats_process_tracker:track([couchdb, open_os_files]);
- false ->
- ok
- end.
-
-terminate(_Reason, #file{fd = nil}) ->
- ok;
-terminate(_Reason, #file{fd = Fd}) ->
- ok = file:close(Fd).
-
-handle_call(Msg, From, File) when ?IS_OLD_STATE(File) ->
- handle_call(Msg, From, upgrade_state(File));
-
-handle_call(close, _From, #file{fd=Fd}=File) ->
- {stop, normal, file:close(Fd), File#file{fd = nil}};
-
-handle_call({pread_iolist, Pos}, _From, File) ->
- update_read_timestamp(),
- {LenIolist, NextPos} = read_raw_iolist_int(File, Pos, 4),
- case iolist_to_binary(LenIolist) of
- <<1:1/integer,Len:31/integer>> -> % an MD5-prefixed term
- {Md5AndIoList, _} = read_raw_iolist_int(File, NextPos, Len+16),
- {Md5, IoList} = extract_md5(Md5AndIoList),
- {reply, {ok, IoList, Md5}, File};
- <<0:1/integer,Len:31/integer>> ->
- {Iolist, _} = read_raw_iolist_int(File, NextPos, Len),
- {reply, {ok, Iolist, <<>>}, File}
- end;
-
-handle_call(bytes, _From, #file{fd = Fd} = File) ->
- {reply, file:position(Fd, eof), File};
-
-handle_call({set_db_pid, Pid}, _From, #file{db_monitor=OldRef}=File) ->
- case is_reference(OldRef) of
- true -> demonitor(OldRef, [flush]);
- false -> ok
- end,
- Ref = monitor(process, Pid),
- {reply, ok, File#file{db_monitor=Ref}};
-
-handle_call(sync, _From, #file{fd=Fd}=File) ->
- case file:sync(Fd) of
- ok ->
- {reply, ok, File};
- {error, _} = Error ->
- % We're intentionally dropping all knowledge
- % of this Fd so that we don't accidentally
- % recover in some whacky edge case that I
- % can't fathom.
- {stop, Error, Error, #file{fd = nil}}
- end;
-
-handle_call({truncate, Pos}, _From, #file{fd=Fd}=File) ->
- {ok, Pos} = file:position(Fd, Pos),
- case file:truncate(Fd) of
- ok ->
- {reply, ok, File#file{eof = Pos}};
- Error ->
- {reply, Error, File}
- end;
-
-handle_call({append_bin, Bin}, _From, #file{fd = Fd, eof = Pos} = File) ->
- Blocks = make_blocks(Pos rem ?SIZE_BLOCK, Bin),
- Size = iolist_size(Blocks),
- case file:write(Fd, Blocks) of
- ok ->
- {reply, {ok, Pos, Size}, File#file{eof = Pos + Size}};
- Error ->
- {reply, Error, reset_eof(File)}
- end;
-
-handle_call({write_header, Bin}, _From, #file{fd = Fd, eof = Pos} = File) ->
- BinSize = byte_size(Bin),
- case Pos rem ?SIZE_BLOCK of
- 0 ->
- Padding = <<>>;
- BlockOffset ->
- Padding = <<0:(8*(?SIZE_BLOCK-BlockOffset))>>
- end,
- FinalBin = [Padding, <<1, BinSize:32/integer>> | make_blocks(5, [Bin])],
- case file:write(Fd, FinalBin) of
- ok ->
- {reply, ok, File#file{eof = Pos + iolist_size(FinalBin)}};
- Error ->
- {reply, Error, reset_eof(File)}
- end;
-
-handle_call(find_header, _From, #file{fd = Fd, eof = Pos} = File) ->
- {reply, find_header(Fd, Pos div ?SIZE_BLOCK), File}.
-
-handle_cast(close, Fd) ->
- {stop,normal,Fd}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-handle_info(Msg, File) when ?IS_OLD_STATE(File) ->
- handle_info(Msg, upgrade_state(File));
-
-handle_info(maybe_close, File) ->
- case is_idle(File) of
- true ->
- {stop, normal, File};
- false ->
- erlang:send_after(?MONITOR_CHECK, self(), maybe_close),
- {noreply, File}
- end;
-
-handle_info({'DOWN', Ref, process, _Pid, _Info}, #file{db_monitor=Ref}=File) ->
- case is_idle(File) of
- true -> {stop, normal, File};
- false -> {noreply, File}
- end.
-
-format_status(_Opt, [PDict, #file{} = File]) ->
- {_Fd, FilePath} = couch_util:get_value(couch_file_fd, PDict),
- [{data, [{"State", File}, {"InitialFilePath", FilePath}]}].
-
-find_header(Fd, Block) ->
- case (catch load_header(Fd, Block)) of
- {ok, Bin} ->
- {ok, Bin};
- _Error ->
- ReadCount = config:get_integer(
- "couchdb", "find_header_read_count", ?DEFAULT_READ_COUNT),
- find_header(Fd, Block -1, ReadCount)
- end.
-
-load_header(Fd, Block) ->
- {ok, <<1, HeaderLen:32/integer, RestBlock/binary>>} =
- file:pread(Fd, Block * ?SIZE_BLOCK, ?SIZE_BLOCK),
- load_header(Fd, Block * ?SIZE_BLOCK, HeaderLen, RestBlock).
-
-load_header(Fd, Pos, HeaderLen) ->
- load_header(Fd, Pos, HeaderLen, <<>>).
-
-load_header(Fd, Pos, HeaderLen, RestBlock) ->
- TotalBytes = calculate_total_read_len(?PREFIX_SIZE, HeaderLen),
- RawBin = case TotalBytes =< byte_size(RestBlock) of
- true ->
- <<RawBin0:TotalBytes/binary, _/binary>> = RestBlock,
- RawBin0;
- false ->
- ReadStart = Pos + ?PREFIX_SIZE + byte_size(RestBlock),
- ReadLen = TotalBytes - byte_size(RestBlock),
- {ok, Missing} = file:pread(Fd, ReadStart, ReadLen),
- <<RestBlock/binary, Missing/binary>>
- end,
- <<Md5Sig:16/binary, HeaderBin/binary>> =
- iolist_to_binary(remove_block_prefixes(?PREFIX_SIZE, RawBin)),
- Md5Sig = couch_hash:md5_hash(HeaderBin),
- {ok, HeaderBin}.
-
-
-%% Read multiple block locations using a single file:pread/2.
--spec find_header(file:fd(), block_id(), non_neg_integer()) ->
- {ok, binary()} | no_valid_header.
-find_header(_Fd, Block, _ReadCount) when Block < 0 ->
- no_valid_header;
-find_header(Fd, Block, ReadCount) ->
- FirstBlock = max(0, Block - ReadCount + 1),
- BlockLocations = [?SIZE_BLOCK*B || B <- lists:seq(FirstBlock, Block)],
- {ok, DataL} = file:pread(Fd, [{L, ?PREFIX_SIZE} || L <- BlockLocations]),
- %% Since BlockLocations are ordered from oldest to newest, we rely
- %% on lists:foldl/3 to reverse the order, making HeaderLocations
- %% correctly ordered from newest to oldest.
- HeaderLocations = lists:foldl(fun
- ({Loc, <<1, HeaderSize:32/integer>>}, Acc) ->
- [{Loc, HeaderSize} | Acc];
- (_, Acc) ->
- Acc
- end, [], lists:zip(BlockLocations, DataL)),
- case find_newest_header(Fd, HeaderLocations) of
- {ok, _Location, HeaderBin} ->
- {ok, HeaderBin};
- _ ->
- ok = file:advise(
- Fd, hd(BlockLocations), ReadCount * ?SIZE_BLOCK, dont_need),
- NextBlock = hd(BlockLocations) div ?SIZE_BLOCK - 1,
- find_header(Fd, NextBlock, ReadCount)
- end.
-
--spec find_newest_header(file:fd(), [{location(), header_size()}]) ->
- {ok, location(), binary()} | not_found.
-find_newest_header(_Fd, []) ->
- not_found;
-find_newest_header(Fd, [{Location, Size} | LocationSizes]) ->
- case (catch load_header(Fd, Location, Size)) of
- {ok, HeaderBin} ->
- {ok, Location, HeaderBin};
- _Error ->
- find_newest_header(Fd, LocationSizes)
- end.
-
-
--spec read_raw_iolist_int(#file{}, Pos::non_neg_integer(), Len::non_neg_integer()) ->
- {Data::iolist(), CurPos::non_neg_integer()}.
-read_raw_iolist_int(Fd, {Pos, _Size}, Len) -> % 0110 UPGRADE CODE
- read_raw_iolist_int(Fd, Pos, Len);
-read_raw_iolist_int(#file{fd = Fd, pread_limit = Limit} = F, Pos, Len) ->
- BlockOffset = Pos rem ?SIZE_BLOCK,
- TotalBytes = calculate_total_read_len(BlockOffset, Len),
- if
- (Pos + TotalBytes) > F#file.eof ->
- couch_stats:increment_counter([pread, exceed_eof]),
- {_Fd, Filepath} = get(couch_file_fd),
- throw({read_beyond_eof, Filepath});
- TotalBytes > Limit ->
- couch_stats:increment_counter([pread, exceed_limit]),
- {_Fd, Filepath} = get(couch_file_fd),
- throw({exceed_pread_limit, Filepath, Limit});
- true ->
- {ok, <<RawBin:TotalBytes/binary>>} = file:pread(Fd, Pos, TotalBytes),
- {remove_block_prefixes(BlockOffset, RawBin), Pos + TotalBytes}
- end.
-
--spec extract_md5(iolist()) -> {binary(), iolist()}.
-extract_md5(FullIoList) ->
- {Md5List, IoList} = split_iolist(FullIoList, 16, []),
- {iolist_to_binary(Md5List), IoList}.
-
-calculate_total_read_len(0, FinalLen) ->
- calculate_total_read_len(1, FinalLen) + 1;
-calculate_total_read_len(BlockOffset, FinalLen) ->
- case ?SIZE_BLOCK - BlockOffset of
- BlockLeft when BlockLeft >= FinalLen ->
- FinalLen;
- BlockLeft ->
- FinalLen + ((FinalLen - BlockLeft) div (?SIZE_BLOCK -1)) +
- if ((FinalLen - BlockLeft) rem (?SIZE_BLOCK -1)) =:= 0 -> 0;
- true -> 1 end
- end.
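A worked example of the prefix accounting above, assuming the customary ?SIZE_BLOCK of 4096 bytes:

%% calculate_total_read_len(4090, 100) =:= 101
%%     only 6 data bytes fit in the current block, so the read crosses one
%%     block boundary and picks up that block's 1-byte prefix.
%% calculate_total_read_len(0, 100) =:= 101
%%     a read starting exactly on a block boundary also has to skip the
%%     prefix byte of its own block.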
-
-remove_block_prefixes(_BlockOffset, <<>>) ->
- [];
-remove_block_prefixes(0, <<_BlockPrefix,Rest/binary>>) ->
- remove_block_prefixes(1, Rest);
-remove_block_prefixes(BlockOffset, Bin) ->
- BlockBytesAvailable = ?SIZE_BLOCK - BlockOffset,
- case size(Bin) of
- Size when Size > BlockBytesAvailable ->
- <<DataBlock:BlockBytesAvailable/binary,Rest/binary>> = Bin,
- [DataBlock | remove_block_prefixes(0, Rest)];
- _Size ->
- [Bin]
- end.
-
-make_blocks(_BlockOffset, []) ->
- [];
-make_blocks(0, IoList) ->
- [<<0>> | make_blocks(1, IoList)];
-make_blocks(BlockOffset, IoList) ->
- case split_iolist(IoList, (?SIZE_BLOCK - BlockOffset), []) of
- {Begin, End} ->
- [Begin | make_blocks(0, End)];
- _SplitRemaining ->
- IoList
- end.
-
-%% @doc Returns a tuple where the first element contains the leading SplitAt
-%% bytes of the original iolist, and the second element is the tail. If SplitAt
-%% is larger than byte_size(IoList), returns the number of bytes still missing.
--spec split_iolist(IoList::iolist(), SplitAt::non_neg_integer(), Acc::list()) ->
- {iolist(), iolist()} | non_neg_integer().
-split_iolist(List, 0, BeginAcc) ->
- {lists:reverse(BeginAcc), List};
-split_iolist([], SplitAt, _BeginAcc) ->
- SplitAt;
-split_iolist([<<Bin/binary>> | Rest], SplitAt, BeginAcc) when SplitAt > byte_size(Bin) ->
- split_iolist(Rest, SplitAt - byte_size(Bin), [Bin | BeginAcc]);
-split_iolist([<<Bin/binary>> | Rest], SplitAt, BeginAcc) ->
- <<Begin:SplitAt/binary,End/binary>> = Bin,
- split_iolist([End | Rest], 0, [Begin | BeginAcc]);
-split_iolist([Sublist| Rest], SplitAt, BeginAcc) when is_list(Sublist) ->
- case split_iolist(Sublist, SplitAt, BeginAcc) of
- {Begin, End} ->
- {Begin, [End | Rest]};
- SplitRemaining ->
- split_iolist(Rest, SplitAt - (SplitAt - SplitRemaining), [Sublist | BeginAcc])
- end;
-split_iolist([Byte | Rest], SplitAt, BeginAcc) when is_integer(Byte) ->
- split_iolist(Rest, SplitAt - 1, [Byte | BeginAcc]).
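Illustrative calls to split_iolist/3 (sketch):

%% split_iolist([<<"abcdef">>], 4, []) -> {[<<"abcd">>], [<<"ef">>]}
%% split_iolist([<<"ab">>], 4, [])     -> 2    % two bytes short of SplitAt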
-
-monitored_by_pids() ->
- {monitored_by, PidsAndRefs} = process_info(self(), monitored_by),
- lists:filter(fun is_pid/1, PidsAndRefs).
-
-% System dbs aren't monitored by couch_stats_process_tracker
-is_idle(#file{is_sys=true}) ->
- case monitored_by_pids() of
- [] -> true;
- _ -> false
- end;
-is_idle(#file{is_sys=false}) ->
- Tracker = whereis(couch_stats_process_tracker),
- case monitored_by_pids() of
- [] -> true;
- [Tracker] -> true;
- [_] -> exit(tracker_monitoring_failed);
- _ -> false
- end.
-
--spec process_info(CouchFilePid :: pid()) ->
- {Fd :: pid() | tuple(), FilePath :: string()} | undefined.
-
-process_info(Pid) ->
- couch_util:process_dict_get(Pid, couch_file_fd).
-
-update_read_timestamp() ->
- put(read_timestamp, os:timestamp()).
-
-upgrade_state(#file{db_monitor=DbPid}=File) when is_pid(DbPid) ->
- unlink(DbPid),
- Ref = monitor(process, DbPid),
- File#file{db_monitor=Ref};
-upgrade_state(State) ->
- State.
-
-get_pread_limit() ->
- case config:get_integer("couchdb", "max_pread_size", 0) of
- N when N > 0 -> N;
- _ -> infinity
- end.
-
-%% Reset the cached eof from the file itself in the event of a partially successful write.
-reset_eof(#file{} = File) ->
- {ok, Eof} = file:position(File#file.fd, eof),
- File#file{eof = Eof}.
-
--ifdef(TEST).
--include_lib("couch/include/couch_eunit.hrl").
-
-deleted_filename_test_() ->
- DbNames = ["dbname", "db.name", "user/dbname"],
- Fixtures = make_filename_fixtures(DbNames),
- lists:map(fun(Fixture) ->
- should_create_proper_deleted_filename(Fixture)
- end, Fixtures).
-
-should_create_proper_deleted_filename(Before) ->
- {Before,
- ?_test(begin
- BeforeExtension = filename:extension(Before),
- BeforeBasename = filename:basename(Before, BeforeExtension),
- Re = "^" ++ BeforeBasename ++ "\.[0-9]{8}\.[0-9]{6}\.deleted\..*$",
- After = deleted_filename(Before),
- ?assertEqual(match,
- re:run(filename:basename(After), Re, [{capture, none}])),
- ?assertEqual(BeforeExtension, filename:extension(After))
- end)}.
-
-make_filename_fixtures(DbNames) ->
- Formats = [
- "~s.couch",
- ".~s_design/mrview/3133e28517e89a3e11435dd5ac4ad85a.view",
- "shards/00000000-1fffffff/~s.1458336317.couch",
- ".shards/00000000-1fffffff/~s.1458336317_design",
- ".shards/00000000-1fffffff/~s.1458336317_design"
- "/mrview/3133e28517e89a3e11435dd5ac4ad85a.view"
- ],
- lists:flatmap(fun(DbName) ->
- lists:map(fun(Format) ->
- filename:join("/srv/data", io_lib:format(Format, [DbName]))
- end, Formats)
- end, DbNames).
-
--endif.
diff --git a/src/couch/src/couch_flags.erl b/src/couch/src/couch_flags.erl
index 5cfe7f6d1..5bd133e29 100644
--- a/src/couch/src/couch_flags.erl
+++ b/src/couch/src/couch_flags.erl
@@ -61,14 +61,10 @@
]).
-include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
--include("couch_db_int.hrl").
-type subject()
- :: #db{}
+ :: map()
| #httpd{}
- | #shard{}
- | #ordered_shard{}
| string()
| binary().
@@ -80,7 +76,7 @@ enabled(Subject) ->
Key = maybe_handle(subject_key, [Subject], fun subject_key/1),
Handle = couch_epi:get_handle({flags, config}),
lists:usort(enabled(Handle, {<<"/", Key/binary>>})
- ++ enabled(Handle, {couch_db:normalize_dbname(Key)})).
+ ++ enabled(Handle, {Key})).
-spec is_enabled(FlagId :: atom(), subject()) -> boolean().
@@ -104,16 +100,12 @@ enabled(Handle, Key) ->
-spec subject_key(subject()) -> binary().
-subject_key(#db{name = Name}) ->
- subject_key(Name);
+subject_key(#{} = Db) ->
+ subject_key(fabric2_db:name(Db));
subject_key(#httpd{path_parts=[Name | _Rest]}) ->
subject_key(Name);
subject_key(#httpd{path_parts=[]}) ->
<<>>;
-subject_key(#shard{name = Name}) ->
- subject_key(Name);
-subject_key(#ordered_shard{name = Name}) ->
- subject_key(Name);
subject_key(Name) when is_list(Name) ->
subject_key(list_to_binary(Name));
subject_key(Name) when is_binary(Name) ->
diff --git a/src/couch/src/couch_flags_config.erl b/src/couch/src/couch_flags_config.erl
index 104a48257..883fe38e8 100644
--- a/src/couch/src/couch_flags_config.erl
+++ b/src/couch/src/couch_flags_config.erl
@@ -13,6 +13,8 @@
% This module implements {flags, config} data provider
-module(couch_flags_config).
+-include_lib("kernel/include/logger.hrl").
+
-export([
enable/2,
data/0,
@@ -80,50 +82,53 @@ data(Config) ->
-spec parse_rules([{Key :: string(), Value :: string()}]) -> [rule()].
parse_rules(Config) ->
- lists:filtermap(fun({K, V}) ->
- case parse_rule(K, V) of
- {error, {Format, Args}} ->
- couch_log:error(Format, Args),
- false;
- Rule ->
- {true, Rule}
- end
- end, Config).
-
--spec parse_rule(Key :: string(), Value :: string()) ->
- rule()
- | {error, Reason :: term()}.
-
-parse_rule(Key, "true") ->
- parse_flags(binary:split(list_to_binary(Key), <<"||">>), true);
-parse_rule(Key, "false") ->
- parse_flags(binary:split(list_to_binary(Key), <<"||">>), false);
+ lists:filtermap(fun({K, V}) -> parse_rule(K, V) end, Config).
+
+-spec parse_rule(Key :: string(), Value :: string()) -> {true, rule()} | false.
+
+parse_rule(Key, Value) when Value =:= "true" orelse Value =:= "false" ->
+ case binary:split(list_to_binary(Key), <<"||">>) of
+ [FlagsBin, PatternBin] ->
+ parse_flags([FlagsBin, PatternBin], list_to_atom(Value));
+ _ ->
+ ?LOG_ERROR(#{
+ what => invalid_flag_setting,
+ key => Key,
+ value => Value,
+ details => "key must be in the form of `[flags]||pattern`"
+ }),
+ false
+ end;
parse_rule(Key, Value) ->
- Reason = {
- "Expected value for the `~p` either `true` or `false`, (got ~p)",
- [Key, Value]
- },
- {error, Reason}.
+ ?LOG_ERROR(#{
+ what => invalid_flag_setting,
+ key => Key,
+ value => Value,
+ details => "value must be a boolean"
+ }),
+ false.
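A quick illustration of what the new parse_rule/2 accepts and rejects (hypothetical flag names, assuming they already exist as atoms; Pattern stands for whatever parse_pattern/1 returns):

%% parse_rule("flag_a,flag_b||userdb-*", "true")
%%     -> {true, {Pattern, [flag_a, flag_b], []}}    % flags enabled for matches
%% parse_rule("flag_a,flag_b||userdb-*", "false")
%%     -> {true, {Pattern, [], [flag_a, flag_b]}}    % flags disabled for matches
%% parse_rule("flag_a", "true")        -> false      % key lacks the ||pattern part
%% parse_rule("flag_a||userdb-*", "1") -> false      % value must be "true" or "false"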
--spec parse_flags([binary()], Value :: boolean()) ->
- rule() | {error, Reason :: term()}.
+-spec parse_flags([binary()], Value :: boolean()) -> {true, rule()} | false.
parse_flags([FlagsBin, PatternBin], Value) ->
case {parse_flags_term(FlagsBin), Value} of
- {{error, _} = Error, _} ->
- Error;
+ {{error, Errors}, _} ->
+ lists:foreach(fun(Error) ->
+ ?LOG_ERROR(#{
+ what => invalid_flag_setting,
+ flags => FlagsBin,
+ error => Error
+ })
+ end, Errors),
+ false;
{Flags, true} ->
- {parse_pattern(PatternBin), Flags, []};
+ {true, {parse_pattern(PatternBin), Flags, []}};
{Flags, false} ->
- {parse_pattern(PatternBin), [], Flags}
- end;
-parse_flags(_Tokens, _) ->
- couch_log:error(
- "Key should be in the form of `[flags]||pattern` (got ~s)", []),
- false.
+ {true, {parse_pattern(PatternBin), [], Flags}}
+ end.
-spec parse_flags_term(Flags :: binary()) ->
- [flag_id()] | {error, Reason :: term()}.
+ [flag_id()] | {error, Failures :: [term()]}.
parse_flags_term(FlagsBin) ->
{Flags, Errors} = lists:splitwith(fun erlang:is_atom/1,
@@ -132,10 +137,7 @@ parse_flags_term(FlagsBin) ->
[] ->
lists:usort(Flags);
_ ->
- {error, {
- "Cannot parse list of tags: ~n~p",
- Errors
- }}
+ {error, Errors}
end.
split_by_comma(Binary) ->
diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl
index 8f7fedd5e..fd83c258a 100644
--- a/src/couch/src/couch_httpd.erl
+++ b/src/couch/src/couch_httpd.erl
@@ -16,8 +16,6 @@
-include_lib("couch/include/couch_db.hrl").
--export([start_link/0, start_link/1, stop/0, handle_request/5]).
-
-export([header_value/2,header_value/3,qs_value/2,qs_value/3,qs/1,qs_json_value/3]).
-export([path/1,absolute_uri/2,body_length/1]).
-export([verify_is_server_admin/1,unquote/1,quote/1,recv/2,recv_chunked/4,error_info/1]).
@@ -32,163 +30,17 @@
-export([send_response/4,send_response_no_cors/4,send_method_not_allowed/2,
send_error/2,send_error/4, send_redirect/2,send_chunked_error/2]).
-export([send_json/2,send_json/3,send_json/4,last_chunk/1,parse_multipart_request/3]).
--export([accepted_encodings/1,handle_request_int/5,validate_referer/1,validate_ctype/2]).
+-export([accepted_encodings/1,validate_referer/1,validate_ctype/2]).
-export([http_1_0_keep_alive/2]).
-export([validate_host/1]).
-export([validate_bind_address/1]).
-export([check_max_request_length/1]).
--export([handle_request/1]).
--export([set_auth_handlers/0]).
+-export([maybe_decompress/2]).
-define(HANDLER_NAME_IN_MODULE_POS, 6).
-define(MAX_DRAIN_BYTES, 1048576).
-define(MAX_DRAIN_TIME_MSEC, 1000).
-start_link() ->
- start_link(http).
-start_link(http) ->
- Port = config:get("httpd", "port", "5984"),
- start_link(?MODULE, [{port, Port}]);
-start_link(https) ->
- Port = config:get("ssl", "port", "6984"),
- {ok, Ciphers} = couch_util:parse_term(config:get("ssl", "ciphers", undefined)),
- {ok, Versions} = couch_util:parse_term(config:get("ssl", "tls_versions", undefined)),
- {ok, SecureRenegotiate} = couch_util:parse_term(config:get("ssl", "secure_renegotiate", undefined)),
- ServerOpts0 =
- [{cacertfile, config:get("ssl", "cacert_file", undefined)},
- {keyfile, config:get("ssl", "key_file", undefined)},
- {certfile, config:get("ssl", "cert_file", undefined)},
- {password, config:get("ssl", "password", undefined)},
- {secure_renegotiate, SecureRenegotiate},
- {versions, Versions},
- {ciphers, Ciphers}],
-
- case (couch_util:get_value(keyfile, ServerOpts0) == undefined orelse
- couch_util:get_value(certfile, ServerOpts0) == undefined) of
- true ->
- couch_log:error("SSL enabled but PEM certificates are missing", []),
- throw({error, missing_certs});
- false ->
- ok
- end,
-
- ServerOpts = [Opt || {_, V}=Opt <- ServerOpts0, V /= undefined],
-
- ClientOpts = case config:get("ssl", "verify_ssl_certificates", "false") of
- "false" ->
- [];
- "true" ->
- FailIfNoPeerCert = case config:get("ssl", "fail_if_no_peer_cert", "false") of
- "false" -> false;
- "true" -> true
- end,
- [{depth, list_to_integer(config:get("ssl",
- "ssl_certificate_max_depth", "1"))},
- {fail_if_no_peer_cert, FailIfNoPeerCert},
- {verify, verify_peer}] ++
- case config:get("ssl", "verify_fun", undefined) of
- undefined -> [];
- SpecStr ->
- [{verify_fun, make_arity_3_fun(SpecStr)}]
- end
- end,
- SslOpts = ServerOpts ++ ClientOpts,
-
- Options =
- [{port, Port},
- {ssl, true},
- {ssl_opts, SslOpts}],
- start_link(https, Options).
-start_link(Name, Options) ->
- BindAddress = case config:get("httpd", "bind_address", "any") of
- "any" -> any;
- Else -> Else
- end,
- ok = validate_bind_address(BindAddress),
-
- {ok, ServerOptions} = couch_util:parse_term(
- config:get("httpd", "server_options", "[]")),
- {ok, SocketOptions} = couch_util:parse_term(
- config:get("httpd", "socket_options", "[]")),
-
- set_auth_handlers(),
- Handlers = get_httpd_handlers(),
-
- % ensure uuid is set so that concurrent replications
- % get the same value.
- couch_server:get_uuid(),
-
- Loop = fun(Req)->
- case SocketOptions of
- [] ->
- ok;
- _ ->
- ok = mochiweb_socket:setopts(Req:get(socket), SocketOptions)
- end,
- apply(?MODULE, handle_request, [Req | Handlers])
- end,
-
- % set mochiweb options
- FinalOptions = lists:append([Options, ServerOptions, [
- {loop, Loop},
- {name, Name},
- {ip, BindAddress}]]),
-
- % launch mochiweb
- case mochiweb_http:start(FinalOptions) of
- {ok, MochiPid} ->
- {ok, MochiPid};
- {error, Reason} ->
- couch_log:error("Failure to start Mochiweb: ~s~n", [Reason]),
- throw({error, Reason})
- end.
-
-
-stop() ->
- mochiweb_http:stop(couch_httpd),
- catch mochiweb_http:stop(https).
-
-
-set_auth_handlers() ->
- AuthenticationSrcs = make_fun_spec_strs(
- config:get("httpd", "authentication_handlers", "")),
- AuthHandlers = lists:map(
- fun(A) -> {auth_handler_name(A), make_arity_1_fun(A)} end, AuthenticationSrcs),
- AuthenticationFuns = AuthHandlers ++ [
- fun couch_httpd_auth:party_mode_handler/1 %% must be last
- ],
- ok = application:set_env(couch, auth_handlers, AuthenticationFuns).
-
-auth_handler_name(SpecStr) ->
- lists:nth(?HANDLER_NAME_IN_MODULE_POS, re:split(SpecStr, "[\\W_]", [])).
-
-get_httpd_handlers() ->
- {ok, HttpdGlobalHandlers} = application:get_env(couch, httpd_global_handlers),
-
- UrlHandlersList = lists:map(
- fun({UrlKey, SpecStr}) ->
- {?l2b(UrlKey), make_arity_1_fun(SpecStr)}
- end, HttpdGlobalHandlers),
-
- {ok, HttpdDbHandlers} = application:get_env(couch, httpd_db_handlers),
-
- DbUrlHandlersList = lists:map(
- fun({UrlKey, SpecStr}) ->
- {?l2b(UrlKey), make_arity_2_fun(SpecStr)}
- end, HttpdDbHandlers),
-
- {ok, HttpdDesignHandlers} = application:get_env(couch, httpd_design_handlers),
-
- DesignUrlHandlersList = lists:map(
- fun({UrlKey, SpecStr}) ->
- {?l2b(UrlKey), make_arity_3_fun(SpecStr)}
- end, HttpdDesignHandlers),
-
- UrlHandlers = dict:from_list(UrlHandlersList),
- DbUrlHandlers = dict:from_list(DbUrlHandlersList),
- DesignUrlHandlers = dict:from_list(DesignUrlHandlersList),
- DefaultFun = make_arity_1_fun("{couch_httpd_db, handle_request}"),
- [DefaultFun, UrlHandlers, DbUrlHandlers, DesignUrlHandlers].
% SpecStr is a string like "{my_module, my_fun}"
% or "{my_module, my_fun, <<"my_arg">>}"
@@ -220,175 +72,6 @@ make_arity_3_fun(SpecStr) ->
make_fun_spec_strs(SpecStr) ->
re:split(SpecStr, "(?<=})\\s*,\\s*(?={)", [{return, list}]).
-handle_request(MochiReq) ->
- Body = proplists:get_value(body, MochiReq:get(opts)),
- erlang:put(mochiweb_request_body, Body),
- apply(?MODULE, handle_request, [MochiReq | get_httpd_handlers()]).
-
-handle_request(MochiReq, DefaultFun, UrlHandlers, DbUrlHandlers,
- DesignUrlHandlers) ->
- %% reset rewrite count for new request
- erlang:put(?REWRITE_COUNT, 0),
-
- MochiReq1 = couch_httpd_vhost:dispatch_host(MochiReq),
-
- handle_request_int(MochiReq1, DefaultFun,
- UrlHandlers, DbUrlHandlers, DesignUrlHandlers).
-
-handle_request_int(MochiReq, DefaultFun,
- UrlHandlers, DbUrlHandlers, DesignUrlHandlers) ->
- Begin = os:timestamp(),
- % for the path, use the raw path with the query string and fragment
- % removed, but URL quoting left intact
- RawUri = MochiReq:get(raw_path),
- {"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
-
- % get requested path
- RequestedPath = case MochiReq:get_header_value("x-couchdb-vhost-path") of
- undefined ->
- case MochiReq:get_header_value("x-couchdb-requested-path") of
- undefined -> RawUri;
- R -> R
- end;
- P -> P
- end,
-
- HandlerKey =
- case mochiweb_util:partition(Path, "/") of
- {"", "", ""} ->
- <<"/">>; % Special case the root url handler
- {FirstPart, _, _} ->
- list_to_binary(FirstPart)
- end,
- couch_log:debug("~p ~s ~p from ~p~nHeaders: ~p", [
- MochiReq:get(method),
- RawUri,
- MochiReq:get(version),
- peer(MochiReq),
- mochiweb_headers:to_list(MochiReq:get(headers))
- ]),
-
- Method1 =
- case MochiReq:get(method) of
- % already an atom
- Meth when is_atom(Meth) -> Meth;
-
- % Non standard HTTP verbs aren't atoms (COPY, MOVE etc) so convert when
- % possible (if any module references the atom, then it's existing).
- Meth -> couch_util:to_existing_atom(Meth)
- end,
- increment_method_stats(Method1),
-
- % allow broken HTTP clients to fake a full method vocabulary with an X-HTTP-METHOD-OVERRIDE header
- MethodOverride = MochiReq:get_primary_header_value("X-HTTP-Method-Override"),
- Method2 = case lists:member(MethodOverride, ["GET", "HEAD", "POST",
- "PUT", "DELETE",
- "TRACE", "CONNECT",
- "COPY"]) of
- true ->
- couch_log:info("MethodOverride: ~s (real method was ~s)",
- [MethodOverride, Method1]),
- case Method1 of
- 'POST' -> couch_util:to_existing_atom(MethodOverride);
- _ ->
- % Ignore X-HTTP-Method-Override when the original verb isn't POST.
- % I'd like to send a 406 error to the client, but that'd require a nasty refactor.
- % throw({not_acceptable, <<"X-HTTP-Method-Override may only be used with POST requests.">>})
- Method1
- end;
- _ -> Method1
- end,
-
- % alias HEAD to GET as mochiweb takes care of stripping the body
- Method = case Method2 of
- 'HEAD' -> 'GET';
- Other -> Other
- end,
-
- HttpReq = #httpd{
- mochi_req = MochiReq,
- peer = peer(MochiReq),
- method = Method,
- requested_path_parts =
- [?l2b(unquote(Part)) || Part <- string:tokens(RequestedPath, "/")],
- path_parts = [?l2b(unquote(Part)) || Part <- string:tokens(Path, "/")],
- db_url_handlers = DbUrlHandlers,
- design_url_handlers = DesignUrlHandlers,
- default_fun = DefaultFun,
- url_handlers = UrlHandlers,
- user_ctx = erlang:erase(pre_rewrite_user_ctx),
- auth = erlang:erase(pre_rewrite_auth)
- },
-
- HandlerFun = couch_util:dict_find(HandlerKey, UrlHandlers, DefaultFun),
-
- {ok, Resp} =
- try
- validate_host(HttpReq),
- check_request_uri_length(RawUri),
- case chttpd_cors:maybe_handle_preflight_request(HttpReq) of
- not_preflight ->
- case authenticate_request(HttpReq) of
- #httpd{} = Req ->
- HandlerFun(Req);
- Response ->
- Response
- end;
- Response ->
- Response
- end
- catch
- throw:{http_head_abort, Resp0} ->
- {ok, Resp0};
- throw:{invalid_json, S} ->
- couch_log:error("attempted upload of invalid JSON"
- " (set log_level to debug to log it)", []),
- couch_log:debug("Invalid JSON: ~p",[S]),
- send_error(HttpReq, {bad_request, invalid_json});
- throw:unacceptable_encoding ->
- couch_log:error("unsupported encoding method for the response", []),
- send_error(HttpReq, {not_acceptable, "unsupported encoding"});
- throw:bad_accept_encoding_value ->
- couch_log:error("received invalid Accept-Encoding header", []),
- send_error(HttpReq, bad_request);
- exit:normal ->
- exit(normal);
- exit:snappy_nif_not_loaded ->
- ErrorReason = "To access the database or view index, Apache CouchDB"
- " must be built with Erlang OTP R13B04 or higher.",
- couch_log:error("~s", [ErrorReason]),
- send_error(HttpReq, {bad_otp_release, ErrorReason});
- exit:{body_too_large, _} ->
- send_error(HttpReq, request_entity_too_large);
- exit:{uri_too_long, _} ->
- send_error(HttpReq, request_uri_too_long);
- throw:Error ->
- Stack = erlang:get_stacktrace(),
- couch_log:debug("Minor error in HTTP request: ~p",[Error]),
- couch_log:debug("Stacktrace: ~p",[Stack]),
- send_error(HttpReq, Error);
- error:badarg ->
- Stack = erlang:get_stacktrace(),
- couch_log:error("Badarg error in HTTP request",[]),
- couch_log:info("Stacktrace: ~p",[Stack]),
- send_error(HttpReq, badarg);
- error:function_clause ->
- Stack = erlang:get_stacktrace(),
- couch_log:error("function_clause error in HTTP request",[]),
- couch_log:info("Stacktrace: ~p",[Stack]),
- send_error(HttpReq, function_clause);
- Tag:Error ->
- Stack = erlang:get_stacktrace(),
- couch_log:error("Uncaught error in HTTP request: ~p",
- [{Tag, Error}]),
- couch_log:info("Stacktrace: ~p",[Stack]),
- send_error(HttpReq, Error)
- end,
- RequestTime = round(timer:now_diff(os:timestamp(), Begin)/1000),
- couch_stats:update_histogram([couchdb, request_time], RequestTime),
- couch_stats:increment_counter([couchdb, httpd, requests]),
- {ok, Resp}.
-
validate_host(#httpd{} = Req) ->
case config:get_boolean("httpd", "validate_host", false) of
true ->
@@ -417,26 +100,6 @@ valid_hosts() ->
List = config:get("httpd", "valid_hosts", ""),
re:split(List, ",", [{return, list}]).
-check_request_uri_length(Uri) ->
- check_request_uri_length(Uri, config:get("httpd", "max_uri_length")).
-
-check_request_uri_length(_Uri, undefined) ->
- ok;
-check_request_uri_length(Uri, MaxUriLen) when is_list(MaxUriLen) ->
- case length(Uri) > list_to_integer(MaxUriLen) of
- true ->
- throw(request_uri_too_long);
- false ->
- ok
- end.
-
-authenticate_request(Req) ->
- {ok, AuthenticationFuns} = application:get_env(couch, auth_handlers),
- chttpd:authenticate_request(Req, couch_auth_cache, AuthenticationFuns).
-
-increment_method_stats(Method) ->
- couch_stats:increment_counter([couchdb, httpd_request_methods, Method]).
-
validate_referer(Req) ->
Host = host_for_request(Req),
Referer = header_value(Req, "Referer", fail),
@@ -588,7 +251,8 @@ recv_chunked(#httpd{mochi_req=MochiReq}, MaxChunkSize, ChunkFun, InitState) ->
% Fun is called once with each chunk
% Fun({Length, Binary}, State)
% called with Length == 0 on the last time.
- MochiReq:stream_body(MaxChunkSize, ChunkFun, InitState).
+ MochiReq:stream_body(MaxChunkSize, ChunkFun, InitState,
+ config:get_integer("httpd", "max_http_request_size", 4294967296)).
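A minimal chunk callback matching the contract described in the comment above (a sketch; it accumulates the body and returns it on the final zero-length call):

%% ChunkFun = fun({0, _Last}, Acc) -> iolist_to_binary(lists:reverse(Acc));
%%               ({_Len, Bin}, Acc) -> [Bin | Acc]
%%            end.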
body_length(#httpd{mochi_req=MochiReq}) ->
MochiReq:get(body_length).
@@ -599,13 +263,16 @@ body(#httpd{mochi_req=MochiReq, req_body=undefined}) ->
body(#httpd{req_body=ReqBody}) ->
ReqBody.
-json_body(Httpd) ->
+json_body(#httpd{req_body=undefined} = Httpd) ->
case body(Httpd) of
undefined ->
throw({bad_request, "Missing request body"});
Body ->
?JSON_DECODE(maybe_decompress(Httpd, Body))
- end.
+ end;
+
+json_body(#httpd{req_body=ReqBody}) ->
+ ReqBody.
json_body_obj(Httpd) ->
case json_body(Httpd) of
@@ -1220,13 +887,6 @@ http_respond_(#httpd{mochi_req = MochiReq}, 413, Headers, Args, Type) ->
http_respond_(#httpd{mochi_req = MochiReq}, Code, Headers, Args, Type) ->
MochiReq:Type({Code, Headers, Args}).
-peer(MochiReq) ->
- case MochiReq:get(socket) of
- {remote, Pid, _} ->
- node(Pid);
- _ ->
- MochiReq:get(peer)
- end.
%%%%%%%% module tests below %%%%%%%%
diff --git a/src/couch/src/couch_httpd_auth.erl b/src/couch/src/couch_httpd_auth.erl
index e81cf040e..b7402202d 100644
--- a/src/couch/src/couch_httpd_auth.erl
+++ b/src/couch/src/couch_httpd_auth.erl
@@ -15,6 +15,7 @@
-compile(tuple_calls).
-include_lib("couch/include/couch_db.hrl").
+-include_lib("kernel/include/logger.hrl").
-export([party_mode_handler/1]).
@@ -33,7 +34,7 @@
-export([jwt_authentication_handler/1]).
--import(couch_httpd, [header_value/2, send_json/2,send_json/4, send_method_not_allowed/2]).
+-import(couch_httpd, [header_value/2, send_json/2, send_json/4, send_method_not_allowed/2, maybe_decompress/2]).
-compile({no_auto_import,[integer_to_binary/1, integer_to_binary/2]}).
@@ -212,6 +213,12 @@ get_configured_claims() ->
Re = "((?<key1>[a-z]+)|{(?<key2>[a-z]+)\s*,\s*\"(?<val>[^\"]+)\"})",
case re:run(Claims, Re, [global, {capture, [key1, key2, val], binary}]) of
nomatch when Claims /= "" ->
+ ?LOG_ERROR(#{
+ what => invalid_config_setting,
+ section => jwt_auth,
+ key => required_claims,
+ value => Claims
+ }),
couch_log:error("[jwt_auth] required_claims is set to an invalid value.", []),
throw({misconfigured_server, <<"JWT is not configured correctly">>});
nomatch ->
@@ -246,6 +253,7 @@ cookie_authentication_handler(#httpd{mochi_req=MochiReq}=Req, AuthModule) ->
CurrentTime = make_cookie_time(),
case config:get("couch_httpd_auth", "secret", undefined) of
undefined ->
+ ?LOG_DEBUG(#{what => cookie_auth_secret_undefined}),
couch_log:debug("cookie auth secret is not set",[]),
Req;
SecretStr ->
@@ -265,6 +273,10 @@ cookie_authentication_handler(#httpd{mochi_req=MochiReq}=Req, AuthModule) ->
case couch_passwords:verify(ExpectedHash, Hash) of
true ->
TimeLeft = TimeStamp + Timeout - CurrentTime,
+ ?LOG_DEBUG(#{
+ what => successful_cookie_auth,
+ username => User
+ }),
couch_log:debug("Successful cookie auth as: ~p",
[User]),
Req#httpd{user_ctx=#user_ctx{
@@ -329,7 +341,7 @@ handle_session_req(#httpd{method='POST', mochi_req=MochiReq}=Req, AuthModule) ->
"application/x-www-form-urlencoded" ++ _ ->
mochiweb_util:parse_qs(ReqBody);
"application/json" ++ _ ->
- {Pairs} = ?JSON_DECODE(ReqBody),
+ {Pairs} = ?JSON_DECODE(maybe_decompress(Req, ReqBody)),
lists:map(fun({Key, Value}) ->
{?b2l(Key), ?b2l(Value)}
end, Pairs);
@@ -338,6 +350,7 @@ handle_session_req(#httpd{method='POST', mochi_req=MochiReq}=Req, AuthModule) ->
end,
UserName = ?l2b(extract_username(Form)),
Password = ?l2b(couch_util:get_value("password", Form, "")),
+ ?LOG_DEBUG(#{what => login_attempt, user => UserName}),
couch_log:debug("Attempt Login: ~s",[UserName]),
{ok, UserProps, _AuthCtx} = case AuthModule:get_user_creds(Req, UserName) of
nil -> {ok, [], nil};
@@ -501,6 +514,13 @@ same_site() ->
"lax" -> [{same_site, lax}];
"strict" -> [{same_site, strict}];
_ ->
+ ?LOG_ERROR(#{
+ what => invalid_config_setting,
+ section => couch_httpd_auth,
+ key => same_site,
+ value => SameSite,
+ details => "value must be one of `none`, `lax`, `strict`"
+ }),
couch_log:error("invalid config value couch_httpd_auth.same_site: ~p ",[SameSite]),
[]
end.
@@ -561,5 +581,10 @@ integer_to_binary(Int, Len) when is_integer(Int), is_integer(Len) ->
authentication_warning(#httpd{mochi_req = Req}, User) ->
Peer = Req:get(peer),
+ ?LOG_WARNING(#{
+ what => authentication_failure,
+ user => User,
+ peer => Peer
+ }),
couch_log:warning("~p: Authentication failed for user ~s from ~s",
[?MODULE, User, Peer]).
diff --git a/src/couch/src/couch_httpd_db.erl b/src/couch/src/couch_httpd_db.erl
deleted file mode 100644
index 2418c1a4c..000000000
--- a/src/couch/src/couch_httpd_db.erl
+++ /dev/null
@@ -1,1263 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_db).
-
--compile(tuple_calls).
-
--include_lib("couch/include/couch_db.hrl").
-
--export([handle_request/1, handle_compact_req/2, handle_design_req/2,
- db_req/2, couch_doc_open/4, handle_db_changes_req/2,
- update_doc_result_to_json/1, update_doc_result_to_json/2,
- handle_design_info_req/3, parse_copy_destination_header/1,
- parse_changes_query/2, handle_changes_req/4]).
-
--import(couch_httpd,
- [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
- start_json_response/2,send_chunk/2,last_chunk/1,end_json_response/1,
- start_chunked_response/3, absolute_uri/2, send/2,
- start_response_length/4, send_error/4]).
-
--record(doc_query_args, {
- options = [],
- rev = nil,
- open_revs = [],
- update_type = interactive_edit,
- atts_since = nil
-}).
-
-% Database request handlers
-handle_request(#httpd{path_parts=[DbName|RestParts],method=Method,
- db_url_handlers=DbUrlHandlers}=Req)->
- case {Method, RestParts} of
- {'PUT', []} ->
- create_db_req(Req, DbName);
- {'DELETE', []} ->
- % if we get ?rev=... the user is using a faulty script where the
- % document id is empty by accident. Let them recover safely.
- case couch_httpd:qs_value(Req, "rev", false) of
- false -> delete_db_req(Req, DbName);
- _Rev -> throw({bad_request,
- "You tried to DELETE a database with a ?rev= parameter. "
- ++ "Did you mean to DELETE a document instead?"})
- end;
- {_, []} ->
- do_db_req(Req, fun db_req/2);
- {_, [SecondPart|_]} ->
- Handler = couch_util:dict_find(SecondPart, DbUrlHandlers, fun db_req/2),
- do_db_req(Req, Handler)
- end.
-
-
-handle_db_changes_req(Req, Db) ->
- ChangesArgs = parse_changes_query(Req, Db),
- ChangesFun = couch_changes:handle_db_changes(ChangesArgs, Req, Db),
- handle_changes_req(Req, Db, ChangesArgs, ChangesFun).
-
-
-handle_changes_req(#httpd{method='POST'}=Req, Db, ChangesArgs, ChangesFun) ->
- couch_httpd:validate_ctype(Req, "application/json"),
- handle_changes_req1(Req, Db, ChangesArgs, ChangesFun);
-handle_changes_req(#httpd{method='GET'}=Req, Db, ChangesArgs, ChangesFun) ->
- handle_changes_req1(Req, Db, ChangesArgs, ChangesFun);
-handle_changes_req(#httpd{}=Req, _Db, _ChangesArgs, _ChangesFun) ->
- couch_httpd:send_method_not_allowed(Req, "GET,HEAD,POST").
-
-handle_changes_req1(Req, Db, ChangesArgs, ChangesFun) ->
- DbName = couch_db:name(Db),
- AuthDbName = ?l2b(config:get("couch_httpd_auth", "authentication_db")),
- case AuthDbName of
- DbName ->
- % in the authentication database, _changes is admin-only.
- ok = couch_db:check_is_admin(Db);
- _Else ->
- % on other databases, _changes is free for all.
- ok
- end,
-
- MakeCallback = fun(Resp) ->
- fun({change, {ChangeProp}=Change, _}, "eventsource") ->
- Seq = proplists:get_value(<<"seq">>, ChangeProp),
- couch_httpd:send_chunk(Resp, ["data: ", ?JSON_ENCODE(Change),
- "\n", "id: ", ?JSON_ENCODE(Seq),
- "\n\n"]);
- ({change, Change, _}, "continuous") ->
- couch_httpd:send_chunk(Resp, [?JSON_ENCODE(Change) | "\n"]);
- ({change, Change, Prepend}, _) ->
- couch_httpd:send_chunk(Resp, [Prepend, ?JSON_ENCODE(Change)]);
- (start, "eventsource") ->
- ok;
- (start, "continuous") ->
- ok;
- (start, _) ->
- couch_httpd:send_chunk(Resp, "{\"results\":[\n");
- ({stop, _EndSeq}, "eventsource") ->
- couch_httpd:end_json_response(Resp);
- ({stop, EndSeq}, "continuous") ->
- couch_httpd:send_chunk(
- Resp,
- [?JSON_ENCODE({[{<<"last_seq">>, EndSeq}]}) | "\n"]
- ),
- couch_httpd:end_json_response(Resp);
- ({stop, EndSeq}, _) ->
- couch_httpd:send_chunk(
- Resp,
- io_lib:format("\n],\n\"last_seq\":~w}\n", [EndSeq])
- ),
- couch_httpd:end_json_response(Resp);
- (timeout, "eventsource") ->
- couch_httpd:send_chunk(Resp, "event: heartbeat\ndata: \n\n");
- (timeout, _) ->
- couch_httpd:send_chunk(Resp, "\n")
- end
- end,
- WrapperFun = case ChangesArgs#changes_args.feed of
- "normal" ->
- {ok, Info} = couch_db:get_db_info(Db),
- CurrentEtag = couch_httpd:make_etag(Info),
- fun(FeedChangesFun) ->
- couch_httpd:etag_respond(
- Req,
- CurrentEtag,
- fun() ->
- {ok, Resp} = couch_httpd:start_json_response(
- Req, 200, [{"ETag", CurrentEtag}]
- ),
- FeedChangesFun(MakeCallback(Resp))
- end
- )
- end;
- "eventsource" ->
- Headers = [
- {"Content-Type", "text/event-stream"},
- {"Cache-Control", "no-cache"}
- ],
- {ok, Resp} = couch_httpd:start_chunked_response(Req, 200, Headers),
- fun(FeedChangesFun) ->
- FeedChangesFun(MakeCallback(Resp))
- end;
- _ ->
- % "longpoll" or "continuous"
- {ok, Resp} = couch_httpd:start_json_response(Req, 200),
- fun(FeedChangesFun) ->
- FeedChangesFun(MakeCallback(Resp))
- end
- end,
- couch_stats:increment_counter(
- [couchdb, httpd, clients_requesting_changes]),
- try
- WrapperFun(ChangesFun)
- after
- couch_stats:decrement_counter(
- [couchdb, httpd, clients_requesting_changes])
- end.
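For orientation, the shape of what the "continuous" branch of the callback above streams, one JSON object per line followed by a closing last_seq line (values made up; the change objects themselves are produced by couch_changes):

%% {"seq":3,"id":"doc1","changes":[{"rev":"1-967a00dff5e02add41819138abb3284d"}]}
%% {"last_seq":3}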
-
-
-
-handle_compact_req(#httpd{method='POST'}=Req, Db) ->
- case Req#httpd.path_parts of
- [_DbName, <<"_compact">>] ->
- ok = couch_db:check_is_admin(Db),
- couch_httpd:validate_ctype(Req, "application/json"),
- _ = couch_httpd:body(Req),
- {ok, _} = couch_db:start_compact(Db),
- send_json(Req, 202, {[{ok, true}]});
- [_DbName, <<"_compact">>, DesignName | _] ->
- DesignId = <<"_design/", DesignName/binary>>,
- DDoc = couch_httpd_db:couch_doc_open(
- Db, DesignId, nil, [ejson_body]
- ),
- couch_mrview_http:handle_compact_req(Req, Db, DDoc)
- end;
-
-handle_compact_req(Req, _Db) ->
- send_method_not_allowed(Req, "POST").
-
-
-handle_design_req(#httpd{
- path_parts=[_DbName, _Design, DesignName, <<"_",_/binary>> = Action | _Rest],
- design_url_handlers = DesignUrlHandlers
- }=Req, Db) ->
- case couch_db:is_system_db(Db) of
- true ->
- case (catch couch_db:check_is_admin(Db)) of
- ok -> ok;
- _ ->
- throw({forbidden, <<"Only admins can access design document",
- " actions for system databases.">>})
- end;
- false -> ok
- end,
-
- % maybe load ddoc through fabric
- DesignId = <<"_design/", DesignName/binary>>,
- case couch_httpd_db:couch_doc_open(Db, DesignId, nil, [ejson_body]) of
- not_found ->
- DbName = mem3:dbname(couch_db:name(Db)),
- {ok, DDoc} = fabric:open_doc(DbName, DesignId, [?ADMIN_CTX]);
- DDoc ->
- ok
- end,
- Handler = couch_util:dict_find(Action, DesignUrlHandlers, fun(_, _, _) ->
- throw({not_found, <<"missing handler: ", Action/binary>>})
- end),
- Handler(Req, Db, DDoc);
-
-handle_design_req(Req, Db) ->
- db_req(Req, Db).
-
-handle_design_info_req(#httpd{
- method='GET',
- path_parts=[_DbName, _Design, DesignName, _]
- }=Req, Db, _DDoc) ->
- DesignId = <<"_design/", DesignName/binary>>,
- DDoc = couch_httpd_db:couch_doc_open(Db, DesignId, nil, [ejson_body]),
- couch_mrview_http:handle_info_req(Req, Db, DDoc).
-
-create_db_req(#httpd{user_ctx=UserCtx}=Req, DbName) ->
- ok = couch_httpd:verify_is_server_admin(Req),
- Engine = case couch_httpd:qs_value(Req, "engine") of
- EngineStr when is_list(EngineStr) ->
- [{engine, iolist_to_binary(EngineStr)}];
- _ ->
- []
- end,
- case couch_server:create(DbName, [{user_ctx, UserCtx}] ++ Engine) of
- {ok, Db} ->
- couch_db:close(Db),
- DbUrl = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName)),
- send_json(Req, 201, [{"Location", DbUrl}], {[{ok, true}]});
- Error ->
- throw(Error)
- end.
-
-delete_db_req(#httpd{user_ctx=UserCtx}=Req, DbName) ->
- ok = couch_httpd:verify_is_server_admin(Req),
- Options = case couch_httpd:qs_value(Req, "sync") of
- "true" -> [sync, {user_ctx, UserCtx}];
- _ -> [{user_ctx, UserCtx}]
- end,
- case couch_server:delete(DbName, Options) of
- ok ->
- send_json(Req, 200, {[{ok, true}]});
- Error ->
- throw(Error)
- end.
-
-do_db_req(#httpd{user_ctx=UserCtx,path_parts=[DbName|_]}=Req, Fun) ->
- case couch_db:open(DbName, [{user_ctx, UserCtx}]) of
- {ok, Db} ->
- try
- Fun(Req, Db)
- after
- catch couch_db:close(Db)
- end;
- Error ->
- throw(Error)
- end.
-
-db_req(#httpd{method='GET',path_parts=[_DbName]}=Req, Db) ->
- {ok, DbInfo} = couch_db:get_db_info(Db),
- send_json(Req, {DbInfo});
-
-db_req(#httpd{method='POST',path_parts=[_DbName]}=Req, Db) ->
- couch_httpd:validate_ctype(Req, "application/json"),
- Doc = couch_db:doc_from_json_obj_validate(Db, couch_httpd:json_body(Req)),
- validate_attachment_names(Doc),
- Doc2 = case Doc#doc.id of
- <<"">> ->
- Doc#doc{id=couch_uuids:new(), revs={0, []}};
- _ ->
- Doc
- end,
- DocId = Doc2#doc.id,
- update_doc(Req, Db, DocId, Doc2);
-
-db_req(#httpd{path_parts=[_DbName]}=Req, _Db) ->
- send_method_not_allowed(Req, "DELETE,GET,HEAD,POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_ensure_full_commit">>]}=Req, Db) ->
- couch_httpd:validate_ctype(Req, "application/json"),
- _ = couch_httpd:body(Req),
- StartTime = couch_db:get_instance_start_time(Db),
- send_json(Req, 201, {[
- {ok, true},
- {instance_start_time, StartTime}
- ]});
-
-db_req(#httpd{path_parts=[_,<<"_ensure_full_commit">>]}=Req, _Db) ->
- send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>]}=Req, Db) ->
- couch_stats:increment_counter([couchdb, httpd, bulk_requests]),
- couch_httpd:validate_ctype(Req, "application/json"),
- {JsonProps} = couch_httpd:json_body_obj(Req),
- case couch_util:get_value(<<"docs">>, JsonProps) of
- undefined ->
- send_error(Req, 400, <<"bad_request">>, <<"Missing JSON list of 'docs'">>);
- DocsArray ->
- couch_stats:update_histogram([couchdb, httpd, bulk_docs], length(DocsArray)),
- case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
- "true" ->
- Options = [full_commit];
- "false" ->
- Options = [delay_commit];
- _ ->
- Options = []
- end,
- case couch_util:get_value(<<"new_edits">>, JsonProps, true) of
- true ->
- Docs = lists:map(
- fun({ObjProps} = JsonObj) ->
- Doc = couch_db:doc_from_json_obj_validate(Db, JsonObj),
- validate_attachment_names(Doc),
- Id = case Doc#doc.id of
- <<>> -> couch_uuids:new();
- Id0 -> Id0
- end,
- case couch_util:get_value(<<"_rev">>, ObjProps) of
- undefined ->
- Revs = {0, []};
- Rev ->
- {Pos, RevId} = couch_doc:parse_rev(Rev),
- Revs = {Pos, [RevId]}
- end,
- Doc#doc{id=Id,revs=Revs}
- end,
- DocsArray),
- Options2 =
- case couch_util:get_value(<<"all_or_nothing">>, JsonProps) of
- true -> [all_or_nothing|Options];
- _ -> Options
- end,
- case couch_db:update_docs(Db, Docs, Options2) of
- {ok, Results} ->
- % output the results
- DocResults = lists:zipwith(fun update_doc_result_to_json/2,
- Docs, Results),
- send_json(Req, 201, DocResults);
- {aborted, Errors} ->
- ErrorsJson =
- lists:map(fun update_doc_result_to_json/1, Errors),
- send_json(Req, 417, ErrorsJson)
- end;
- false ->
- Docs = lists:map(fun(JsonObj) ->
- Doc = couch_db:doc_from_json_obj_validate(Db, JsonObj),
- validate_attachment_names(Doc),
- Doc
- end, DocsArray),
- {ok, Errors} = couch_db:update_docs(Db, Docs, Options, replicated_changes),
- ErrorsJson =
- lists:map(fun update_doc_result_to_json/1, Errors),
- send_json(Req, 201, ErrorsJson)
- end
- end;
-db_req(#httpd{path_parts=[_,<<"_bulk_docs">>]}=Req, _Db) ->
- send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_purge">>]}=Req, Db) ->
- couch_stats:increment_counter([couchdb, httpd, purge_requests]),
- couch_httpd:validate_ctype(Req, "application/json"),
- {IdRevs} = couch_httpd:json_body_obj(Req),
- PurgeReqs = lists:map(fun({Id, JsonRevs}) ->
- {couch_uuids:new(), Id, couch_doc:parse_revs(JsonRevs)}
- end, IdRevs),
-
- {ok, Replies} = couch_db:purge_docs(Db, PurgeReqs),
-
- Results = lists:zipwith(fun({Id, _}, {ok, Reply}) ->
- {Id, couch_doc:revs_to_strs(Reply)}
- end, IdRevs, Replies),
-
- {ok, Db2} = couch_db:reopen(Db),
- PurgeSeq = couch_db:get_purge_seq(Db2),
- send_json(Req, 200, {[{purge_seq, PurgeSeq}, {purged, {Results}}]});
-
-db_req(#httpd{path_parts=[_,<<"_purge">>]}=Req, _Db) ->
- send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_missing_revs">>]}=Req, Db) ->
- couch_httpd:validate_ctype(Req, "application/json"),
- {JsonDocIdRevs} = couch_httpd:json_body_obj(Req),
- JsonDocIdRevs2 = [{Id, [couch_doc:parse_rev(RevStr) || RevStr <- RevStrs]} || {Id, RevStrs} <- JsonDocIdRevs],
- {ok, Results} = couch_db:get_missing_revs(Db, JsonDocIdRevs2),
- Results2 = [{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs, _} <- Results],
- send_json(Req, {[
- {missing_revs, {Results2}}
- ]});
-
-db_req(#httpd{path_parts=[_,<<"_missing_revs">>]}=Req, _Db) ->
- send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_revs_diff">>]}=Req, Db) ->
- couch_httpd:validate_ctype(Req, "application/json"),
- {JsonDocIdRevs} = couch_httpd:json_body_obj(Req),
- JsonDocIdRevs2 =
- [{Id, couch_doc:parse_revs(RevStrs)} || {Id, RevStrs} <- JsonDocIdRevs],
- {ok, Results} = couch_db:get_missing_revs(Db, JsonDocIdRevs2),
- Results2 =
- lists:map(fun({Id, MissingRevs, PossibleAncestors}) ->
- {Id,
- {[{missing, couch_doc:revs_to_strs(MissingRevs)}] ++
- if PossibleAncestors == [] ->
- [];
- true ->
- [{possible_ancestors,
- couch_doc:revs_to_strs(PossibleAncestors)}]
- end}}
- end, Results),
- send_json(Req, {Results2});
-
-db_req(#httpd{path_parts=[_,<<"_revs_diff">>]}=Req, _Db) ->
- send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='PUT',path_parts=[_,<<"_security">>]}=Req, Db) ->
- SecObj = couch_httpd:json_body(Req),
- ok = couch_db:set_security(Db, SecObj),
- send_json(Req, {[{<<"ok">>, true}]});
-
-db_req(#httpd{method='GET',path_parts=[_,<<"_security">>]}=Req, Db) ->
- send_json(Req, couch_db:get_security(Db));
-
-db_req(#httpd{path_parts=[_,<<"_security">>]}=Req, _Db) ->
- send_method_not_allowed(Req, "PUT,GET");
-
-db_req(#httpd{method='PUT',path_parts=[_,<<"_revs_limit">>]}=Req,
- Db) ->
- Limit = couch_httpd:json_body(Req),
- case is_integer(Limit) of
- true ->
- ok = couch_db:set_revs_limit(Db, Limit),
- send_json(Req, {[{<<"ok">>, true}]});
- false ->
- throw({bad_request, <<"Rev limit has to be an integer">>})
- end;
-
-db_req(#httpd{method='GET',path_parts=[_,<<"_revs_limit">>]}=Req, Db) ->
- send_json(Req, couch_db:get_revs_limit(Db));
-
-db_req(#httpd{path_parts=[_,<<"_revs_limit">>]}=Req, _Db) ->
- send_method_not_allowed(Req, "PUT,GET");
-
-% Special case to enable using an unencoded slash in the URL of design docs,
-% as slashes in document IDs must otherwise be URL encoded.
-db_req(#httpd{method='GET',mochi_req=MochiReq, path_parts=[DbName,<<"_design/",_/binary>>|_]}=Req, _Db) ->
- PathFront = "/" ++ couch_httpd:quote(binary_to_list(DbName)) ++ "/",
- [_|PathTail] = re:split(MochiReq:get(raw_path), "_design%2F",
- [{return, list}]),
- couch_httpd:send_redirect(Req, PathFront ++ "_design/" ++
- mochiweb_util:join(PathTail, "_design%2F"));
-
-db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name]}=Req, Db) ->
- db_doc_req(Req, Db, <<"_design/",Name/binary>>);
-
-db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name|FileNameParts]}=Req, Db) ->
- db_attachment_req(Req, Db, <<"_design/",Name/binary>>, FileNameParts);
-
-
-% Special case to allow for accessing local documents without %2F
-% encoding the docid. Throws out requests that don't have the second
-% path part or that specify an attachment name.
-db_req(#httpd{path_parts=[_DbName, <<"_local">>]}, _Db) ->
- throw({bad_request, <<"Invalid _local document id.">>});
-
-db_req(#httpd{path_parts=[_DbName, <<"_local/">>]}, _Db) ->
- throw({bad_request, <<"Invalid _local document id.">>});
-
-db_req(#httpd{path_parts=[_DbName, <<"_local">>, Name]}=Req, Db) ->
- db_doc_req(Req, Db, <<"_local/", Name/binary>>);
-
-db_req(#httpd{path_parts=[_DbName, <<"_local">> | _Rest]}, _Db) ->
- throw({bad_request, <<"_local documents do not accept attachments.">>});
-
-db_req(#httpd{path_parts=[_, DocId]}=Req, Db) ->
- db_doc_req(Req, Db, DocId);
-
-db_req(#httpd{path_parts=[_, DocId | FileNameParts]}=Req, Db) ->
- db_attachment_req(Req, Db, DocId, FileNameParts).
-
-db_doc_req(#httpd{method='DELETE'}=Req, Db, DocId) ->
- % check for the existence of the doc to handle the 404 case.
- couch_doc_open(Db, DocId, nil, []),
- case couch_httpd:qs_value(Req, "rev") of
- undefined ->
- JsonObj = {[{<<"_deleted">>,true}]},
- Doc = couch_doc_from_req(Req, Db, DocId, JsonObj),
- update_doc(Req, Db, DocId, Doc);
- Rev ->
- JsonObj = {[{<<"_rev">>, ?l2b(Rev)},{<<"_deleted">>,true}]},
- Doc = couch_doc_from_req(Req, Db, DocId, JsonObj),
- update_doc(Req, Db, DocId, Doc)
- end;
-
-db_doc_req(#httpd{method = 'GET', mochi_req = MochiReq} = Req, Db, DocId) ->
- #doc_query_args{
- rev = Rev,
- open_revs = Revs,
- options = Options1,
- atts_since = AttsSince
- } = parse_doc_query(Req),
- Options = case AttsSince of
- nil ->
- Options1;
- RevList when is_list(RevList) ->
- [{atts_since, RevList}, attachments | Options1]
- end,
- case Revs of
- [] ->
- Doc = couch_doc_open(Db, DocId, Rev, Options),
- send_doc(Req, Doc, Options);
- _ ->
- {ok, Results} = couch_db:open_doc_revs(Db, DocId, Revs, Options),
- case MochiReq:accepts_content_type("multipart/mixed") of
- false ->
- {ok, Resp} = start_json_response(Req, 200),
- send_chunk(Resp, "["),
- % We loop through the docs. The first time through the separator
- % is whitespace, then a comma on subsequent iterations.
- lists:foldl(
- fun(Result, AccSeparator) ->
- case Result of
- {ok, Doc} ->
- JsonDoc = couch_doc:to_json_obj(Doc, Options),
- Json = ?JSON_ENCODE({[{ok, JsonDoc}]}),
- send_chunk(Resp, AccSeparator ++ Json);
- {{not_found, missing}, RevId} ->
- RevStr = couch_doc:rev_to_str(RevId),
- Json = ?JSON_ENCODE({[{<<"missing">>, RevStr}]}),
- send_chunk(Resp, AccSeparator ++ Json)
- end,
- "," % AccSeparator now has a comma
- end,
- "", Results),
- send_chunk(Resp, "]"),
- end_json_response(Resp);
- true ->
- send_docs_multipart(Req, Results, Options)
- end
- end;
-
-
-db_doc_req(#httpd{method='POST'}=Req, Db, DocId) ->
- couch_httpd:validate_referer(Req),
- couch_db:validate_docid(Db, DocId),
- couch_httpd:validate_ctype(Req, "multipart/form-data"),
- Form = couch_httpd:parse_form(Req),
- case couch_util:get_value("_doc", Form) of
- undefined ->
- Rev = couch_doc:parse_rev(couch_util:get_value("_rev", Form)),
- {ok, [{ok, Doc}]} = couch_db:open_doc_revs(Db, DocId, [Rev], []);
- Json ->
- Doc = couch_doc_from_req(Req, Db, DocId, ?JSON_DECODE(Json))
- end,
- UpdatedAtts = [
- couch_att:new([
- {name, validate_attachment_name(Name)},
- {type, list_to_binary(ContentType)},
- {data, Content}
- ]) ||
- {Name, {ContentType, _}, Content} <-
- proplists:get_all_values("_attachments", Form)
- ],
- #doc{atts=OldAtts} = Doc,
- OldAtts2 = lists:flatmap(
- fun(Att) ->
- OldName = couch_att:fetch(name, Att),
- case [1 || A <- UpdatedAtts, couch_att:fetch(name, A) == OldName] of
- [] -> [Att]; % the attachment wasn't in the UpdatedAtts, return it
- _ -> [] % the attachment was in the UpdatedAtts, drop it
- end
- end, OldAtts),
- NewDoc = Doc#doc{
- atts = UpdatedAtts ++ OldAtts2
- },
- update_doc(Req, Db, DocId, NewDoc);
-
-db_doc_req(#httpd{method='PUT'}=Req, Db, DocId) ->
- couch_db:validate_docid(Db, DocId),
-
- case couch_util:to_list(couch_httpd:header_value(Req, "Content-Type")) of
- ("multipart/related;" ++ _) = ContentType ->
- couch_httpd:check_max_request_length(Req),
- {ok, Doc0, WaitFun, Parser} = couch_doc:doc_from_multi_part_stream(
- ContentType, fun() -> receive_request_data(Req) end),
- Doc = couch_doc_from_req(Req, Db, DocId, Doc0),
- try
- Result = update_doc(Req, Db, DocId, Doc),
- WaitFun(),
- Result
- catch throw:Err ->
- % Document rejected by a validate_doc_update function.
- couch_httpd_multipart:abort_multipart_stream(Parser),
- throw(Err)
- end;
- _Else ->
- Body = couch_httpd:json_body(Req),
- Doc = couch_doc_from_req(Req, Db, DocId, Body),
- update_doc(Req, Db, DocId, Doc)
- end;
-
-db_doc_req(#httpd{method='COPY'}=Req, Db, SourceDocId) ->
- SourceRev =
- case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of
- missing_rev -> nil;
- Rev -> Rev
- end,
- {TargetDocId0, TargetRevs} = parse_copy_destination_header(Req),
- TargetDocId = list_to_binary(mochiweb_util:unquote(TargetDocId0)),
- % open old doc
- Doc = couch_doc_open(Db, SourceDocId, SourceRev, []),
- % save new doc
- update_doc(Req, Db, TargetDocId, Doc#doc{id=TargetDocId, revs=TargetRevs});
-
-db_doc_req(Req, _Db, _DocId) ->
- send_method_not_allowed(Req, "DELETE,GET,HEAD,POST,PUT,COPY").
-
-
-send_doc(Req, Doc, Options) ->
- case Doc#doc.meta of
- [] ->
- DiskEtag = couch_httpd:doc_etag(Doc),
- % output etag only when we have no meta
- couch_httpd:etag_respond(Req, DiskEtag, fun() ->
- send_doc_efficiently(Req, Doc, [{"ETag", DiskEtag}], Options)
- end);
- _ ->
- send_doc_efficiently(Req, Doc, [], Options)
- end.
-
-
-send_doc_efficiently(Req, #doc{atts=[]}=Doc, Headers, Options) ->
- send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
-send_doc_efficiently(#httpd{mochi_req = MochiReq} = Req,
- #doc{atts = Atts} = Doc, Headers, Options) ->
- case lists:member(attachments, Options) of
- true ->
- case MochiReq:accepts_content_type("multipart/related") of
- false ->
- send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
- true ->
- Boundary = couch_uuids:random(),
- JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc,
- [attachments, follows, att_encoding_info | Options])),
- {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(
- Boundary,JsonBytes, Atts, true),
- CType = {"Content-Type", ?b2l(ContentType)},
- {ok, Resp} = start_response_length(Req, 200, [CType|Headers], Len),
- couch_doc:doc_to_multi_part_stream(Boundary,JsonBytes,Atts,
- fun(Data) -> couch_httpd:send(Resp, Data) end, true)
- end;
- false ->
- send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options))
- end.
-
-send_docs_multipart(Req, Results, Options1) ->
- OuterBoundary = couch_uuids:random(),
- InnerBoundary = couch_uuids:random(),
- Options = [attachments, follows, att_encoding_info | Options1],
- CType = {"Content-Type",
- "multipart/mixed; boundary=\"" ++ ?b2l(OuterBoundary) ++ "\""},
- {ok, Resp} = start_chunked_response(Req, 200, [CType]),
- couch_httpd:send_chunk(Resp, <<"--", OuterBoundary/binary>>),
- lists:foreach(
- fun({ok, #doc{atts=Atts}=Doc}) ->
- JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)),
- {ContentType, _Len} = couch_doc:len_doc_to_multi_part_stream(
- InnerBoundary, JsonBytes, Atts, true),
- couch_httpd:send_chunk(Resp, <<"\r\nContent-Type: ",
- ContentType/binary, "\r\n\r\n">>),
- couch_doc:doc_to_multi_part_stream(InnerBoundary, JsonBytes, Atts,
- fun(Data) -> couch_httpd:send_chunk(Resp, Data)
- end, true),
- couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>);
- ({{not_found, missing}, RevId}) ->
- RevStr = couch_doc:rev_to_str(RevId),
- Json = ?JSON_ENCODE({[{<<"missing">>, RevStr}]}),
- couch_httpd:send_chunk(Resp,
- [<<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
- Json,
- <<"\r\n--", OuterBoundary/binary>>])
- end, Results),
- couch_httpd:send_chunk(Resp, <<"--">>),
- couch_httpd:last_chunk(Resp).
-
-send_ranges_multipart(Req, ContentType, Len, Att, Ranges) ->
- Boundary = couch_uuids:random(),
- CType = {"Content-Type",
- "multipart/byteranges; boundary=\"" ++ ?b2l(Boundary) ++ "\""},
- {ok, Resp} = start_chunked_response(Req, 206, [CType]),
- couch_httpd:send_chunk(Resp, <<"--", Boundary/binary>>),
- lists:foreach(fun({From, To}) ->
- ContentRange = ?l2b(make_content_range(From, To, Len)),
- couch_httpd:send_chunk(Resp,
- <<"\r\nContent-Type: ", ContentType/binary, "\r\n",
- "Content-Range: ", ContentRange/binary, "\r\n",
- "\r\n">>),
- couch_att:range_foldl(Att, From, To + 1,
- fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
- couch_httpd:send_chunk(Resp, <<"\r\n--", Boundary/binary>>)
- end, Ranges),
- couch_httpd:send_chunk(Resp, <<"--">>),
- couch_httpd:last_chunk(Resp),
- {ok, Resp}.
-
-receive_request_data(Req) ->
- receive_request_data(Req, couch_httpd:body_length(Req)).
-
-receive_request_data(Req, LenLeft) when LenLeft > 0 ->
- Len = erlang:min(4096, LenLeft),
- Data = couch_httpd:recv(Req, Len),
- {Data, fun() -> receive_request_data(Req, LenLeft - iolist_size(Data)) end};
-receive_request_data(_Req, _) ->
- throw(<<"expected more data">>).
-
-make_content_range(From, To, Len) ->
- io_lib:format("bytes ~B-~B/~B", [From, To, Len]).
-
-update_doc_result_to_json({{Id, Rev}, Error}) ->
- {_Code, Err, Msg} = couch_httpd:error_info(Error),
- {[{id, Id}, {rev, couch_doc:rev_to_str(Rev)},
- {error, Err}, {reason, Msg}]}.
-
-update_doc_result_to_json(#doc{id=DocId}, Result) ->
- update_doc_result_to_json(DocId, Result);
-update_doc_result_to_json(DocId, {ok, NewRev}) ->
- {[{ok, true}, {id, DocId}, {rev, couch_doc:rev_to_str(NewRev)}]};
-update_doc_result_to_json(DocId, Error) ->
- {_Code, ErrorStr, Reason} = couch_httpd:error_info(Error),
- {[{id, DocId}, {error, ErrorStr}, {reason, Reason}]}.
-
-
-update_doc(Req, Db, DocId, #doc{deleted=false}=Doc) ->
- DbName = couch_db:name(Db),
- Loc = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName) ++ "/" ++ couch_util:url_encode(DocId)),
- update_doc(Req, Db, DocId, Doc, [{"Location", Loc}]);
-update_doc(Req, Db, DocId, Doc) ->
- update_doc(Req, Db, DocId, Doc, []).
-
-update_doc(Req, Db, DocId, Doc, Headers) ->
- #doc_query_args{
- update_type = UpdateType
- } = parse_doc_query(Req),
- update_doc(Req, Db, DocId, Doc, Headers, UpdateType).
-
-update_doc(Req, Db, DocId, #doc{deleted=Deleted}=Doc, Headers, UpdateType) ->
- case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
- "true" ->
- Options = [full_commit];
- "false" ->
- Options = [delay_commit];
- _ ->
- Options = []
- end,
- case couch_httpd:qs_value(Req, "batch") of
- "ok" ->
- % async batching
- spawn(fun() ->
- case catch(couch_db:update_doc(Db, Doc, Options, UpdateType)) of
- {ok, _} -> ok;
- Error ->
- couch_log:info("Batch doc error (~s): ~p",[DocId, Error])
- end
- end),
- send_json(Req, 202, Headers, {[
- {ok, true},
- {id, DocId}
- ]});
- _Normal ->
- % normal
- {ok, NewRev} = couch_db:update_doc(Db, Doc, Options, UpdateType),
- NewRevStr = couch_doc:rev_to_str(NewRev),
- ResponseHeaders = [{"ETag", <<"\"", NewRevStr/binary, "\"">>}] ++ Headers,
- send_json(Req,
- if Deleted orelse Req#httpd.method == 'DELETE' -> 200;
- true -> 201 end,
- ResponseHeaders, {[
- {ok, true},
- {id, DocId},
- {rev, NewRevStr}]})
- end.
-
-couch_doc_from_req(Req, _Db, DocId, #doc{revs=Revs}=Doc) ->
- validate_attachment_names(Doc),
- Rev = case couch_httpd:qs_value(Req, "rev") of
- undefined ->
- undefined;
- QSRev ->
- couch_doc:parse_rev(QSRev)
- end,
- Revs2 =
- case Revs of
- {Start, [RevId|_]} ->
- if Rev /= undefined andalso Rev /= {Start, RevId} ->
- throw({bad_request, "Document rev from request body and query "
- "string have different values"});
- true ->
- case extract_header_rev(Req, {Start, RevId}) of
- missing_rev -> {0, []};
- _ -> Revs
- end
- end;
- _ ->
- case extract_header_rev(Req, Rev) of
- missing_rev -> {0, []};
- {Pos, RevId2} -> {Pos, [RevId2]}
- end
- end,
- Doc#doc{id=DocId, revs=Revs2};
-couch_doc_from_req(Req, Db, DocId, Json) ->
- Doc = couch_db:doc_from_json_obj_validate(Db, Json),
- couch_doc_from_req(Req, Db, DocId, Doc).
-
-% Useful for debugging
-% couch_doc_open(Db, DocId) ->
-% couch_doc_open(Db, DocId, nil, []).
-
-couch_doc_open(Db, DocId, Rev, Options) ->
- case Rev of
- nil -> % open most recent rev
- case couch_db:open_doc(Db, DocId, Options) of
- {ok, Doc} ->
- Doc;
- Error ->
- throw(Error)
- end;
- _ -> % open a specific rev (deletions come back as stubs)
- case couch_db:open_doc_revs(Db, DocId, [Rev], Options) of
- {ok, [{ok, Doc}]} ->
- Doc;
- {ok, [{{not_found, missing}, Rev}]} ->
- throw(not_found);
- {ok, [Else]} ->
- throw(Else)
- end
- end.
-
-% Attachment request handlers
-
-db_attachment_req(#httpd{method='GET',mochi_req=MochiReq}=Req, Db, DocId, FileNameParts) ->
- FileName = list_to_binary(mochiweb_util:join(lists:map(fun binary_to_list/1, FileNameParts),"/")),
- #doc_query_args{
- rev=Rev,
- options=Options
- } = parse_doc_query(Req),
- #doc{
- atts=Atts
- } = Doc = couch_doc_open(Db, DocId, Rev, Options),
- case [A || A <- Atts, couch_att:fetch(name, A) == FileName] of
- [] ->
- throw({not_found, "Document is missing attachment"});
- [Att] ->
- [Type, Enc, DiskLen, AttLen, Md5] = couch_att:fetch([type, encoding, disk_len, att_len, md5], Att),
- Etag = case Md5 of
- <<>> -> couch_httpd:doc_etag(Doc);
- _ -> "\"" ++ ?b2l(base64:encode(Md5)) ++ "\""
- end,
- ReqAcceptsAttEnc = lists:member(
- atom_to_list(Enc),
- couch_httpd:accepted_encodings(Req)
- ),
- Len = case {Enc, ReqAcceptsAttEnc} of
- {identity, _} ->
- % stored and served in identity form
- DiskLen;
- {_, false} when DiskLen =/= AttLen ->
- % Stored encoded, but client doesn't accept the encoding we used,
- % so we need to decode on the fly. DiskLen is the identity length
- % of the attachment.
- DiskLen;
- {_, true} ->
- % Stored and served encoded. AttLen is the encoded length.
- AttLen;
- _ ->
- % We received an encoded attachment and stored it as such, so we
- % don't know the identity length. The client doesn't accept the
- % encoding, and since we cannot serve a correct Content-Length
- % header we'll fall back to a chunked response.
- undefined
- end,
- Headers = [
- {"ETag", Etag},
- {"Cache-Control", "must-revalidate"},
- {"Content-Type", binary_to_list(Type)}
- ] ++ case ReqAcceptsAttEnc of
- true when Enc =/= identity ->
- % RFC 2616 says that the 'identity' encoding should not be used in
- % the Content-Encoding header
- [{"Content-Encoding", atom_to_list(Enc)}];
- _ ->
- []
- end ++ case Enc of
- identity ->
- [{"Accept-Ranges", "bytes"}];
- _ ->
- [{"Accept-Ranges", "none"}]
- end,
- AttFun = case ReqAcceptsAttEnc of
- false ->
- fun couch_att:foldl_decode/3;
- true ->
- fun couch_att:foldl/3
- end,
- couch_httpd:etag_respond(
- Req,
- Etag,
- fun() ->
- case Len of
- undefined ->
- {ok, Resp} = start_chunked_response(Req, 200, Headers),
- AttFun(Att, fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
- last_chunk(Resp);
- _ ->
- Ranges = parse_ranges(MochiReq:get(range), Len),
- case {Enc, Ranges} of
- {identity, [{From, To}]} ->
- Headers1 = [{"Content-Range", make_content_range(From, To, Len)}]
- ++ Headers,
- {ok, Resp} = start_response_length(Req, 206, Headers1, To - From + 1),
- couch_att:range_foldl(Att, From, To + 1,
- fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp});
- {identity, Ranges} when is_list(Ranges) andalso length(Ranges) < 10 ->
- send_ranges_multipart(Req, Type, Len, Att, Ranges);
- _ ->
- Headers1 = Headers ++
- if Enc =:= identity orelse ReqAcceptsAttEnc =:= true ->
- [{"Content-MD5", base64:encode(Md5)}];
- true ->
- []
- end,
- {ok, Resp} = start_response_length(Req, 200, Headers1, Len),
- AttFun(Att, fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp})
- end
- end
- end
- )
- end;
-
-
-db_attachment_req(#httpd{method=Method,mochi_req=MochiReq}=Req, Db, DocId, FileNameParts)
- when (Method == 'PUT') or (Method == 'DELETE') ->
- FileName = validate_attachment_name(
- mochiweb_util:join(
- lists:map(fun binary_to_list/1,
- FileNameParts),"/")),
- NewAtt = case Method of
- 'DELETE' ->
- [];
- _ ->
- MimeType = case couch_httpd:header_value(Req,"Content-Type") of
- % We could throw an error here or guess by the FileName.
- % Currently, just giving it a default.
- undefined -> <<"application/octet-stream">>;
- CType -> list_to_binary(CType)
- end,
- Data = case couch_httpd:body_length(Req) of
- undefined ->
- <<"">>;
- {unknown_transfer_encoding, Unknown} ->
- exit({unknown_transfer_encoding, Unknown});
- chunked ->
- fun(MaxChunkSize, ChunkFun, InitState) ->
- couch_httpd:recv_chunked(
- Req, MaxChunkSize, ChunkFun, InitState
- )
- end;
- 0 ->
- <<"">>;
- Length when is_integer(Length) ->
- Expect = case couch_httpd:header_value(Req, "expect") of
- undefined ->
- undefined;
- Value when is_list(Value) ->
- string:to_lower(Value)
- end,
- case Expect of
- "100-continue" ->
- MochiReq:start_raw_response({100, gb_trees:empty()});
- _Else ->
- ok
- end,
- fun() -> couch_httpd:recv(Req, 0) end;
- Length ->
- exit({length_not_integer, Length})
- end,
- AttLen = case couch_httpd:header_value(Req,"Content-Length") of
- undefined -> undefined;
- Len -> list_to_integer(Len)
- end,
- ContentEnc = string:to_lower(string:strip(
- couch_httpd:header_value(Req,"Content-Encoding","identity")
- )),
- Encoding = case ContentEnc of
- "identity" ->
- identity;
- "gzip" ->
- gzip;
- _ ->
- throw({
- bad_ctype,
- "Only gzip and identity content-encodings are supported"
- })
- end,
- [couch_att:new([
- {name, FileName},
- {type, MimeType},
- {data, Data},
- {att_len, AttLen},
- {md5, get_md5_header(Req)},
- {encoding, Encoding}
- ])]
- end,
-
- Doc = case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of
- missing_rev -> % make the new doc
- if Method =/= 'DELETE' -> ok; true ->
- % check for the existence of the doc to handle the 404 case.
- couch_doc_open(Db, DocId, nil, [])
- end,
- couch_db:validate_docid(Db, DocId),
- #doc{id=DocId};
- Rev ->
- case couch_db:open_doc_revs(Db, DocId, [Rev], []) of
- {ok, [{ok, Doc0}]} -> Doc0;
- {ok, [{{not_found, missing}, Rev}]} -> throw(conflict);
- {ok, [Error]} -> throw(Error)
- end
- end,
-
- #doc{atts=Atts} = Doc,
- DocEdited = Doc#doc{
- atts = NewAtt ++ [A || A <- Atts, couch_att:fetch(name, A) /= FileName]
- },
-
- Headers = case Method of
- 'DELETE' ->
- [];
- _ ->
- [{"Location", absolute_uri(Req, "/" ++
- couch_util:url_encode(couch_db:name(Db)) ++ "/" ++
- couch_util:url_encode(DocId) ++ "/" ++
- couch_util:url_encode(FileName)
- )}]
- end,
- update_doc(Req, Db, DocId, DocEdited, Headers);
-
-db_attachment_req(Req, _Db, _DocId, _FileNameParts) ->
- send_method_not_allowed(Req, "DELETE,GET,HEAD,PUT").
-
-parse_ranges(undefined, _Len) ->
- undefined;
-parse_ranges(fail, _Len) ->
- undefined;
-parse_ranges(Ranges, Len) ->
- parse_ranges(Ranges, Len, []).
-
-parse_ranges([], _Len, Acc) ->
- lists:reverse(Acc);
-parse_ranges([{0, none}|_], _Len, _Acc) ->
- undefined;
-parse_ranges([{From, To}|_], _Len, _Acc) when is_integer(From) andalso is_integer(To) andalso To < From ->
- throw(requested_range_not_satisfiable);
-parse_ranges([{From, To}|Rest], Len, Acc) when is_integer(To) andalso To >= Len ->
- parse_ranges([{From, Len-1}] ++ Rest, Len, Acc);
-parse_ranges([{none, To}|Rest], Len, Acc) ->
- parse_ranges([{Len - To, Len - 1}] ++ Rest, Len, Acc);
-parse_ranges([{From, none}|Rest], Len, Acc) ->
- parse_ranges([{From, Len - 1}] ++ Rest, Len, Acc);
-parse_ranges([{From,To}|Rest], Len, Acc) ->
- parse_ranges(Rest, Len, [{From, To}] ++ Acc).
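
For reference, the parse_ranges/2 clauses above normalize the range tuples that mochiweb extracts from the Range header against the attachment length before any bytes are streamed. A worked trace of those clauses (lengths illustrative):

    %% parse_ranges([{none, 500}], 10000)  -> [{9500, 9999}]   (suffix range: last 500 bytes)
    %% parse_ranges([{9000, none}], 10000) -> [{9000, 9999}]   (open-ended range)
    %% parse_ranges([{0, 20000}], 10000)   -> [{0, 9999}]      (end clamped to the attachment length)
    %% parse_ranges([{0, none}], 10000)    -> undefined        (whole-body range; the caller falls back to a plain 200)
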
-
-get_md5_header(Req) ->
- ContentMD5 = couch_httpd:header_value(Req, "Content-MD5"),
- Length = couch_httpd:body_length(Req),
- Trailer = couch_httpd:header_value(Req, "Trailer"),
- case {ContentMD5, Length, Trailer} of
- _ when is_list(ContentMD5) orelse is_binary(ContentMD5) ->
- base64:decode(ContentMD5);
- {_, chunked, undefined} ->
- <<>>;
- {_, chunked, _} ->
- case re:run(Trailer, "\\bContent-MD5\\b", [caseless]) of
- {match, _} ->
- md5_in_footer;
- _ ->
- <<>>
- end;
- _ ->
- <<>>
- end.
-
-parse_doc_query(Req) ->
- lists:foldl(fun({Key,Value}, Args) ->
- case {Key, Value} of
- {"attachments", "true"} ->
- Options = [attachments | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- {"meta", "true"} ->
- Options = [revs_info, conflicts, deleted_conflicts | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- {"revs", "true"} ->
- Options = [revs | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- {"local_seq", "true"} ->
- Options = [local_seq | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- {"revs_info", "true"} ->
- Options = [revs_info | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- {"conflicts", "true"} ->
- Options = [conflicts | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- {"deleted_conflicts", "true"} ->
- Options = [deleted_conflicts | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- {"rev", Rev} ->
- Args#doc_query_args{rev=couch_doc:parse_rev(Rev)};
- {"open_revs", "all"} ->
- Args#doc_query_args{open_revs=all};
- {"open_revs", RevsJsonStr} ->
- JsonArray = ?JSON_DECODE(RevsJsonStr),
- Args#doc_query_args{open_revs=couch_doc:parse_revs(JsonArray)};
- {"latest", "true"} ->
- Options = [latest | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- {"atts_since", RevsJsonStr} ->
- JsonArray = ?JSON_DECODE(RevsJsonStr),
- Args#doc_query_args{atts_since = couch_doc:parse_revs(JsonArray)};
- {"new_edits", "false"} ->
- Args#doc_query_args{update_type=replicated_changes};
- {"new_edits", "true"} ->
- Args#doc_query_args{update_type=interactive_edit};
- {"att_encoding_info", "true"} ->
- Options = [att_encoding_info | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- _Else -> % unknown key value pair, ignore.
- Args
- end
- end, #doc_query_args{}, couch_httpd:qs(Req)).
-
-parse_changes_query(Req, Db) ->
- ChangesArgs = lists:foldl(fun({Key, Value}, Args) ->
- case {string:to_lower(Key), Value} of
- {"feed", "live"} ->
- %% sugar for continuous
- Args#changes_args{feed="continuous"};
- {"feed", _} ->
- Args#changes_args{feed=Value};
- {"descending", "true"} ->
- Args#changes_args{dir=rev};
- {"since", "now"} ->
- UpdateSeq = couch_util:with_db(couch_db:name(Db), fun(WDb) ->
- couch_db:get_update_seq(WDb)
- end),
- Args#changes_args{since=UpdateSeq};
- {"since", _} ->
- Args#changes_args{since=list_to_integer(Value)};
- {"last-event-id", _} ->
- Args#changes_args{since=list_to_integer(Value)};
- {"limit", _} ->
- Args#changes_args{limit=list_to_integer(Value)};
- {"style", _} ->
- Args#changes_args{style=list_to_existing_atom(Value)};
- {"heartbeat", "true"} ->
- Args#changes_args{heartbeat=true};
- {"heartbeat", _} ->
- Args#changes_args{heartbeat=list_to_integer(Value)};
- {"timeout", _} ->
- Args#changes_args{timeout=list_to_integer(Value)};
- {"include_docs", "true"} ->
- Args#changes_args{include_docs=true};
- {"attachments", "true"} ->
- Opts = Args#changes_args.doc_options,
- Args#changes_args{doc_options=[attachments|Opts]};
- {"att_encoding_info", "true"} ->
- Opts = Args#changes_args.doc_options,
- Args#changes_args{doc_options=[att_encoding_info|Opts]};
- {"conflicts", "true"} ->
- Args#changes_args{conflicts=true};
- {"filter", _} ->
- Args#changes_args{filter=Value};
- _Else -> % unknown key value pair, ignore.
- Args
- end
- end, #changes_args{}, couch_httpd:qs(Req)),
- %% if it's an EventSource request with a Last-event-ID header
- %% that should override the `since` query string, since it's
- %% probably the browser reconnecting.
- case ChangesArgs#changes_args.feed of
- "eventsource" ->
- case couch_httpd:header_value(Req, "last-event-id") of
- undefined ->
- ChangesArgs;
- Value ->
- ChangesArgs#changes_args{since=list_to_integer(Value)}
- end;
- _ ->
- ChangesArgs
- end.
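
To summarize the query-string mapping implemented above, a few representative parameters and the #changes_args fields they set (values illustrative):

    %% ?feed=live          -> #changes_args{feed = "continuous"}
    %% ?descending=true    -> #changes_args{dir = rev}
    %% ?since=now          -> since is set to the database's current update sequence
    %% ?heartbeat=10000    -> #changes_args{heartbeat = 10000}
    %% ?include_docs=true  -> #changes_args{include_docs = true}
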
-
-extract_header_rev(Req, ExplicitRev) when is_binary(ExplicitRev) or is_list(ExplicitRev)->
- extract_header_rev(Req, couch_doc:parse_rev(ExplicitRev));
-extract_header_rev(Req, ExplicitRev) ->
- Etag = case couch_httpd:header_value(Req, "If-Match") of
- undefined -> undefined;
- Value -> couch_doc:parse_rev(string:strip(Value, both, $"))
- end,
- case {ExplicitRev, Etag} of
- {undefined, undefined} -> missing_rev;
- {_, undefined} -> ExplicitRev;
- {undefined, _} -> Etag;
- _ when ExplicitRev == Etag -> Etag;
- _ ->
- throw({bad_request, "Document rev and etag have different values"})
- end.
-
-
-parse_copy_destination_header(Req) ->
- case couch_httpd:header_value(Req, "Destination") of
- undefined ->
- throw({bad_request, "Destination header is mandatory for COPY."});
- Destination ->
- case re:run(Destination, "^https?://", [{capture, none}]) of
- match ->
- throw({bad_request, "Destination URL must be relative."});
- nomatch ->
- % see if ?rev=revid got appended to the Destination header
- case re:run(Destination, "\\?", [{capture, none}]) of
- nomatch ->
- {list_to_binary(Destination), {0, []}};
- match ->
- [DocId, RevQs] = re:split(Destination, "\\?", [{return, list}]),
- [_RevQueryKey, Rev] = re:split(RevQs, "=", [{return, list}]),
- {Pos, RevId} = couch_doc:parse_rev(Rev),
- {list_to_binary(DocId), {Pos, [RevId]}}
- end
- end
- end.
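
For the COPY handler earlier in this file, the Destination header parsed above yields a target doc id plus an optional target revision; a quick trace (ids and revs illustrative):

    %% "target-doc"           -> {<<"target-doc">>, {0, []}}
    %% "target-doc?rev=2-abc" -> {<<"target-doc">>, {2, [<<"abc">>]}}
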
-
-validate_attachment_names(Doc) ->
- lists:foreach(fun(Att) ->
- Name = couch_att:fetch(name, Att),
- validate_attachment_name(Name)
- end, Doc#doc.atts).
-
-validate_attachment_name(Name) when is_list(Name) ->
- validate_attachment_name(list_to_binary(Name));
-validate_attachment_name(<<"_",_/binary>>) ->
- throw({bad_request, <<"Attachment name can't start with '_'">>});
-validate_attachment_name(Name) ->
- case couch_util:validate_utf8(Name) of
- true -> Name;
- false -> throw({bad_request, <<"Attachment name is not UTF-8 encoded">>})
- end.
diff --git a/src/couch/src/couch_httpd_misc_handlers.erl b/src/couch/src/couch_httpd_misc_handlers.erl
deleted file mode 100644
index 3d25f5853..000000000
--- a/src/couch/src/couch_httpd_misc_handlers.erl
+++ /dev/null
@@ -1,269 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_misc_handlers).
-
--export([handle_welcome_req/2,handle_favicon_req/2,handle_utils_dir_req/2,
- handle_all_dbs_req/1,
- handle_uuids_req/1,handle_config_req/1,
- handle_task_status_req/1, handle_file_req/2]).
-
-
--include_lib("couch/include/couch_db.hrl").
-
--import(couch_httpd,
- [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
- start_json_response/2,send_chunk/2,last_chunk/1,end_json_response/1,
- start_chunked_response/3, send_error/4]).
-
-% httpd global handlers
-
-handle_welcome_req(#httpd{method='GET'}=Req, WelcomeMessage) ->
- send_json(Req, {[
- {couchdb, WelcomeMessage},
- {uuid, couch_server:get_uuid()},
- {version, list_to_binary(couch_server:get_version())}
- ] ++ case config:get("vendor") of
- [] ->
- [];
- Properties ->
- [{vendor, {[{?l2b(K), ?l2b(V)} || {K, V} <- Properties]}}]
- end
- });
-handle_welcome_req(Req, _) ->
- send_method_not_allowed(Req, "GET,HEAD").
-
-handle_favicon_req(#httpd{method='GET'}=Req, DocumentRoot) ->
- {{Year,Month,Day},Time} = erlang:universaltime(),
- OneYearFromNow = {{Year+1,Month,Day},Time},
- CachingHeaders = [
- %favicon should expire a year from now
- {"Cache-Control", "public, max-age=31536000"},
- {"Expires", couch_util:rfc1123_date(OneYearFromNow)}
- ],
- couch_httpd:serve_file(Req, "favicon.ico", DocumentRoot, CachingHeaders);
-
-handle_favicon_req(Req, _) ->
- send_method_not_allowed(Req, "GET,HEAD").
-
-handle_file_req(#httpd{method='GET'}=Req, Document) ->
- couch_httpd:serve_file(Req, filename:basename(Document), filename:dirname(Document));
-
-handle_file_req(Req, _) ->
- send_method_not_allowed(Req, "GET,HEAD").
-
-handle_utils_dir_req(Req, _) ->
- send_error(Req, 410, <<"no_node_local_fauxton">>,
- ?l2b("The web interface is no longer available on the node-local port.")).
-
-
-handle_all_dbs_req(#httpd{method='GET'}=Req) ->
- {ok, DbNames} = couch_server:all_databases(),
- send_json(Req, DbNames);
-handle_all_dbs_req(Req) ->
- send_method_not_allowed(Req, "GET,HEAD").
-
-
-handle_task_status_req(#httpd{method='GET'}=Req) ->
- ok = couch_httpd:verify_is_server_admin(Req),
- % convert the list of prop lists to a list of json objects
- send_json(Req, [{Props} || Props <- couch_task_status:all()]);
-handle_task_status_req(Req) ->
- send_method_not_allowed(Req, "GET,HEAD").
-
-
-handle_uuids_req(#httpd{method='GET'}=Req) ->
- Max = list_to_integer(config:get("uuids","max_count","1000")),
- Count = try list_to_integer(couch_httpd:qs_value(Req, "count", "1")) of
- N when N > Max ->
- throw({bad_request, <<"count parameter too large">>});
- N when N < 0 ->
- throw({bad_request, <<"count must be a positive integer">>});
- N -> N
- catch
- error:badarg ->
- throw({bad_request, <<"count must be a positive integer">>})
- end,
- UUIDs = [couch_uuids:new() || _ <- lists:seq(1, Count)],
- Etag = couch_httpd:make_etag(UUIDs),
- couch_httpd:etag_respond(Req, Etag, fun() ->
- CacheBustingHeaders = [
- {"Date", couch_util:rfc1123_date()},
- {"Cache-Control", "no-cache"},
- % Past date, ON PURPOSE!
- {"Expires", "Mon, 01 Jan 1990 00:00:00 GMT"},
- {"Pragma", "no-cache"},
- {"ETag", Etag}
- ],
- send_json(Req, 200, CacheBustingHeaders, {[{<<"uuids">>, UUIDs}]})
- end);
-handle_uuids_req(Req) ->
- send_method_not_allowed(Req, "GET").
-
-
-% Config request handler
-
-
-% GET /_config/
-% GET /_config
-handle_config_req(#httpd{method='GET', path_parts=[_]}=Req) ->
- ok = couch_httpd:verify_is_server_admin(Req),
- Grouped = lists:foldl(fun({{Section, Key}, Value}, Acc) ->
- case dict:is_key(Section, Acc) of
- true ->
- dict:append(Section, {list_to_binary(Key), list_to_binary(Value)}, Acc);
- false ->
- dict:store(Section, [{list_to_binary(Key), list_to_binary(Value)}], Acc)
- end
- end, dict:new(), config:all()),
- KVs = dict:fold(fun(Section, Values, Acc) ->
- [{list_to_binary(Section), {Values}} | Acc]
- end, [], Grouped),
- send_json(Req, 200, {KVs});
-% GET /_config/Section
-handle_config_req(#httpd{method='GET', path_parts=[_,Section]}=Req) ->
- ok = couch_httpd:verify_is_server_admin(Req),
- KVs = [{list_to_binary(Key), list_to_binary(Value)}
- || {Key, Value} <- config:get(Section)],
- send_json(Req, 200, {KVs});
-% GET /_config/Section/Key
-handle_config_req(#httpd{method='GET', path_parts=[_, Section, Key]}=Req) ->
- ok = couch_httpd:verify_is_server_admin(Req),
- case config:get(Section, Key, undefined) of
- undefined ->
- throw({not_found, unknown_config_value});
- Value ->
- send_json(Req, 200, list_to_binary(Value))
- end;
-% POST /_config/_reload - Flushes unpersisted config values from RAM
-handle_config_req(#httpd{method='POST', path_parts=[_, <<"_reload">>]}=Req) ->
- couch_httpd:validate_ctype(Req, "application/json"),
- _ = couch_httpd:body(Req),
- ok = couch_httpd:verify_is_server_admin(Req),
- ok = config:reload(),
- send_json(Req, 200, {[{ok, true}]});
-% PUT or DELETE /_config/Section/Key
-handle_config_req(#httpd{method=Method, path_parts=[_, Section, Key]}=Req)
- when (Method == 'PUT') or (Method == 'DELETE') ->
- ok = couch_httpd:verify_is_server_admin(Req),
- couch_util:check_config_blacklist(Section),
- Persist = couch_httpd:header_value(Req, "X-Couch-Persist") /= "false",
- case config:get("httpd", "config_whitelist", undefined) of
- undefined ->
- % No whitelist; allow all changes.
- handle_approved_config_req(Req, Persist);
- WhitelistValue ->
- % Provide a failsafe to protect against inadvertently locking
- % oneself out of the config by supplying a syntactically incorrect
- % Erlang term. To intentionally lock down the whitelist, supply a
- % well-formed list which does not include the whitelist config
- % variable itself.
- FallbackWhitelist = [{<<"httpd">>, <<"config_whitelist">>}],
-
- Whitelist = case couch_util:parse_term(WhitelistValue) of
- {ok, Value} when is_list(Value) ->
- Value;
- {ok, _NonListValue} ->
- FallbackWhitelist;
- {error, _} ->
- [{WhitelistSection, WhitelistKey}] = FallbackWhitelist,
- couch_log:error("Only whitelisting ~s/~s due to error"
- " parsing: ~p",
- [WhitelistSection, WhitelistKey,
- WhitelistValue]),
- FallbackWhitelist
- end,
-
- IsRequestedKeyVal = fun(Element) ->
- case Element of
- {A, B} ->
- % For readability, tuples may be used instead of binaries
- % in the whitelist.
- case {couch_util:to_binary(A), couch_util:to_binary(B)} of
- {Section, Key} ->
- true;
- {Section, <<"*">>} ->
- true;
- _Else ->
- false
- end;
- _Else ->
- false
- end
- end,
-
- case lists:any(IsRequestedKeyVal, Whitelist) of
- true ->
- % Allow modifying this whitelisted variable.
- handle_approved_config_req(Req, Persist);
- _NotWhitelisted ->
- % Disallow modifying this non-whitelisted variable.
- send_error(Req, 400, <<"modification_not_allowed">>,
- ?l2b("This config variable is read-only"))
- end
- end;
-handle_config_req(Req) ->
- send_method_not_allowed(Req, "GET,PUT,POST,DELETE").
-
-% PUT /_config/Section/Key
-% "value"
-handle_approved_config_req(Req, Persist) ->
- Query = couch_httpd:qs(Req),
- UseRawValue = case lists:keyfind("raw", 1, Query) of
- false -> false; % Not specified
- {"raw", ""} -> false; % Specified with no value, i.e. "?raw" and "?raw="
- {"raw", "false"} -> false;
- {"raw", "true"} -> true;
- {"raw", InvalidValue} -> InvalidValue
- end,
- handle_approved_config_req(Req, Persist, UseRawValue).
-
-handle_approved_config_req(#httpd{method='PUT', path_parts=[_, Section, Key]}=Req,
- Persist, UseRawValue)
- when UseRawValue =:= false orelse UseRawValue =:= true ->
- RawValue = couch_httpd:json_body(Req),
- Value = case UseRawValue of
- true ->
- % Client requests no change to the provided value.
- RawValue;
- false ->
- % Pre-process the value as necessary.
- case Section of
- <<"admins">> ->
- couch_passwords:hash_admin_password(RawValue);
- _ ->
- couch_util:trim(RawValue)
- end
- end,
- OldValue = config:get(Section, Key, ""),
- case config:set(Section, Key, ?b2l(Value), Persist) of
- ok ->
- send_json(Req, 200, list_to_binary(OldValue));
- Error ->
- throw(Error)
- end;
-
-handle_approved_config_req(#httpd{method='PUT'}=Req, _Persist, UseRawValue) ->
- Err = io_lib:format("Bad value for 'raw' option: ~s", [UseRawValue]),
- send_json(Req, 400, {[{error, ?l2b(Err)}]});
-
-% DELETE /_config/Section/Key
-handle_approved_config_req(#httpd{method='DELETE',path_parts=[_,Section,Key]}=Req,
- Persist, _UseRawValue) ->
- case config:get(Section, Key, undefined) of
- undefined ->
- throw({not_found, unknown_config_value});
- OldValue ->
- config:delete(Section, Key, Persist),
- send_json(Req, 200, list_to_binary(OldValue))
- end.
-
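
The PUT/DELETE /_config handler deleted above parses the httpd/config_whitelist value with couch_util:parse_term/1 and expects a list of section/key pairs, where a key of <<"*">> whitelists the whole section. A hedged example of a term that check would accept (entries are illustrative, not a recommendation):

    [{<<"httpd">>, <<"config_whitelist">>},
     {<<"log">>, <<"level">>},
     {couchdb, '*'}]   %% atom pairs also work; couch_util:to_binary/1 normalizes them
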
diff --git a/src/couch/src/couch_httpd_multipart.erl b/src/couch/src/couch_httpd_multipart.erl
index 33795a3a1..7e6e7d6c9 100644
--- a/src/couch/src/couch_httpd_multipart.erl
+++ b/src/couch/src/couch_httpd_multipart.erl
@@ -22,6 +22,7 @@
]).
-include_lib("couch/include/couch_db.hrl").
+-include_lib("kernel/include/logger.hrl").
decode_multipart_stream(ContentType, DataFun, Ref) ->
Parent = self(),
@@ -45,10 +46,20 @@ decode_multipart_stream(ContentType, DataFun, Ref) ->
{'DOWN', ParserRef, _, _, normal} ->
ok;
{'DOWN', ParserRef, process, Parser, {{nocatch, {Error, Msg}}, _}} ->
+ ?LOG_ERROR(#{
+ what => multipart_streamer_failure,
+ ref => ParserRef,
+ details => Msg
+ }),
couch_log:error("Multipart streamer ~p died with reason ~p",
[ParserRef, Msg]),
throw({Error, Msg});
{'DOWN', ParserRef, _, _, Reason} ->
+ ?LOG_ERROR(#{
+ what => multipart_streamer_failure,
+ ref => ParserRef,
+ details => Reason
+ }),
couch_log:error("Multipart streamer ~p died with reason ~p",
[ParserRef, Reason]),
throw({error, Reason})
diff --git a/src/couch/src/couch_httpd_rewrite.erl b/src/couch/src/couch_httpd_rewrite.erl
deleted file mode 100644
index 2845c0b16..000000000
--- a/src/couch/src/couch_httpd_rewrite.erl
+++ /dev/null
@@ -1,484 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-%
-% bind_path is based on bind method from Webmachine
-
-
-%% @doc Module for URL rewriting by pattern matching.
-
--module(couch_httpd_rewrite).
-
--compile(tuple_calls).
-
--export([handle_rewrite_req/3]).
--include_lib("couch/include/couch_db.hrl").
-
--define(SEPARATOR, $\/).
--define(MATCH_ALL, {bind, <<"*">>}).
-
-
-%% doc The http rewrite handler. All rewriting is done from
-%% /dbname/_design/ddocname/_rewrite by default.
-%%
-%% Each rule should be in the "rewrites" member of the design doc.
-%% Example of a complete rule:
-%%
-%% {
-%% ....
-%% "rewrites": [
-%% {
-%% "from": "",
-%% "to": "index.html",
-%% "method": "GET",
-%% "query": {}
-%% }
-%% ]
-%% }
-%%
-%% from: the path pattern used to bind the current URI to the rule. It
-%% uses pattern matching for that.
-%%
-%% to: the rule used to rewrite a URL. It can contain variables that depend on
-%% bindings discovered during pattern matching and on query args (URL args and
-%% the "query" member).
-%%
-%% method: the HTTP method used to bind the request method to the rule; "*" by default.
-%% query: query args you want to define; they can contain dynamic variables
-%% by binding the key to the bindings.
-%%
-%%
-%% to and from are paths with patterns. A pattern can be a string starting with
-%% ":" or "*". Example:
-%% /somepath/:var/*
-%%
-%% This path is converted into an Erlang list by splitting on "/". Each var is
-%% converted to an atom. "*" is converted to the '*' atom. The pattern matching is
-%% done by splitting the request URL on "/" into a list of tokens. A string pattern
-%% matches an equal token. The star atom ('*' in single quotes) matches any number
-%% of tokens, but may only be present as the last pathterm in a pathspec. If all
-%% tokens are matched and all pathterms are used, then the pathspec matches. It works
-%% like webmachine. Each identified token is reused in the to rule and in query.
-%%
-%% The pattern matching is done by first matching the request method to a rule; by
-%% default all methods match a rule (method is equal to "*" by default). Then
-%% it tries to match the path to one rule. If no rule matches, a 404 error
-%% is returned.
-%%
-%% Once a rule is found, we rewrite the request URL using the "to" and
-%% "query" members. The identified tokens are matched to the rule and
-%% replace the vars. If '*' is found in the rule, it will contain the remaining
-%% part, if any.
-%%
-%% Examples:
-%%
-%% Dispatch rule URL TO Tokens
-%%
-%% {"from": "/a/b", /a/b?k=v /some/b?k=v var =:= b
-%% "to": "/some/"} k = v
-%%
-%% {"from": "/a/b", /a/b /some/b?var=b var =:= b
-%% "to": "/some/:var"}
-%%
-%% {"from": "/a", /a /some
-%% "to": "/some/*"}
-%%
-%% {"from": "/a/*", /a/b/c /some/b/c
-%% "to": "/some/*"}
-%%
-%% {"from": "/a", /a /some
-%% "to": "/some/*"}
-%%
-%% {"from": "/a/:foo/*", /a/b/c /some/b/c?foo=b foo =:= b
-%% "to": "/some/:foo/*"}
-%%
-%% {"from": "/a/:foo", /a/b /some/?k=b&foo=b foo =:= b
-%% "to": "/some",
-%% "query": {
-%% "k": ":foo"
-%% }}
-%%
-%% {"from": "/a", /a?foo=b /some/b foo =:= b
-%% "to": "/some/:foo",
-%% }}
-
-
-
-handle_rewrite_req(#httpd{
- path_parts=[DbName, <<"_design">>, DesignName, _Rewrite|PathParts],
- method=Method,
- mochi_req=MochiReq}=Req, _Db, DDoc) ->
-
- % we are in a design handler
- DesignId = <<"_design/", DesignName/binary>>,
- Prefix = <<"/", (?l2b(couch_util:url_encode(DbName)))/binary, "/", DesignId/binary>>,
- QueryList = lists:map(fun decode_query_value/1, couch_httpd:qs(Req)),
-
- RewritesSoFar = erlang:get(?REWRITE_COUNT),
- MaxRewrites = list_to_integer(config:get("httpd", "rewrite_limit", "100")),
- case RewritesSoFar >= MaxRewrites of
- true ->
- throw({bad_request, <<"Exceeded rewrite recursion limit">>});
- false ->
- erlang:put(?REWRITE_COUNT, RewritesSoFar + 1)
- end,
-
- #doc{body={Props}} = DDoc,
-
- % get rules from ddoc
- case couch_util:get_value(<<"rewrites">>, Props) of
- undefined ->
- couch_httpd:send_error(Req, 404, <<"rewrite_error">>,
- <<"Invalid path.">>);
- Bin when is_binary(Bin) ->
- couch_httpd:send_error(Req, 400, <<"rewrite_error">>,
- <<"Rewrite rules are a String. They must be a JSON Array.">>);
- Rules ->
- % create dispatch list from rules
- DispatchList = [make_rule(Rule) || {Rule} <- Rules],
- Method1 = couch_util:to_binary(Method),
-
- % get raw path by matching url to a rule. Throws not_found.
- {NewPathParts0, Bindings0} =
- try_bind_path(DispatchList, Method1, PathParts, QueryList),
- NewPathParts = [quote_plus(X) || X <- NewPathParts0],
- Bindings = maybe_encode_bindings(Bindings0),
-
- Path0 = string:join(NewPathParts, [?SEPARATOR]),
-
- % if path is relative detect it and rewrite path
- Path1 = case mochiweb_util:safe_relative_path(Path0) of
- undefined ->
- ?b2l(Prefix) ++ "/" ++ Path0;
- P1 ->
- ?b2l(Prefix) ++ "/" ++ P1
- end,
-
- Path2 = normalize_path(Path1),
-
- Path3 = case Bindings of
- [] ->
- Path2;
- _ ->
- [Path2, "?", mochiweb_util:urlencode(Bindings)]
- end,
-
- RawPath1 = ?b2l(iolist_to_binary(Path3)),
-
- % In order to do OAuth correctly, we have to save the
- % requested path. We use default so chained rewriting
- % won't replace the original header.
- Headers = mochiweb_headers:default("x-couchdb-requested-path",
- MochiReq:get(raw_path),
- MochiReq:get(headers)),
-
- couch_log:debug("rewrite to ~p ~n", [RawPath1]),
-
- % build a new mochiweb request
- MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
- MochiReq:get(method),
- RawPath1,
- MochiReq:get(version),
- Headers),
-
- % cleanup; this forces mochiweb to reparse the raw URI.
- MochiReq1:cleanup(),
-
- #httpd{
- db_url_handlers = DbUrlHandlers,
- design_url_handlers = DesignUrlHandlers,
- default_fun = DefaultFun,
- url_handlers = UrlHandlers,
- user_ctx = UserCtx,
- auth = Auth
- } = Req,
-
- erlang:put(pre_rewrite_auth, Auth),
- erlang:put(pre_rewrite_user_ctx, UserCtx),
- couch_httpd:handle_request_int(MochiReq1, DefaultFun,
- UrlHandlers, DbUrlHandlers, DesignUrlHandlers)
- end.
-
-quote_plus({bind, X}) ->
- mochiweb_util:quote_plus(X);
-quote_plus(X) ->
- mochiweb_util:quote_plus(X).
-
-%% @doc Try to find a rule matching the current URL. If none is found,
-%% a 404 not_found error is raised.
-try_bind_path([], _Method, _PathParts, _QueryList) ->
- throw(not_found);
-try_bind_path([Dispatch|Rest], Method, PathParts, QueryList) ->
- [{PathParts1, Method1}, RedirectPath, QueryArgs, Formats] = Dispatch,
- case bind_method(Method1, Method) of
- true ->
- case bind_path(PathParts1, PathParts, []) of
- {ok, Remaining, Bindings} ->
- Bindings1 = Bindings ++ QueryList,
- % we parse the query args from the rule and fill them
- % in with binding vars where needed
- QueryArgs1 = make_query_list(QueryArgs, Bindings1,
- Formats, []),
- % remove bindings in Bindings1 that are already in
- % QueryArgs1
- Bindings2 = lists:foldl(fun({K, V}, Acc) ->
- K1 = to_binding(K),
- KV = case couch_util:get_value(K1, QueryArgs1) of
- undefined -> [{K1, V}];
- _V1 -> []
- end,
- Acc ++ KV
- end, [], Bindings1),
-
- FinalBindings = Bindings2 ++ QueryArgs1,
- NewPathParts = make_new_path(RedirectPath, FinalBindings,
- Remaining, []),
- {NewPathParts, FinalBindings};
- fail ->
- try_bind_path(Rest, Method, PathParts, QueryList)
- end;
- false ->
- try_bind_path(Rest, Method, PathParts, QueryList)
- end.
-
-%% Dynamically rewrite the query list given as the "query" member in
-%% rewrites. Each value is replaced by a binding or an argument
-%% passed in the URL.
-make_query_list([], _Bindings, _Formats, Acc) ->
- Acc;
-make_query_list([{Key, {Value}}|Rest], Bindings, Formats, Acc) ->
- Value1 = {Value},
- make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_binary(Value) ->
- Value1 = replace_var(Value, Bindings, Formats),
- make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_list(Value) ->
- Value1 = replace_var(Value, Bindings, Formats),
- make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) ->
- make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value}|Acc]).
-
-replace_var(<<"*">>=Value, Bindings, Formats) ->
- get_var(Value, Bindings, Value, Formats);
-replace_var(<<":", Var/binary>> = Value, Bindings, Formats) ->
- get_var(Var, Bindings, Value, Formats);
-replace_var(Value, _Bindings, _Formats) when is_binary(Value) ->
- Value;
-replace_var(Value, Bindings, Formats) when is_list(Value) ->
- lists:reverse(lists:foldl(fun
- (<<":", Var/binary>>=Value1, Acc) ->
- [get_var(Var, Bindings, Value1, Formats)|Acc];
- (Value1, Acc) ->
- [Value1|Acc]
- end, [], Value));
-replace_var(Value, _Bindings, _Formats) ->
- Value.
-
-maybe_json(Key, Value) ->
- case lists:member(Key, [<<"key">>, <<"startkey">>, <<"start_key">>,
- <<"endkey">>, <<"end_key">>, <<"keys">>]) of
- true ->
- ?JSON_ENCODE(Value);
- false ->
- Value
- end.
-
-get_var(VarName, Props, Default, Formats) ->
- VarName1 = to_binding(VarName),
- Val = couch_util:get_value(VarName1, Props, Default),
- maybe_format(VarName, Val, Formats).
-
-maybe_format(VarName, Value, Formats) ->
- case couch_util:get_value(VarName, Formats) of
- undefined ->
- Value;
- Format ->
- format(Format, Value)
- end.
-
-format(<<"int">>, Value) when is_integer(Value) ->
- Value;
-format(<<"int">>, Value) when is_binary(Value) ->
- format(<<"int">>, ?b2l(Value));
-format(<<"int">>, Value) when is_list(Value) ->
- case (catch list_to_integer(Value)) of
- IntVal when is_integer(IntVal) ->
- IntVal;
- _ ->
- Value
- end;
-format(<<"bool">>, Value) when is_binary(Value) ->
- format(<<"bool">>, ?b2l(Value));
-format(<<"bool">>, Value) when is_list(Value) ->
- case string:to_lower(Value) of
- "true" -> true;
- "false" -> false;
- _ -> Value
- end;
-format(_Format, Value) ->
- Value.
-
-%% doc: build the new path from bindings. Bindings are query args
-%% (+ dynamic query rewritten if needed) and bindings found in the
-%% bind_path step.
-make_new_path([], _Bindings, _Remaining, Acc) ->
- lists:reverse(Acc);
-make_new_path([?MATCH_ALL], _Bindings, Remaining, Acc) ->
- Acc1 = lists:reverse(Acc) ++ Remaining,
- Acc1;
-make_new_path([?MATCH_ALL|_Rest], _Bindings, Remaining, Acc) ->
- Acc1 = lists:reverse(Acc) ++ Remaining,
- Acc1;
-make_new_path([{bind, P}|Rest], Bindings, Remaining, Acc) ->
- P2 = case couch_util:get_value({bind, P}, Bindings) of
- undefined -> << "undefined">>;
- P1 ->
- iolist_to_binary(P1)
- end,
- make_new_path(Rest, Bindings, Remaining, [P2|Acc]);
-make_new_path([P|Rest], Bindings, Remaining, Acc) ->
- make_new_path(Rest, Bindings, Remaining, [P|Acc]).
-
-
-%% @doc Check whether the request method matches the rule method. If the
-%% method rule is '*', which is the default, all
-%% request methods will bind. It allows us to make rules
-%% that depend on the HTTP method.
-bind_method(?MATCH_ALL, _Method ) ->
- true;
-bind_method({bind, Method}, Method) ->
- true;
-bind_method(_, _) ->
- false.
-
-
-%% @doc Bind a path. Using the rule's "from" part, we try to bind variables
-%% to the current URL by pattern matching.
-bind_path([], [], Bindings) ->
- {ok, [], Bindings};
-bind_path([?MATCH_ALL], [Match|_RestMatch]=Rest, Bindings) ->
- {ok, Rest, [{?MATCH_ALL, Match}|Bindings]};
-bind_path(_, [], _) ->
- fail;
-bind_path([{bind, Token}|RestToken],[Match|RestMatch],Bindings) ->
- bind_path(RestToken, RestMatch, [{{bind, Token}, Match}|Bindings]);
-bind_path([Token|RestToken], [Token|RestMatch], Bindings) ->
- bind_path(RestToken, RestMatch, Bindings);
-bind_path(_, _, _) ->
- fail.
-
-
-%% normalize path.
-normalize_path(Path) ->
- "/" ++ string:join(normalize_path1(string:tokens(Path,
- "/"), []), [?SEPARATOR]).
-
-
-normalize_path1([], Acc) ->
- lists:reverse(Acc);
-normalize_path1([".."|Rest], Acc) ->
- Acc1 = case Acc of
- [] -> [".."|Acc];
- [T|_] when T =:= ".." -> [".."|Acc];
- [_|R] -> R
- end,
- normalize_path1(Rest, Acc1);
-normalize_path1(["."|Rest], Acc) ->
- normalize_path1(Rest, Acc);
-normalize_path1([Path|Rest], Acc) ->
- normalize_path1(Rest, [Path|Acc]).
-
-
-%% @doc Transform a JSON rule into Erlang terms for pattern matching.
-make_rule(Rule) ->
- Method = case couch_util:get_value(<<"method">>, Rule) of
- undefined -> ?MATCH_ALL;
- M -> to_binding(M)
- end,
- QueryArgs = case couch_util:get_value(<<"query">>, Rule) of
- undefined -> [];
- {Args} -> Args
- end,
- FromParts = case couch_util:get_value(<<"from">>, Rule) of
- undefined -> [?MATCH_ALL];
- From ->
- parse_path(From)
- end,
- ToParts = case couch_util:get_value(<<"to">>, Rule) of
- undefined ->
- throw({error, invalid_rewrite_target});
- To ->
- parse_path(To)
- end,
- Formats = case couch_util:get_value(<<"formats">>, Rule) of
- undefined -> [];
- {Fmts} -> Fmts
- end,
- [{FromParts, Method}, ToParts, QueryArgs, Formats].
-
-parse_path(Path) ->
- {ok, SlashRE} = re:compile(<<"\\/">>),
- path_to_list(re:split(Path, SlashRE), [], 0).
-
-%% @doc Convert a path rule (from or to) to an Erlang list.
-%% "*" and path variables starting with ":" are converted
-%% into bindings.
-path_to_list([], Acc, _DotDotCount) ->
- lists:reverse(Acc);
-path_to_list([<<>>|R], Acc, DotDotCount) ->
- path_to_list(R, Acc, DotDotCount);
-path_to_list([<<"*">>|R], Acc, DotDotCount) ->
- path_to_list(R, [?MATCH_ALL|Acc], DotDotCount);
-path_to_list([<<"..">>|R], Acc, DotDotCount) when DotDotCount == 2 ->
- case config:get("httpd", "secure_rewrites", "true") of
- "false" ->
- path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
- _Else ->
- couch_log:info("insecure_rewrite_rule ~p blocked",
- [lists:reverse(Acc) ++ [<<"..">>] ++ R]),
- throw({insecure_rewrite_rule, "too many ../.. segments"})
- end;
-path_to_list([<<"..">>|R], Acc, DotDotCount) ->
- path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
-path_to_list([P|R], Acc, DotDotCount) ->
- P1 = case P of
- <<":", Var/binary>> ->
- to_binding(Var);
- _ -> P
- end,
- path_to_list(R, [P1|Acc], DotDotCount).
-
-maybe_encode_bindings([]) ->
- [];
-maybe_encode_bindings(Props) ->
- lists:foldl(fun
- ({{bind, <<"*">>}, _V}, Acc) ->
- Acc;
- ({{bind, K}, V}, Acc) ->
- V1 = iolist_to_binary(maybe_json(K, V)),
- [{K, V1}|Acc]
- end, [], Props).
-
-decode_query_value({K,V}) ->
- case lists:member(K, ["key", "startkey", "start_key",
- "endkey", "end_key", "keys"]) of
- true ->
- {to_binding(K), ?JSON_DECODE(V)};
- false ->
- {to_binding(K), ?l2b(V)}
- end.
-
-to_binding({bind, V}) ->
- {bind, V};
-to_binding(V) when is_list(V) ->
- to_binding(?l2b(V));
-to_binding(V) ->
- {bind, V}.
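
To make the deleted matching code concrete: a "from" path such as "/a/:foo/*" is split on "/" into literal tokens and bindings, which bind_path/3 then matches against the request's path parts. A worked trace against /a/b/c (values illustrative):

    %% parse_path(<<"/a/:foo/*">>)
    %%     -> [<<"a">>, {bind, <<"foo">>}, {bind, <<"*">>}]
    %% bind_path([<<"a">>, {bind, <<"foo">>}, {bind, <<"*">>}],
    %%           [<<"a">>, <<"b">>, <<"c">>], [])
    %%     -> {ok, [<<"c">>],
    %%         [{{bind, <<"*">>}, <<"c">>}, {{bind, <<"foo">>}, <<"b">>}]}
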
diff --git a/src/couch/src/couch_httpd_vhost.erl b/src/couch/src/couch_httpd_vhost.erl
index 574dba9c8..3d6b4da01 100644
--- a/src/couch/src/couch_httpd_vhost.erl
+++ b/src/couch/src/couch_httpd_vhost.erl
@@ -27,6 +27,7 @@
-export([handle_config_change/5, handle_config_terminate/3]).
-include_lib("couch/include/couch_db.hrl").
+-include_lib("kernel/include/logger.hrl").
-define(SEPARATOR, $\/).
-define(MATCH_ALL, {bind, '*'}).
@@ -146,6 +147,11 @@ redirect_to_vhost(MochiReq, VhostTarget) ->
Path = MochiReq:get(raw_path),
Target = append_path(VhostTarget, Path),
+ ?LOG_DEBUG(#{
+ what => vhost_redirect,
+ raw_path => Path,
+ target => Target
+ }),
couch_log:debug("Vhost Target: '~p'~n", [Target]),
Headers = mochiweb_headers:enter("x-couchdb-vhost-path", Path,
diff --git a/src/couch/src/couch_lru.erl b/src/couch/src/couch_lru.erl
deleted file mode 100644
index a3057136f..000000000
--- a/src/couch/src/couch_lru.erl
+++ /dev/null
@@ -1,67 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_lru).
--export([new/0, sizes/1, insert/2, update/2, close/1]).
-
--include("couch_server_int.hrl").
-
-new() ->
- {gb_trees:empty(), dict:new()}.
-
-sizes({Tree, Dict}) ->
- {gb_trees:size(Tree), dict:size(Dict)}.
-
-insert(DbName, {Tree0, Dict0}) ->
- Lru = couch_util:unique_monotonic_integer(),
- {gb_trees:insert(Lru, DbName, Tree0), dict:store(DbName, Lru, Dict0)}.
-
-update(DbName, {Tree0, Dict0}) ->
- case dict:find(DbName, Dict0) of
- {ok, Old} ->
- New = couch_util:unique_monotonic_integer(),
- Tree = gb_trees:insert(New, DbName, gb_trees:delete(Old, Tree0)),
- Dict = dict:store(DbName, New, Dict0),
- {Tree, Dict};
- error ->
- % We closed this database before processing the update. Ignore
- {Tree0, Dict0}
- end.
-
-%% Attempt to close the oldest idle database.
-close({Tree, _} = Cache) ->
- close_int(gb_trees:next(gb_trees:iterator(Tree)), Cache).
-
-%% internals
-
-close_int(none, _) ->
- false;
-close_int({Lru, DbName, Iter}, {Tree, Dict} = Cache) ->
- case ets:update_element(couch_dbs, DbName, {#entry.lock, locked}) of
- true ->
- [#entry{db = Db, pid = Pid}] = ets:lookup(couch_dbs, DbName),
- case couch_db:is_idle(Db) of true ->
- true = ets:delete(couch_dbs, DbName),
- true = ets:delete(couch_dbs_pid_to_name, Pid),
- exit(Pid, kill),
- {true, {gb_trees:delete(Lru, Tree), dict:erase(DbName, Dict)}};
- false ->
- ElemSpec = {#entry.lock, unlocked},
- true = ets:update_element(couch_dbs, DbName, ElemSpec),
- couch_stats:increment_counter([couchdb, couch_server, lru_skip]),
- close_int(gb_trees:next(Iter), update(DbName, Cache))
- end;
- false ->
- NewTree = gb_trees:delete(Lru, Tree),
- NewIter = gb_trees:iterator(NewTree),
- close_int(gb_trees:next(NewIter), {NewTree, dict:erase(DbName, Dict)})
-end.
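
As a usage sketch of the removed LRU structure (db name illustrative; close/1 is omitted because it also depends on the couch_dbs ETS tables):

    Lru0 = couch_lru:new(),
    Lru1 = couch_lru:insert(<<"db1">>, Lru0),
    Lru2 = couch_lru:update(<<"db1">>, Lru1),
    {1, 1} = couch_lru:sizes(Lru2).
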
diff --git a/src/couch/src/couch_multidb_changes.erl b/src/couch/src/couch_multidb_changes.erl
deleted file mode 100644
index 09278656e..000000000
--- a/src/couch/src/couch_multidb_changes.erl
+++ /dev/null
@@ -1,903 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_multidb_changes).
-
--behaviour(gen_server).
-
--export([
- start_link/4
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_info/2,
- handle_cast/2,
- code_change/3,
- format_status/2
-]).
-
--export([
- changes_reader/3,
- changes_reader_cb/3
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--define(CTX, {user_ctx, #user_ctx{roles=[<<"_admin">>, <<"_replicator">>]}}).
-
--define(AVG_DELAY_MSEC, 10).
--define(MAX_DELAY_MSEC, 120000).
-
--record(state, {
- tid :: ets:tid(),
- mod :: atom(),
- ctx :: term(),
- suffix :: binary(),
- event_server :: reference(),
- scanner :: nil | pid(),
- pids :: [{binary(), pid()}],
- skip_ddocs :: boolean()
-}).
-
-% Behavior API
-
-% For each db shard with a matching suffix, report created,
-% deleted, found (discovered) and change events.
-
--callback db_created(DbName :: binary(), Context :: term()) ->
- Context :: term().
-
--callback db_deleted(DbName :: binary(), Context :: term()) ->
- Context :: term().
-
--callback db_found(DbName :: binary(), Context :: term()) ->
- Context :: term().
-
--callback db_change(DbName :: binary(), Change :: term(), Context :: term()) ->
- Context :: term().
-
-
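
A minimal sketch of a callback module satisfying the behaviour removed here (module name and logging are illustrative; each callback threads the context through unchanged):

    -module(my_db_listener).
    -behaviour(couch_multidb_changes).
    -export([db_created/2, db_deleted/2, db_found/2, db_change/3]).

    db_created(_DbName, Ctx) -> Ctx.
    db_deleted(_DbName, Ctx) -> Ctx.
    db_found(_DbName, Ctx) -> Ctx.
    db_change(DbName, Change, Ctx) ->
        couch_log:debug("change in ~s: ~p", [DbName, Change]),
        Ctx.

Such a module would be started with couch_multidb_changes:start_link(<<"_replicator">>, my_db_listener, nil, [skip_ddocs]) (suffix illustrative).
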
-% External API
-
-
-% Opts list can contain:
-% - `skip_ddocs` : Skip design docs
-
--spec start_link(binary(), module(), term(), list()) ->
- {ok, pid()} | ignore | {error, term()}.
-start_link(DbSuffix, Module, Context, Opts) when
- is_binary(DbSuffix), is_atom(Module), is_list(Opts) ->
- gen_server:start_link(?MODULE, [DbSuffix, Module, Context, Opts], []).
-
-
-% gen_server callbacks
-
-init([DbSuffix, Module, Context, Opts]) ->
- process_flag(trap_exit, true),
- Server = self(),
- {ok, #state{
- tid = ets:new(?MODULE, [set, protected]),
- mod = Module,
- ctx = Context,
- suffix = DbSuffix,
- event_server = register_with_event_server(Server),
- scanner = spawn_link(fun() -> scan_all_dbs(Server, DbSuffix) end),
- pids = [],
- skip_ddocs = proplists:is_defined(skip_ddocs, Opts)
- }}.
-
-
-terminate(_Reason, _State) ->
- ok.
-
-
-handle_call({change, DbName, Change}, _From,
- #state{skip_ddocs=SkipDDocs, mod=Mod, ctx=Ctx} = State) ->
- case {SkipDDocs, is_design_doc(Change)} of
- {true, true} ->
- {reply, ok, State};
- {_, _} ->
- {reply, ok, State#state{ctx=Mod:db_change(DbName, Change, Ctx)}}
- end;
-
-handle_call({checkpoint, DbName, EndSeq}, _From, #state{tid=Ets} = State) ->
- case ets:lookup(Ets, DbName) of
- [] ->
- true = ets:insert(Ets, {DbName, EndSeq, false});
- [{DbName, _OldSeq, Rescan}] ->
- true = ets:insert(Ets, {DbName, EndSeq, Rescan})
- end,
- {reply, ok, State}.
-
-
-handle_cast({resume_scan, DbName}, State) ->
- {noreply, resume_scan(DbName, State)}.
-
-
-handle_info({'$couch_event', DbName, Event}, #state{suffix = Suf} = State) ->
- case Suf =:= couch_db:dbname_suffix(DbName) of
- true ->
- {noreply, db_callback(Event, DbName, State)};
- _ ->
- {noreply, State}
- end;
-
-handle_info({'DOWN', Ref, _, _, Info}, #state{event_server = Ref} = State) ->
- {stop, {couch_event_server_died, Info}, State};
-
-handle_info({'EXIT', From, normal}, #state{scanner = From} = State) ->
- {noreply, State#state{scanner=nil}};
-
-handle_info({'EXIT', From, Reason}, #state{scanner = From} = State) ->
- {stop, {scanner_died, Reason}, State};
-
-handle_info({'EXIT', From, Reason}, #state{pids = Pids} = State) ->
- couch_log:debug("~p change feed exited ~p", [State#state.suffix, From]),
- case lists:keytake(From, 2, Pids) of
- {value, {DbName, From}, NewPids} ->
- if Reason == normal -> ok; true ->
- Fmt = "~s : Known change feed ~w died :: ~w",
- couch_log:error(Fmt, [?MODULE, From, Reason])
- end,
- NewState = State#state{pids = NewPids},
- case ets:lookup(State#state.tid, DbName) of
- [{DbName, _EndSeq, true}] ->
- {noreply, resume_scan(DbName, NewState)};
- _ ->
- {noreply, NewState}
- end;
- false when Reason == normal ->
- {noreply, State};
- false ->
- Fmt = "~s(~p) : Unknown pid ~w died :: ~w",
- couch_log:error(Fmt, [?MODULE, State#state.suffix, From, Reason]),
- {stop, {unexpected_exit, From, Reason}, State}
- end;
-
-handle_info(_Msg, State) ->
- {noreply, State}.
-
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-
-format_status(_Opt, [_PDict, State]) ->
- #state{
- pids=Pids
- } = State,
- Scrubbed = State#state{
- pids={length, length(Pids)}
- },
- [{data, [{"State",
- ?record_to_keyval(state, Scrubbed)
- }]}].
-
-% Private functions
-
--spec register_with_event_server(pid()) -> reference().
-register_with_event_server(Server) ->
- Ref = erlang:monitor(process, couch_event_server),
- couch_event:register_all(Server),
- Ref.
-
-
--spec db_callback(created | deleted | updated, binary(), #state{}) -> #state{}.
-db_callback(created, DbName, #state{mod = Mod, ctx = Ctx} = State) ->
- NewState = State#state{ctx = Mod:db_created(DbName, Ctx)},
- resume_scan(DbName, NewState);
-db_callback(deleted, DbName, #state{mod = Mod, ctx = Ctx} = State) ->
- State#state{ctx = Mod:db_deleted(DbName, Ctx)};
-db_callback(updated, DbName, State) ->
- resume_scan(DbName, State);
-db_callback(_Other, _DbName, State) ->
- State.
-
-
--spec resume_scan(binary(), #state{}) -> #state{}.
-resume_scan(DbName, #state{pids=Pids, tid=Ets} = State) ->
- case {lists:keyfind(DbName, 1, Pids), ets:lookup(Ets, DbName)} of
- {{DbName, _}, []} ->
- % Found an existing change feed, but no entry in ETS
- % Flag a need to rescan from the beginning
- true = ets:insert(Ets, {DbName, 0, true}),
- State;
- {{DbName, _}, [{DbName, EndSeq, _}]} ->
- % Found existing change feed and entry in ETS
- % Flag a need to rescan from last ETS checkpoint
- true = ets:insert(Ets, {DbName, EndSeq, true}),
- State;
- {false, []} ->
- % No existing change feed running. No entry in ETS.
- % This is the first time seeing this db shard.
- % Notify the user with a found callback, insert a checkpoint
- % entry in ETS starting from 0, and start a change feed.
- true = ets:insert(Ets, {DbName, 0, false}),
- Mod = State#state.mod,
- Ctx = Mod:db_found(DbName, State#state.ctx),
- Pid = start_changes_reader(DbName, 0),
- State#state{ctx=Ctx, pids=[{DbName, Pid} | Pids]};
- {false, [{DbName, EndSeq, _}]} ->
- % No existing change feed running. Found existing checkpoint.
- % Start a new change reader from last checkpoint.
- true = ets:insert(Ets, {DbName, EndSeq, false}),
- Pid = start_changes_reader(DbName, EndSeq),
- State#state{pids=[{DbName, Pid} | Pids]}
- end.
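A checkpoint entry is always a {DbName, EndSeq, Rescan} tuple; a small standalone sketch of the table manipulation performed by the first two cases above (table name and values are illustrative):

ets_checkpoint_example() ->
    % The checkpoint table maps a shard name to {DbName, EndSeq, Rescan}.
    Ets = ets:new(example_checkpoints, [set, public]),
    DbName = <<"shards/00000000-3fffffff/db.0123456789">>,
    % First sighting: start the checkpoint at 0 with no rescan pending.
    true = ets:insert(Ets, {DbName, 0, false}),
    % The shard is touched again while its change feed is still running:
    % keep the checkpoint but flag a rescan for when the feed exits.
    [{DbName, EndSeq, _}] = ets:lookup(Ets, DbName),
    true = ets:insert(Ets, {DbName, EndSeq, true}),
    ets:tab2list(Ets).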
-
-
-start_changes_reader(DbName, Since) ->
- spawn_link(?MODULE, changes_reader, [self(), DbName, Since]).
-
-
-changes_reader(Server, DbName, Since) ->
- {ok, Db} = couch_db:open_int(DbName, [?CTX, sys_db]),
- ChangesArgs = #changes_args{
- include_docs = true,
- since = Since,
- feed = "normal",
- timeout = infinity
- },
- ChFun = couch_changes:handle_db_changes(ChangesArgs, {json_req, null}, Db),
- ChFun({fun ?MODULE:changes_reader_cb/3, {Server, DbName}}).
-
-
-changes_reader_cb({change, Change, _}, _, {Server, DbName}) ->
- ok = gen_server:call(Server, {change, DbName, Change}, infinity),
- {Server, DbName};
-changes_reader_cb({stop, EndSeq}, _, {Server, DbName}) ->
- ok = gen_server:call(Server, {checkpoint, DbName, EndSeq}, infinity),
- {Server, DbName};
-changes_reader_cb(_, _, Acc) ->
- Acc.
-
-
-scan_all_dbs(Server, DbSuffix) when is_pid(Server) ->
- ok = scan_local_db(Server, DbSuffix),
- {ok, Db} = mem3_util:ensure_exists(
- config:get("mem3", "shards_db", "_dbs")),
- ChangesFun = couch_changes:handle_db_changes(#changes_args{}, nil, Db),
- ChangesFun({fun scan_changes_cb/3, {Server, DbSuffix, 1}}),
- couch_db:close(Db).
-
-
-scan_changes_cb({change, {Change}, _}, _, {_Server, DbSuffix, _Count} = Acc) ->
- DbName = couch_util:get_value(<<"id">>, Change),
- case DbName of <<"_design/", _/binary>> -> Acc; _Else ->
- NameMatch = DbSuffix =:= couch_db:dbname_suffix(DbName),
- case {NameMatch, couch_replicator_utils:is_deleted(Change)} of
- {false, _} ->
- Acc;
- {true, true} ->
- Acc;
- {true, false} ->
- Shards = local_shards(DbName),
- lists:foldl(fun notify_fold/2, Acc, Shards)
- end
- end;
-scan_changes_cb(_, _, Acc) ->
- Acc.
-
-
-local_shards(DbName) ->
- try
- [ShardName || #shard{name = ShardName} <- mem3:local_shards(DbName)]
- catch
- error:database_does_not_exist ->
- []
- end.
-
-
-notify_fold(DbName, {Server, DbSuffix, Count}) ->
- Jitter = jitter(Count),
- spawn_link(fun() ->
- timer:sleep(Jitter),
- gen_server:cast(Server, {resume_scan, DbName})
- end),
- {Server, DbSuffix, Count + 1}.
-
-
- % Jitter is proportional to the number of shards found so far. This is done to
- % avoid a stampede, where the callback function would otherwise be notified with
- % a potentially large number of shards back to back during startup.
-jitter(N) ->
- Range = min(2 * N * ?AVG_DELAY_MSEC, ?MAX_DELAY_MSEC),
- couch_rand:uniform(Range).
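As a worked example of the formula above, assuming ?AVG_DELAY_MSEC = 100 and ?MAX_DELAY_MSEC = 60000 (the real values are defined near the top of this module and may differ):

% Sketch only; the macro values below are assumptions for illustration.
-define(AVG_DELAY_MSEC, 100).
-define(MAX_DELAY_MSEC, 60000).

jitter_example() ->
    % 5th shard:   2 * 5 * 100   = 1000   -> delay drawn from 1..1000 ms
    % 500th shard: 2 * 500 * 100 = 100000 -> capped at 1..60000 ms
    [min(2 * N * ?AVG_DELAY_MSEC, ?MAX_DELAY_MSEC) || N <- [5, 500]].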
-
-
-scan_local_db(Server, DbSuffix) when is_pid(Server) ->
- case couch_db:open_int(DbSuffix, [?CTX, sys_db, nologifmissing]) of
- {ok, Db} ->
- gen_server:cast(Server, {resume_scan, DbSuffix}),
- ok = couch_db:close(Db);
- _Error ->
- ok
- end.
-
-
-is_design_doc({Change}) ->
- case lists:keyfind(<<"id">>, 1, Change) of
- false ->
- false;
- {_, Id} ->
- is_design_doc_id(Id)
- end.
-
-
-is_design_doc_id(<<?DESIGN_DOC_PREFIX, _/binary>>) ->
- true;
-is_design_doc_id(_) ->
- false.
-
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_eunit.hrl").
-
--define(MOD, multidb_test_module).
--define(SUFFIX, <<"suff">>).
--define(DBNAME, <<"shards/40000000-5fffffff/acct/suff.0123456789">>).
-
-couch_multidb_changes_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- t_handle_call_change(),
- t_handle_call_change_filter_design_docs(),
- t_handle_call_checkpoint_new(),
- t_handle_call_checkpoint_existing(),
- t_handle_info_created(),
- t_handle_info_deleted(),
- t_handle_info_updated(),
- t_handle_info_other_event(),
- t_handle_info_created_other_db(),
- t_handle_info_scanner_exit_normal(),
- t_handle_info_scanner_crashed(),
- t_handle_info_event_server_exited(),
- t_handle_info_unknown_pid_exited(),
- t_handle_info_change_feed_exited(),
- t_handle_info_change_feed_exited_and_need_rescan(),
- t_spawn_changes_reader(),
- t_changes_reader_cb_change(),
- t_changes_reader_cb_stop(),
- t_changes_reader_cb_other(),
- t_handle_call_resume_scan_no_chfeed_no_ets_entry(),
- t_handle_call_resume_scan_chfeed_no_ets_entry(),
- t_handle_call_resume_scan_chfeed_ets_entry(),
- t_handle_call_resume_scan_no_chfeed_ets_entry(),
- t_start_link(),
- t_start_link_no_ddocs(),
- t_misc_gen_server_callbacks()
- ]
- }
- }.
-
-
-setup_all() ->
- mock_logs(),
- mock_callback_mod(),
- meck:expect(couch_event, register_all, 1, ok),
- meck:expect(config, get, ["mem3", "shards_db", '_'], "_dbs"),
- meck:expect(mem3_util, ensure_exists, 1, {ok, dbs}),
- ChangesFun = meck:val(fun(_) -> ok end),
- meck:expect(couch_changes, handle_db_changes, 3, ChangesFun),
- meck:expect(couch_db, open_int,
- fun(?DBNAME, [?CTX, sys_db]) -> {ok, db};
- (_, _) -> {not_found, no_db_file}
- end),
- meck:expect(couch_db, close, 1, ok),
- mock_changes_reader(),
- % Create a process to stand in for couch_event_server. Mocking
- % erlang:monitor doesn't work, so give it a real process to monitor.
- EvtPid = spawn_link(fun() -> receive looper -> ok end end),
- true = register(couch_event_server, EvtPid),
- EvtPid.
-
-
-teardown_all(EvtPid) ->
- unlink(EvtPid),
- exit(EvtPid, kill),
- meck:unload().
-
-
-setup() ->
- meck:reset([
- ?MOD,
- couch_changes,
- couch_db,
- couch_event,
- couch_log
- ]).
-
-
-teardown(_) ->
- ok.
-
-
-t_handle_call_change() ->
- ?_test(begin
- State = mock_state(),
- Change = change_row(<<"blah">>),
- handle_call_ok({change, ?DBNAME, Change}, State),
- ?assert(meck:validate(?MOD)),
- ?assert(meck:called(?MOD, db_change, [?DBNAME, Change, zig]))
- end).
-
-
-t_handle_call_change_filter_design_docs() ->
- ?_test(begin
- State0 = mock_state(),
- State = State0#state{skip_ddocs = true},
- Change = change_row(<<"_design/blah">>),
- handle_call_ok({change, ?DBNAME, Change}, State),
- ?assert(meck:validate(?MOD)),
- ?assertNot(meck:called(?MOD, db_change, [?DBNAME, Change, zig]))
- end).
-
-
-t_handle_call_checkpoint_new() ->
- ?_test(begin
- Tid = mock_ets(),
- State = mock_state(Tid),
- handle_call_ok({checkpoint, ?DBNAME, 1}, State),
- ?assertEqual([{?DBNAME, 1, false}], ets:tab2list(Tid)),
- ets:delete(Tid)
- end).
-
-
-t_handle_call_checkpoint_existing() ->
- ?_test(begin
- Tid = mock_ets(),
- State = mock_state(Tid),
- true = ets:insert(Tid, {?DBNAME, 1, true}),
- handle_call_ok({checkpoint, ?DBNAME, 2}, State),
- ?assertEqual([{?DBNAME, 2, true}], ets:tab2list(Tid)),
- ets:delete(Tid)
- end).
-
-
-t_handle_info_created() ->
- ?_test(begin
- Tid = mock_ets(),
- State = mock_state(Tid),
- handle_info_check({'$couch_event', ?DBNAME, created}, State),
- ?assert(meck:validate(?MOD)),
- ?assert(meck:called(?MOD, db_created, [?DBNAME, zig]))
- end).
-
-
-t_handle_info_deleted() ->
- ?_test(begin
- State = mock_state(),
- handle_info_check({'$couch_event', ?DBNAME, deleted}, State),
- ?assert(meck:validate(?MOD)),
- ?assert(meck:called(?MOD, db_deleted, [?DBNAME, zig]))
- end).
-
-
-t_handle_info_updated() ->
- ?_test(begin
- Tid = mock_ets(),
- State = mock_state(Tid),
- handle_info_check({'$couch_event', ?DBNAME, updated}, State),
- ?assert(meck:validate(?MOD)),
- ?assert(meck:called(?MOD, db_found, [?DBNAME, zig]))
- end).
-
-
-t_handle_info_other_event() ->
- ?_test(begin
- State = mock_state(),
- handle_info_check({'$couch_event', ?DBNAME, somethingelse}, State),
- ?assertNot(meck:called(?MOD, db_created, [?DBNAME, somethingelse])),
- ?assertNot(meck:called(?MOD, db_deleted, [?DBNAME, somethingelse])),
- ?assertNot(meck:called(?MOD, db_found, [?DBNAME, somethingelse]))
- end).
-
-
-t_handle_info_created_other_db() ->
- ?_test(begin
- State = mock_state(),
- handle_info_check({'$couch_event', <<"otherdb">>, created}, State),
- ?assertNot(meck:called(?MOD, db_created, [?DBNAME, zig]))
- end).
-
-
-t_handle_info_scanner_exit_normal() ->
- ?_test(begin
- Res = handle_info({'EXIT', spid, normal}, mock_state()),
- ?assertMatch({noreply, _}, Res),
- {noreply, RState} = Res,
- ?assertEqual(nil, RState#state.scanner)
- end).
-
-
-t_handle_info_scanner_crashed() ->
- ?_test(begin
- Res = handle_info({'EXIT', spid, oops}, mock_state()),
- ?assertMatch({stop, {scanner_died, oops}, _State}, Res)
- end).
-
-
-t_handle_info_event_server_exited() ->
- ?_test(begin
- Res = handle_info({'DOWN', esref, type, espid, reason}, mock_state()),
- ?assertMatch({stop, {couch_event_server_died, reason}, _}, Res)
- end).
-
-
-t_handle_info_unknown_pid_exited() ->
- ?_test(begin
- State0 = mock_state(),
- Res0 = handle_info({'EXIT', somepid, normal}, State0),
- ?assertMatch({noreply, State0}, Res0),
- State1 = mock_state(),
- Res1 = handle_info({'EXIT', somepid, oops}, State1),
- ?assertMatch({stop, {unexpected_exit, somepid, oops}, State1}, Res1)
- end).
-
-
-t_handle_info_change_feed_exited() ->
- ?_test(begin
- Tid0 = mock_ets(),
- State0 = mock_state(Tid0, cpid),
- Res0 = handle_info({'EXIT', cpid, normal}, State0),
- ?assertMatch({noreply, _}, Res0),
- {noreply, RState0} = Res0,
- ?assertEqual([], RState0#state.pids),
- ets:delete(Tid0),
- Tid1 = mock_ets(),
- State1 = mock_state(Tid1, cpid),
- Res1 = handle_info({'EXIT', cpid, oops}, State1),
- ?assertMatch({noreply, _}, Res1),
- {noreply, RState1} = Res1,
- ?assertEqual([], RState1#state.pids),
- ets:delete(Tid1)
- end).
-
-
-t_handle_info_change_feed_exited_and_need_rescan() ->
- ?_test(begin
- Tid = mock_ets(),
- true = ets:insert(Tid, {?DBNAME, 1, true}),
- State = mock_state(Tid, cpid),
- Res = handle_info({'EXIT', cpid, normal}, State),
- ?assertMatch({noreply, _}, Res),
- {noreply, RState} = Res,
- % rescan flag should have been reset to false
- ?assertEqual([{?DBNAME, 1, false}], ets:tab2list(Tid)),
- % a mock change feed process should be running
- [{?DBNAME, Pid}] = RState#state.pids,
- ?assert(is_pid(Pid)),
- ChArgs = kill_mock_changes_reader_and_get_its_args(Pid),
- ?assertEqual({self(), ?DBNAME}, ChArgs),
- ets:delete(Tid)
- end).
-
-
-t_spawn_changes_reader() ->
- ?_test(begin
- Pid = start_changes_reader(?DBNAME, 3),
- ?assert(erlang:is_process_alive(Pid)),
- ChArgs = kill_mock_changes_reader_and_get_its_args(Pid),
- ?assertEqual({self(), ?DBNAME}, ChArgs),
- ?assert(meck:validate(couch_db)),
- ?assert(meck:validate(couch_changes)),
- ?assert(meck:called(couch_db, open_int, [?DBNAME, [?CTX, sys_db]])),
- ?assert(meck:called(couch_changes, handle_db_changes, [
- #changes_args{
- include_docs = true,
- since = 3,
- feed = "normal",
- timeout = infinity
- }, {json_req, null}, db]))
- end).
-
-
-t_changes_reader_cb_change() ->
- ?_test(begin
- {ok, Pid} = start_link(?SUFFIX, ?MOD, zig, []),
- Change = change_row(<<"blah">>),
- ChArg = {change, Change, ignore},
- {Pid, ?DBNAME} = changes_reader_cb(ChArg, chtype, {Pid, ?DBNAME}),
- ?assert(meck:called(?MOD, db_change, [?DBNAME, Change, zig])),
- unlink(Pid),
- exit(Pid, kill)
- end).
-
-
-t_changes_reader_cb_stop() ->
- ?_test(begin
- {ok, Pid} = start_link(?SUFFIX, ?MOD, zig, []),
- ChArg = {stop, 11},
- {Pid, ?DBNAME} = changes_reader_cb(ChArg, chtype, {Pid, ?DBNAME}),
- % We checkpoint on stop; check that we checkpointed at the correct sequence
- #state{tid = Tid} = sys:get_state(Pid),
- ?assertEqual([{?DBNAME, 11, false}], ets:tab2list(Tid)),
- unlink(Pid),
- exit(Pid, kill)
- end).
-
-
-t_changes_reader_cb_other() ->
- ?_assertEqual(acc, changes_reader_cb(other, chtype, acc)).
-
-
-t_handle_call_resume_scan_no_chfeed_no_ets_entry() ->
- ?_test(begin
- Tid = mock_ets(),
- State = mock_state(Tid),
- RState = resume_scan(?DBNAME, State),
- % Check that a checkpoint entry starting at 0 was inserted in ets
- ?assertEqual([{?DBNAME, 0, false}], ets:tab2list(Tid)),
- % Check that the db_found callback was called
- ?assert(meck:called(?MOD, db_found, [?DBNAME, zig])),
- % Check that a change reader was started
- [{?DBNAME, Pid}] = RState#state.pids,
- ChArgs = kill_mock_changes_reader_and_get_its_args(Pid),
- ?assertEqual({self(), ?DBNAME}, ChArgs),
- ?assert(meck:called(couch_changes, handle_db_changes, [
- #changes_args{
- include_docs = true,
- since = 0,
- feed = "normal",
- timeout = infinity
- }, {json_req, null}, db])),
- ets:delete(Tid)
- end).
-
-
-t_handle_call_resume_scan_chfeed_no_ets_entry() ->
- ?_test(begin
- Tid = mock_ets(),
- Pid = start_changes_reader(?DBNAME, 0),
- State = mock_state(Tid, Pid),
- resume_scan(?DBNAME, State),
- % Check ets checkpoint is set to 0 and rescan = true
- ?assertEqual([{?DBNAME, 0, true}], ets:tab2list(Tid)),
- ets:delete(Tid),
- kill_mock_changes_reader_and_get_its_args(Pid)
- end).
-
-
-t_handle_call_resume_scan_chfeed_ets_entry() ->
- ?_test(begin
- Tid = mock_ets(),
- true = ets:insert(Tid, [{?DBNAME, 2, false}]),
- Pid = start_changes_reader(?DBNAME, 1),
- State = mock_state(Tid, Pid),
- resume_scan(?DBNAME, State),
- % Check ets checkpoint is set to same endseq but rescan = true
- ?assertEqual([{?DBNAME, 2, true}], ets:tab2list(Tid)),
- ets:delete(Tid),
- kill_mock_changes_reader_and_get_its_args(Pid)
- end).
-
-
-t_handle_call_resume_scan_no_chfeed_ets_entry() ->
- ?_test(begin
- Tid = mock_ets(),
- true = ets:insert(Tid, [{?DBNAME, 1, true}]),
- State = mock_state(Tid),
- RState = resume_scan(?DBNAME, State),
- % Check that rescan was reset to false but the same endseq was kept
- ?assertEqual([{?DBNAME, 1, false}], ets:tab2list(Tid)),
- % Check that a change reader was started
- [{?DBNAME, Pid}] = RState#state.pids,
- ChArgs = kill_mock_changes_reader_and_get_its_args(Pid),
- ?assertEqual({self(), ?DBNAME}, ChArgs),
- ?assert(meck:called(couch_changes, handle_db_changes, [
- #changes_args{
- include_docs = true,
- since = 1,
- feed = "normal",
- timeout = infinity
- }, {json_req, null}, db])),
- ets:delete(Tid)
- end).
-
-
-t_start_link() ->
- ?_test(begin
- {ok, Pid} = start_link(?SUFFIX, ?MOD, nil, []),
- ?assert(is_pid(Pid)),
- ?assertMatch(#state{
- mod = ?MOD,
- suffix = ?SUFFIX,
- ctx = nil,
- pids = [],
- skip_ddocs = false
- }, sys:get_state(Pid)),
- unlink(Pid),
- exit(Pid, kill),
- ?assert(meck:called(couch_event, register_all, [Pid]))
- end).
-
-
-t_start_link_no_ddocs() ->
- ?_test(begin
- {ok, Pid} = start_link(?SUFFIX, ?MOD, nil, [skip_ddocs]),
- ?assert(is_pid(Pid)),
- ?assertMatch(#state{
- mod = ?MOD,
- suffix = ?SUFFIX,
- ctx = nil,
- pids = [],
- skip_ddocs = true
- }, sys:get_state(Pid)),
- unlink(Pid),
- exit(Pid, kill)
- end).
-
-
-t_misc_gen_server_callbacks() ->
- ?_test(begin
- ?assertEqual(ok, terminate(reason, state)),
- ?assertEqual({ok, state}, code_change(old, state, extra))
- end).
-
-
-scan_dbs_test_() ->
-{
- setup,
- fun() ->
- Ctx = test_util:start_couch([mem3, fabric]),
- GlobalDb = ?tempdb(),
- ok = fabric:create_db(GlobalDb, [?CTX]),
- #shard{name = LocalDb} = hd(mem3:local_shards(GlobalDb)),
- {Ctx, GlobalDb, LocalDb}
- end,
- fun({Ctx, GlobalDb, _LocalDb}) ->
- fabric:delete_db(GlobalDb, [?CTX]),
- test_util:stop_couch(Ctx)
- end,
- {with, [
- fun t_find_shard/1,
- fun t_shard_not_found/1,
- fun t_pass_local/1,
- fun t_fail_local/1
- ]}
-}.
-
-
-t_find_shard({_, DbName, _}) ->
- ?_test(begin
- ?assertEqual(2, length(local_shards(DbName)))
- end).
-
-
-t_shard_not_found(_) ->
- ?_test(begin
- ?assertEqual([], local_shards(?tempdb()))
- end).
-
-
-t_pass_local({_, _, LocalDb}) ->
- ?_test(begin
- scan_local_db(self(), LocalDb),
- receive
- {'$gen_cast', Msg} ->
- ?assertEqual(Msg, {resume_scan, LocalDb})
- after 0 ->
- ?assert(false)
- end
- end).
-
-
-t_fail_local({_, _, LocalDb}) ->
- ?_test(begin
- scan_local_db(self(), <<"some_other_db">>),
- receive
- {'$gen_cast', Msg} ->
- ?assertNotEqual(Msg, {resume_scan, LocalDb})
- after 0 ->
- ?assert(true)
- end
- end).
-
-
-% Test helper functions
-
-mock_logs() ->
- meck:expect(couch_log, error, 2, ok),
- meck:expect(couch_log, notice, 2, ok),
- meck:expect(couch_log, info, 2, ok),
- meck:expect(couch_log, debug, 2, ok).
-
-
-mock_callback_mod() ->
- meck:new(?MOD, [non_strict]),
- meck:expect(?MOD, db_created, fun(_DbName, Ctx) -> Ctx end),
- meck:expect(?MOD, db_deleted, fun(_DbName, Ctx) -> Ctx end),
- meck:expect(?MOD, db_found, fun(_DbName, Ctx) -> Ctx end),
- meck:expect(?MOD, db_change, fun(_DbName, _Change, Ctx) -> Ctx end).
-
-
-mock_changes_reader_loop({_CbFun, {Server, DbName}}) ->
- receive
- die ->
- exit({Server, DbName})
- end.
-
-kill_mock_changes_reader_and_get_its_args(Pid) ->
- Ref = monitor(process, Pid),
- unlink(Pid),
- Pid ! die,
- receive
- {'DOWN', Ref, _, Pid, {Server, DbName}} ->
- {Server, DbName}
- after 1000 ->
- erlang:error(spawn_change_reader_timeout)
- end.
-
-
-mock_changes_reader() ->
- meck:expect(couch_changes, handle_db_changes,
- fun
- (_ChArgs, _Req, db) -> fun mock_changes_reader_loop/1;
- (_ChArgs, _Req, dbs) -> fun(_) -> ok end
- end).
-
-
-mock_ets() ->
- ets:new(multidb_test_ets, [set, public]).
-
-
-mock_state() ->
- #state{
- mod = ?MOD,
- ctx = zig,
- suffix = ?SUFFIX,
- event_server = esref,
- scanner = spid,
- pids = []}.
-
-
-mock_state(Ets) ->
- State = mock_state(),
- State#state{tid = Ets}.
-
-
-mock_state(Ets, Pid) ->
- State = mock_state(Ets),
- State#state{pids = [{?DBNAME, Pid}]}.
-
-
-change_row(Id) when is_binary(Id) ->
- {[
- {<<"seq">>, 1},
- {<<"id">>, Id},
- {<<"changes">>, [{[{<<"rev">>, <<"1-f00">>}]}]},
- {doc, {[{<<"_id">>, Id}, {<<"_rev">>, <<"1-f00">>}]}}
- ]}.
-
-
-handle_call_ok(Msg, State) ->
- ?assertMatch({reply, ok, _}, handle_call(Msg, from, State)).
-
-
-handle_info_check(Msg, State) ->
- ?assertMatch({noreply, _}, handle_info(Msg, State)).
-
-
--endif.
diff --git a/src/couch/src/couch_os_process.erl b/src/couch/src/couch_os_process.erl
index 63a241433..e17782ea3 100644
--- a/src/couch/src/couch_os_process.erl
+++ b/src/couch/src/couch_os_process.erl
@@ -51,7 +51,7 @@ send(Pid, Data) ->
gen_server:cast(Pid, {send, Data}).
prompt(Pid, Data) ->
- case ioq:call(Pid, {prompt, Data}, erlang:get(io_priority)) of
+ case gen_server:call(Pid, {prompt, Data}, infinity) of
{ok, Result} ->
Result;
Error ->
diff --git a/src/couch/src/couch_partition.erl b/src/couch/src/couch_partition.erl
index f2efcaa5e..cb78323c3 100644
--- a/src/couch/src/couch_partition.erl
+++ b/src/couch/src/couch_partition.erl
@@ -122,7 +122,7 @@ validate_docid(DocId) when is_binary(DocId) ->
throw({illegal_docid, <<"Doc id must be of form partition:id">>});
{Partition, PartitionedDocId} ->
validate_partition(Partition),
- couch_doc:validate_docid(PartitionedDocId)
+ fabric2_db:validate_docid(PartitionedDocId)
end.
diff --git a/src/couch/src/couch_primary_sup.erl b/src/couch/src/couch_primary_sup.erl
index dc2d9e51a..b6d370fbb 100644
--- a/src/couch/src/couch_primary_sup.erl
+++ b/src/couch/src/couch_primary_sup.erl
@@ -25,12 +25,6 @@ init([]) ->
infinity,
supervisor,
[couch_drv]},
- {couch_task_status,
- {couch_task_status, start_link, []},
- permanent,
- brutal_kill,
- worker,
- [couch_task_status]},
{couch_server,
{couch_server, sup_start_link, []},
permanent,
diff --git a/src/couch/src/couch_proc_manager.erl b/src/couch/src/couch_proc_manager.erl
index b83d78882..4627a9fd9 100644
--- a/src/couch/src/couch_proc_manager.erl
+++ b/src/couch/src/couch_proc_manager.erl
@@ -41,6 +41,7 @@
]).
-include_lib("couch/include/couch_db.hrl").
+-include_lib("kernel/include/logger.hrl").
-define(PROCS, couch_proc_manager_procs).
-define(WAITERS, couch_proc_manager_waiters).
@@ -321,8 +322,7 @@ find_proc(#client{lang = Lang, ddoc = DDoc, ddoc_key = DDocKey} = Client) ->
find_proc(Lang, Fun) ->
try iter_procs(Lang, Fun)
- catch error:Reason ->
- StackTrace = erlang:get_stacktrace(),
+ catch error:Reason:StackTrace ->
couch_log:error("~p ~p ~p", [?MODULE, Reason, StackTrace]),
{error, Reason}
end.
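The hunk above replaces the deprecated erlang:get_stacktrace/0 with the Class:Reason:Stacktrace catch syntax available since OTP 21; the same pattern in isolation looks like this (safe_call/1 is an illustrative name, not an existing CouchDB function):

safe_call(Fun) ->
    try
        Fun()
    catch
        % OTP 21+ binds the stacktrace directly in the clause head instead
        % of calling the removed erlang:get_stacktrace/0 afterwards.
        error:Reason:StackTrace ->
            couch_log:error("~p ~p ~p", [?MODULE, Reason, StackTrace]),
            {error, Reason}
    end.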
diff --git a/src/couch/src/couch_query_servers.erl b/src/couch/src/couch_query_servers.erl
index 8eb07abbf..d2523826b 100644
--- a/src/couch/src/couch_query_servers.erl
+++ b/src/couch/src/couch_query_servers.erl
@@ -18,7 +18,6 @@
-export([filter_docs/5]).
-export([filter_view/3]).
-export([finalize/2]).
--export([rewrite/3]).
-export([with_ddoc_proc/2, proc_prompt/2, ddoc_prompt/3, ddoc_proc_prompt/3, json_doc/1]).
@@ -392,84 +391,6 @@ validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) ->
end.
-rewrite(Req, Db, DDoc) ->
- Fields = [F || F <- chttpd_external:json_req_obj_fields(),
- F =/= <<"info">>, F =/= <<"form">>,
- F =/= <<"uuid">>, F =/= <<"id">>],
- JsonReq = chttpd_external:json_req_obj(Req, Db, null, Fields),
- case couch_query_servers:ddoc_prompt(DDoc, [<<"rewrites">>], [JsonReq]) of
- {[{<<"forbidden">>, Message}]} ->
- throw({forbidden, Message});
- {[{<<"unauthorized">>, Message}]} ->
- throw({unauthorized, Message});
- [<<"no_dispatch_rule">>] ->
- undefined;
- [<<"ok">>, {V}=Rewrite] when is_list(V) ->
- ok = validate_rewrite_response(Rewrite),
- Rewrite;
- [<<"ok">>, _] ->
- throw_rewrite_error(<<"bad rewrite">>);
- V ->
- couch_log:error("bad rewrite return ~p", [V]),
- throw({unknown_error, V})
- end.
-
-validate_rewrite_response({Fields}) when is_list(Fields) ->
- validate_rewrite_response_fields(Fields).
-
-validate_rewrite_response_fields([{Key, Value} | Rest]) ->
- validate_rewrite_response_field(Key, Value),
- validate_rewrite_response_fields(Rest);
-validate_rewrite_response_fields([]) ->
- ok.
-
-validate_rewrite_response_field(<<"method">>, Method) when is_binary(Method) ->
- ok;
-validate_rewrite_response_field(<<"method">>, _) ->
- throw_rewrite_error(<<"bad method">>);
-validate_rewrite_response_field(<<"path">>, Path) when is_binary(Path) ->
- ok;
-validate_rewrite_response_field(<<"path">>, _) ->
- throw_rewrite_error(<<"bad path">>);
-validate_rewrite_response_field(<<"body">>, Body) when is_binary(Body) ->
- ok;
-validate_rewrite_response_field(<<"body">>, _) ->
- throw_rewrite_error(<<"bad body">>);
-validate_rewrite_response_field(<<"headers">>, {Props}=Headers) when is_list(Props) ->
- validate_object_fields(Headers);
-validate_rewrite_response_field(<<"headers">>, _) ->
- throw_rewrite_error(<<"bad headers">>);
-validate_rewrite_response_field(<<"query">>, {Props}=Query) when is_list(Props) ->
- validate_object_fields(Query);
-validate_rewrite_response_field(<<"query">>, _) ->
- throw_rewrite_error(<<"bad query">>);
-validate_rewrite_response_field(<<"code">>, Code) when is_integer(Code) andalso Code >= 200 andalso Code < 600 ->
- ok;
-validate_rewrite_response_field(<<"code">>, _) ->
- throw_rewrite_error(<<"bad code">>);
-validate_rewrite_response_field(K, V) ->
- couch_log:debug("unknown rewrite field ~p=~p", [K, V]),
- ok.
-
-validate_object_fields({Props}) when is_list(Props) ->
- lists:foreach(fun
- ({Key, Value}) when is_binary(Key) andalso is_binary(Value) ->
- ok;
- ({Key, Value}) ->
- Reason = io_lib:format(
- "object key/value must be strings ~p=~p", [Key, Value]),
- throw_rewrite_error(Reason);
- (Value) ->
- throw_rewrite_error(io_lib:format("bad value ~p", [Value]))
- end, Props).
-
-
-throw_rewrite_error(Reason) when is_list(Reason)->
- throw_rewrite_error(iolist_to_binary(Reason));
-throw_rewrite_error(Reason) when is_binary(Reason) ->
- throw({rewrite_error, Reason}).
-
-
json_doc_options() ->
json_doc_options([]).
@@ -500,9 +421,21 @@ filter_docs(Req, Db, DDoc, FName, Docs) ->
end,
Options = json_doc_options(),
JsonDocs = [json_doc(Doc, Options) || Doc <- Docs],
+ try
+ {ok, filter_docs_int(DDoc, FName, JsonReq, JsonDocs)}
+ catch
+ throw:{os_process_error,{exit_status,1}} ->
+ %% The batch used too much memory; retry sequentially.
+ Fun = fun(JsonDoc) ->
+ filter_docs_int(DDoc, FName, JsonReq, [JsonDoc])
+ end,
+ {ok, lists:flatmap(Fun, JsonDocs)}
+ end.
+
+filter_docs_int(DDoc, FName, JsonReq, JsonDocs) ->
[true, Passes] = ddoc_prompt(DDoc, [<<"filters">>, FName],
[JsonDocs, JsonReq]),
- {ok, Passes}.
+ Passes.
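The batch-then-sequential fallback introduced above is a generic pattern: prompt the external filter process with all documents at once, and only if it exits (typically from memory pressure on a very large batch) retry one document at a time. A minimal standalone sketch of that pattern, where FilterFun stands in for the ddoc_prompt call and the names are illustrative:

filter_with_fallback(FilterFun, Docs) ->
    try
        % Fast path: filter the whole batch in a single prompt.
        FilterFun(Docs)
    catch
        throw:{os_process_error, {exit_status, 1}} ->
            % The external process died on the big batch; retry each
            % document on its own and flatten the per-document results.
            lists:flatmap(fun(Doc) -> FilterFun([Doc]) end, Docs)
    end.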
ddoc_proc_prompt({Proc, DDocId}, FunPath, Args) ->
proc_prompt(Proc, [<<"ddoc">>, DDocId, FunPath, Args]).
diff --git a/src/couch/src/couch_secondary_sup.erl b/src/couch/src/couch_secondary_sup.erl
index bb7821555..293e1b52a 100644
--- a/src/couch/src/couch_secondary_sup.erl
+++ b/src/couch/src/couch_secondary_sup.erl
@@ -27,17 +27,11 @@ init([]) ->
dynamic}
],
Daemons = [
- {index_server, {couch_index_server, start_link, []}},
{query_servers, {couch_proc_manager, start_link, []}},
{vhosts, {couch_httpd_vhost, start_link, []}},
{uuids, {couch_uuids, start, []}}
],
- MaybeHttp = case http_enabled() of
- true -> [{httpd, {couch_httpd, start_link, []}}];
- false -> couch_httpd:set_auth_handlers(), []
- end,
-
MaybeHttps = case https_enabled() of
true -> [{httpsd, {chttpd, start_link, [https]}}];
false -> []
@@ -55,13 +49,10 @@ init([]) ->
[Module]}
end
|| {Name, Spec}
- <- Daemons ++ MaybeHttp ++ MaybeHttps, Spec /= ""],
+ <- Daemons ++ MaybeHttps, Spec /= ""],
{ok, {{one_for_one, 50, 3600},
couch_epi:register_service(couch_db_epi, Children)}}.
-http_enabled() ->
- config:get_boolean("httpd", "enable", false).
-
https_enabled() ->
% 1. [ssl] enable = true | false
% 2. if [daemons] httpsd == {chttpd, start_link, [https]} -> pretend true as well
diff --git a/src/couch/src/couch_server.erl b/src/couch/src/couch_server.erl
index 42eab738c..8fd074a78 100644
--- a/src/couch/src/couch_server.erl
+++ b/src/couch/src/couch_server.erl
@@ -15,43 +15,18 @@
-behaviour(config_listener).
-vsn(3).
--export([open/2,create/2,delete/2,get_version/0,get_version/1,get_git_sha/0,get_uuid/0]).
--export([all_databases/0, all_databases/2]).
+-export([get_version/0,get_version/1,get_git_sha/0,get_uuid/0]).
-export([init/1, handle_call/3,sup_start_link/0]).
--export([handle_cast/2,code_change/3,handle_info/2,terminate/2,format_status/2]).
--export([dev_start/0,is_admin/2,has_admins/0,get_stats/0]).
--export([close_lru/0]).
--export([close_db_if_idle/1]).
--export([delete_compaction_files/1]).
--export([exists/1]).
--export([get_engine_extensions/0]).
--export([get_engine_path/2]).
--export([lock/2, unlock/1]).
+-export([handle_cast/2,code_change/3,handle_info/2,terminate/2]).
+-export([is_admin/2,has_admins/0]).
% config_listener api
-export([handle_config_change/5, handle_config_terminate/3]).
-include_lib("couch/include/couch_db.hrl").
--include("couch_server_int.hrl").
--define(MAX_DBS_OPEN, 500).
-define(RELISTEN_DELAY, 5000).
--record(server,{
- root_dir = [],
- engines = [],
- max_dbs_open=?MAX_DBS_OPEN,
- dbs_open=0,
- start_time="",
- update_lru_on_read=true,
- lru = couch_lru:new()
- }).
-
-dev_start() ->
- couch:stop(),
- up_to_date = make:all([load, debug_info]),
- couch:start().
-
get_version() ->
?COUCHDB_VERSION. %% Defined in rebar.config.script
get_version(short) ->
@@ -70,138 +45,9 @@ get_uuid() ->
UUID -> ?l2b(UUID)
end.
-get_stats() ->
- {ok, #server{start_time=Time,dbs_open=Open}} =
- gen_server:call(couch_server, get_server),
- [{start_time, ?l2b(Time)}, {dbs_open, Open}].
-
sup_start_link() ->
gen_server:start_link({local, couch_server}, couch_server, [], []).
-open(DbName, Options) ->
- try
- validate_open_or_create(DbName, Options),
- open_int(DbName, Options)
- catch throw:{?MODULE, Error} ->
- Error
- end.
-
-open_int(DbName, Options0) ->
- Ctx = couch_util:get_value(user_ctx, Options0, #user_ctx{}),
- case ets:lookup(couch_dbs, DbName) of
- [#entry{db = Db0, lock = Lock} = Entry] when Lock =/= locked ->
- update_lru(DbName, Entry#entry.db_options),
- {ok, Db1} = couch_db:incref(Db0),
- couch_db:set_user_ctx(Db1, Ctx);
- _ ->
- Options = maybe_add_sys_db_callbacks(DbName, Options0),
- Timeout = couch_util:get_value(timeout, Options, infinity),
- Create = couch_util:get_value(create_if_missing, Options, false),
- case gen_server:call(couch_server, {open, DbName, Options}, Timeout) of
- {ok, Db0} ->
- {ok, Db1} = couch_db:incref(Db0),
- couch_db:set_user_ctx(Db1, Ctx);
- {not_found, no_db_file} when Create ->
- couch_log:warning("creating missing database: ~s", [DbName]),
- couch_server:create(DbName, Options);
- Error ->
- Error
- end
- end.
-
-update_lru(DbName, Options) ->
- case config:get_boolean("couchdb", "update_lru_on_read", false) of
- true ->
- case lists:member(sys_db, Options) of
- false -> gen_server:cast(couch_server, {update_lru, DbName});
- true -> ok
- end;
- false ->
- ok
- end.
-
-close_lru() ->
- gen_server:call(couch_server, close_lru).
-
-create(DbName, Options) ->
- try
- validate_open_or_create(DbName, Options),
- create_int(DbName, Options)
- catch throw:{?MODULE, Error} ->
- Error
- end.
-
-create_int(DbName, Options0) ->
- Options = maybe_add_sys_db_callbacks(DbName, Options0),
- couch_partition:validate_dbname(DbName, Options),
- case gen_server:call(couch_server, {create, DbName, Options}, infinity) of
- {ok, Db0} ->
- Ctx = couch_util:get_value(user_ctx, Options, #user_ctx{}),
- {ok, Db1} = couch_db:incref(Db0),
- couch_db:set_user_ctx(Db1, Ctx);
- Error ->
- Error
- end.
-
-delete(DbName, Options) ->
- gen_server:call(couch_server, {delete, DbName, Options}, infinity).
-
-
-exists(DbName) ->
- RootDir = config:get("couchdb", "database_dir", "."),
- Engines = get_configured_engines(),
- Possible = get_possible_engines(DbName, RootDir, Engines),
- Possible /= [].
-
-
-delete_compaction_files(DbName) ->
- delete_compaction_files(DbName, []).
-
-delete_compaction_files(DbName, DelOpts) when is_list(DbName) ->
- RootDir = config:get("couchdb", "database_dir", "."),
- lists:foreach(fun({Ext, Engine}) ->
- FPath = make_filepath(RootDir, DbName, Ext),
- couch_db_engine:delete_compaction_files(Engine, RootDir, FPath, DelOpts)
- end, get_configured_engines()),
- ok;
-delete_compaction_files(DbName, DelOpts) when is_binary(DbName) ->
- delete_compaction_files(?b2l(DbName), DelOpts).
-
-maybe_add_sys_db_callbacks(DbName, Options) when is_binary(DbName) ->
- maybe_add_sys_db_callbacks(?b2l(DbName), Options);
-maybe_add_sys_db_callbacks(DbName, Options) ->
- DbsDbName = config:get("mem3", "shards_db", "_dbs"),
- NodesDbName = config:get("mem3", "nodes_db", "_nodes"),
-
- IsReplicatorDb = path_ends_with(DbName, "_replicator"),
- UsersDbSuffix = config:get("couchdb", "users_db_suffix", "_users"),
- IsUsersDb = path_ends_with(DbName, "_users")
- orelse path_ends_with(DbName, UsersDbSuffix),
- if
- DbName == DbsDbName ->
- [sys_db | Options];
- DbName == NodesDbName ->
- [sys_db | Options];
- IsReplicatorDb ->
- [{before_doc_update, fun couch_replicator_docs:before_doc_update/3},
- {after_doc_read, fun couch_replicator_docs:after_doc_read/2},
- sys_db | Options];
- IsUsersDb ->
- [{before_doc_update, fun couch_users_db:before_doc_update/3},
- {after_doc_read, fun couch_users_db:after_doc_read/2},
- sys_db | Options];
- true ->
- Options
- end.
-
-path_ends_with(Path, Suffix) when is_binary(Suffix) ->
- Suffix =:= couch_db:dbname_suffix(Path);
-path_ends_with(Path, Suffix) when is_list(Suffix) ->
- path_ends_with(Path, ?l2b(Suffix)).
-
-check_dbname(DbName) ->
- couch_db:validate_dbname(DbName).
-
is_admin(User, ClearPwd) ->
case config:get("admins", User) of
"-hashed-" ++ HashedPwdAndSalt ->
@@ -224,22 +70,9 @@ hash_admin_passwords(Persist) ->
config:set("admins", User, ?b2l(HashedPassword), Persist)
end, couch_passwords:get_unhashed_admins()).
-close_db_if_idle(DbName) ->
- case ets:lookup(couch_dbs, DbName) of
- [#entry{}] ->
- gen_server:cast(couch_server, {close_db_if_idle, DbName});
- [] ->
- ok
- end.
-
-
init([]) ->
- couch_util:set_mqd_off_heap(?MODULE),
- couch_util:set_process_priority(?MODULE, high),
-
% Mark being able to receive documents with an _access property as a supported feature
config:enable_feature('access-ready'),
-
% Mark if fips is enabled
case
erlang:function_exported(crypto, info_fips, 0) andalso
@@ -249,83 +82,28 @@ init([]) ->
false ->
ok
end,
+ ok = config:listen_for_changes(?MODULE, nil),
+ hash_admin_passwords(),
+ {ok, nil}.
- % read config and register for configuration changes
+handle_call(Msg, _From, Srv) ->
+ {stop, {bad_call, Msg}, Srv}.
- % just stop if one of the config settings changes. couch_server_sup
- % will restart us and then we will pick up the new settings.
+handle_cast(Msg, Srv) ->
+ {stop, {bad_cast, Msg}, Srv}.
- RootDir = config:get("couchdb", "database_dir", "."),
- Engines = get_configured_engines(),
- MaxDbsOpen = list_to_integer(
- config:get("couchdb", "max_dbs_open", integer_to_list(?MAX_DBS_OPEN))),
- UpdateLruOnRead =
- config:get("couchdb", "update_lru_on_read", "false") =:= "true",
- ok = config:listen_for_changes(?MODULE, nil),
- ok = couch_file:init_delete_dir(RootDir),
- hash_admin_passwords(),
- ets:new(couch_dbs, [
- set,
- protected,
- named_table,
- {keypos, #entry.name},
- {read_concurrency, true}
- ]),
- ets:new(couch_dbs_pid_to_name, [set, protected, named_table]),
- ets:new(couch_dbs_locks, [
- set,
- public,
- named_table,
- {read_concurrency, true}
- ]),
- process_flag(trap_exit, true),
- {ok, #server{root_dir=RootDir,
- engines = Engines,
- max_dbs_open=MaxDbsOpen,
- update_lru_on_read=UpdateLruOnRead,
- start_time=couch_util:rfc1123_date()}}.
+handle_info(Msg, Srv) ->
+ {stop, {unknown_message, Msg}, Srv}.
-terminate(Reason, Srv) ->
- couch_log:error("couch_server terminating with ~p, state ~2048p",
- [Reason,
- Srv#server{lru = redacted}]),
- ets:foldl(fun(#entry{db = Db}, _) ->
- % Filter out any entry records for open_async
- % processes that haven't finished.
- if Db == undefined -> ok; true ->
- couch_util:shutdown_sync(couch_db:get_pid(Db))
- end
- end, nil, couch_dbs),
- ok.
+code_change(_OldVsn, Srv, _Extra) ->
+ {ok, Srv}.
-format_status(_Opt, [_PDict, Srv]) ->
- Scrubbed = Srv#server{lru=couch_lru:sizes(Srv#server.lru)},
- [{data, [{"State", ?record_to_keyval(server, Scrubbed)}]}].
+terminate(_Reason, _Srv) ->
+ ok.
-handle_config_change("couchdb", "database_dir", _, _, _) ->
- exit(whereis(couch_server), config_change),
- remove_handler;
-handle_config_change("couchdb", "update_lru_on_read", "true", _, _) ->
- {ok, gen_server:call(couch_server,{set_update_lru_on_read,true})};
-handle_config_change("couchdb", "update_lru_on_read", _, _, _) ->
- {ok, gen_server:call(couch_server,{set_update_lru_on_read,false})};
-handle_config_change("couchdb", "max_dbs_open", Max, _, _) when is_list(Max) ->
- {ok, gen_server:call(couch_server,{set_max_dbs_open,list_to_integer(Max)})};
-handle_config_change("couchdb", "max_dbs_open", _, _, _) ->
- {ok, gen_server:call(couch_server,{set_max_dbs_open,?MAX_DBS_OPEN})};
-handle_config_change("couchdb_engines", _, _, _, _) ->
- {ok, gen_server:call(couch_server, reload_engines)};
handle_config_change("admins", _, _, Persist, _) ->
% spawn here so couch event manager doesn't deadlock
{ok, spawn(fun() -> hash_admin_passwords(Persist) end)};
-handle_config_change("httpd", "authentication_handlers", _, _, _) ->
- {ok, couch_httpd:stop()};
-handle_config_change("httpd", "bind_address", _, _, _) ->
- {ok, couch_httpd:stop()};
-handle_config_change("httpd", "port", _, _, _) ->
- {ok, couch_httpd:stop()};
-handle_config_change("httpd", "max_connections", _, _, _) ->
- {ok, couch_httpd:stop()};
handle_config_change(_, _, _, _, _) ->
{ok, nil}.
@@ -333,621 +111,3 @@ handle_config_terminate(_, stop, _) ->
ok;
handle_config_terminate(_Server, _Reason, _State) ->
erlang:send_after(?RELISTEN_DELAY, whereis(?MODULE), restart_config_listener).
-
-
-all_databases() ->
- {ok, DbList} = all_databases(
- fun(DbName, Acc) -> {ok, [DbName | Acc]} end, []),
- {ok, lists:usort(DbList)}.
-
-all_databases(Fun, Acc0) ->
- {ok, #server{root_dir=Root}} = gen_server:call(couch_server, get_server),
- NormRoot = couch_util:normpath(Root),
- Extensions = get_engine_extensions(),
- ExtRegExp = "(" ++ string:join(Extensions, "|") ++ ")",
- RegExp =
- "^[a-z0-9\\_\\$()\\+\\-]*" % stock CouchDB name regex
- "(\\.[0-9]{10,})?" % optional shard timestamp
- "\\." ++ ExtRegExp ++ "$", % filename extension
- FinalAcc = try
- couch_util:fold_files(Root,
- RegExp,
- true,
- fun(Filename, AccIn) ->
- NormFilename = couch_util:normpath(Filename),
- case NormFilename -- NormRoot of
- [$/ | RelativeFilename] -> ok;
- RelativeFilename -> ok
- end,
- Ext = filename:extension(RelativeFilename),
- case Fun(?l2b(filename:rootname(RelativeFilename, Ext)), AccIn) of
- {ok, NewAcc} -> NewAcc;
- {stop, NewAcc} -> throw({stop, Fun, NewAcc})
- end
- end, Acc0)
- catch throw:{stop, Fun, Acc1} ->
- Acc1
- end,
- {ok, FinalAcc}.
-
-
-make_room(Server, Options) ->
- case lists:member(sys_db, Options) of
- false -> maybe_close_lru_db(Server);
- true -> {ok, Server}
- end.
-
-maybe_close_lru_db(#server{dbs_open=NumOpen, max_dbs_open=MaxOpen}=Server)
- when NumOpen < MaxOpen ->
- {ok, Server};
-maybe_close_lru_db(#server{lru=Lru}=Server) ->
- case couch_lru:close(Lru) of
- {true, NewLru} ->
- {ok, db_closed(Server#server{lru = NewLru}, [])};
- false ->
- {error, all_dbs_active}
- end.
-
-open_async(Server, From, DbName, Options) ->
- NoLRUServer = Server#server{
- lru = redacted
- },
- Parent = self(),
- T0 = os:timestamp(),
- Opener = spawn_link(fun() ->
- Res = open_async_int(NoLRUServer, DbName, Options),
- IsSuccess = case Res of
- {ok, _} -> true;
- _ -> false
- end,
- case IsSuccess andalso lists:member(create, Options) of
- true ->
- couch_event:notify(DbName, created);
- false ->
- ok
- end,
- gen_server:call(Parent, {open_result, DbName, Res}, infinity),
- unlink(Parent),
- case IsSuccess of
- true ->
- % Track latency times for successful opens
- Diff = timer:now_diff(os:timestamp(), T0) / 1000,
- couch_stats:update_histogram([couchdb, db_open_time], Diff);
- false ->
- % Log unsuccessful open results
- couch_log:info("open_result error ~p for ~s", [Res, DbName])
- end
- end),
- ReqType = case lists:member(create, Options) of
- true -> create;
- false -> open
- end,
- true = ets:insert(couch_dbs, #entry{
- name = DbName,
- pid = Opener,
- lock = locked,
- waiters = [From],
- req_type = ReqType,
- db_options = Options
- }),
- true = ets:insert(couch_dbs_pid_to_name, {Opener, DbName}),
- db_opened(Server, Options).
-
-open_async_int(Server, DbName, Options) ->
- DbNameList = binary_to_list(DbName),
- case check_dbname(DbNameList) of
- ok ->
- case get_engine(Server, DbNameList, Options) of
- {ok, {Module, FilePath}} ->
- couch_db:start_link(Module, DbName, FilePath, Options);
- Error2 ->
- Error2
- end;
- Error1 ->
- Error1
- end.
-
-handle_call(close_lru, _From, #server{lru=Lru} = Server) ->
- case couch_lru:close(Lru) of
- {true, NewLru} ->
- {reply, ok, db_closed(Server#server{lru = NewLru}, [])};
- false ->
- {reply, {error, all_dbs_active}, Server}
- end;
-handle_call(open_dbs_count, _From, Server) ->
- {reply, Server#server.dbs_open, Server};
-handle_call({set_update_lru_on_read, UpdateOnRead}, _From, Server) ->
- {reply, ok, Server#server{update_lru_on_read=UpdateOnRead}};
-handle_call({set_max_dbs_open, Max}, _From, Server) ->
- {reply, ok, Server#server{max_dbs_open=Max}};
-handle_call(reload_engines, _From, Server) ->
- {reply, ok, Server#server{engines = get_configured_engines()}};
-handle_call(get_server, _From, Server) ->
- {reply, {ok, Server}, Server};
-handle_call({open_result, DbName, {ok, Db}}, {Opener, _}, Server) ->
- true = ets:delete(couch_dbs_pid_to_name, Opener),
- DbPid = couch_db:get_pid(Db),
- case ets:lookup(couch_dbs, DbName) of
- [] ->
- % db was deleted during async open
- exit(DbPid, kill),
- {reply, ok, Server};
- [#entry{pid = Opener, req_type = ReqType, waiters = Waiters} = Entry] ->
- link(DbPid),
- [gen_server:reply(Waiter, {ok, Db}) || Waiter <- Waiters],
- % Cancel the creation request if it exists.
- case ReqType of
- {create, DbName, _Options, CrFrom} ->
- gen_server:reply(CrFrom, file_exists);
- _ ->
- ok
- end,
- true = ets:insert(couch_dbs, #entry{
- name = DbName,
- db = Db,
- pid = DbPid,
- lock = unlocked,
- db_options = Entry#entry.db_options,
- start_time = couch_db:get_instance_start_time(Db)
- }),
- true = ets:insert(couch_dbs_pid_to_name, {DbPid, DbName}),
- Lru = case couch_db:is_system_db(Db) of
- false ->
- couch_lru:insert(DbName, Server#server.lru);
- true ->
- Server#server.lru
- end,
- {reply, ok, Server#server{lru = Lru}};
- [#entry{}] ->
- % A mismatched opener pid means that this open_result message
- % was in our mailbox but is now stale. Mostly ignore
- % it except to ensure that the db pid is super dead.
- exit(couch_db:get_pid(Db), kill),
- {reply, ok, Server}
- end;
-handle_call({open_result, DbName, {error, eexist}}, From, Server) ->
- handle_call({open_result, DbName, file_exists}, From, Server);
-handle_call({open_result, DbName, Error}, {Opener, _}, Server) ->
- case ets:lookup(couch_dbs, DbName) of
- [] ->
- % db was deleted during async open
- {reply, ok, Server};
- [#entry{pid = Opener, req_type = ReqType, waiters = Waiters} = Entry] ->
- [gen_server:reply(Waiter, Error) || Waiter <- Waiters],
- true = ets:delete(couch_dbs, DbName),
- true = ets:delete(couch_dbs_pid_to_name, Opener),
- NewServer = case ReqType of
- {create, DbName, Options, CrFrom} ->
- open_async(Server, CrFrom, DbName, Options);
- _ ->
- Server
- end,
- {reply, ok, db_closed(NewServer, Entry#entry.db_options)};
- [#entry{}] ->
- % A mismatched pid means that this open_result message
- % was in our mailbox and is now stale. Ignore it.
- {reply, ok, Server}
- end;
-handle_call({open, DbName, Options}, From, Server) ->
- case ets:lookup(couch_dbs, DbName) of
- [] ->
- case make_room(Server, Options) of
- {ok, Server2} ->
- {noreply, open_async(Server2, From, DbName, Options)};
- CloseError ->
- {reply, CloseError, Server}
- end;
- [#entry{waiters = Waiters} = Entry] when is_list(Waiters) ->
- true = ets:insert(couch_dbs, Entry#entry{waiters = [From | Waiters]}),
- NumWaiters = length(Waiters),
- if NumWaiters =< 10 orelse NumWaiters rem 10 /= 0 -> ok; true ->
- Fmt = "~b clients waiting to open db ~s",
- couch_log:info(Fmt, [length(Waiters), DbName])
- end,
- {noreply, Server};
- [#entry{db = Db}] ->
- {reply, {ok, Db}, Server}
- end;
-handle_call({create, DbName, Options}, From, Server) ->
- case ets:lookup(couch_dbs, DbName) of
- [] ->
- case make_room(Server, Options) of
- {ok, Server2} ->
- CrOptions = [create | Options],
- {noreply, open_async(Server2, From, DbName, CrOptions)};
- CloseError ->
- {reply, CloseError, Server}
- end;
- [#entry{req_type = open} = Entry] ->
- % We're trying to create a database while someone is in
- % the middle of trying to open it. We allow one creator
- % to wait while we figure out if it'll succeed.
- CrOptions = [create | Options],
- Req = {create, DbName, CrOptions, From},
- true = ets:insert(couch_dbs, Entry#entry{req_type = Req}),
- {noreply, Server};
- [_AlreadyRunningDb] ->
- {reply, file_exists, Server}
- end;
-handle_call({delete, DbName, Options}, _From, Server) ->
- DbNameList = binary_to_list(DbName),
- case check_dbname(DbNameList) of
- ok ->
- Server2 =
- case ets:lookup(couch_dbs, DbName) of
- [] -> Server;
- [#entry{pid = Pid, waiters = Waiters} = Entry] when is_list(Waiters) ->
- true = ets:delete(couch_dbs, DbName),
- true = ets:delete(couch_dbs_pid_to_name, Pid),
- exit(Pid, kill),
- [gen_server:reply(Waiter, not_found) || Waiter <- Waiters],
- db_closed(Server, Entry#entry.db_options);
- [#entry{pid = Pid} = Entry] ->
- true = ets:delete(couch_dbs, DbName),
- true = ets:delete(couch_dbs_pid_to_name, Pid),
- exit(Pid, kill),
- db_closed(Server, Entry#entry.db_options)
- end,
-
- couch_db_plugin:on_delete(DbName, Options),
-
- DelOpt = [{context, delete} | Options],
-
- % Make sure to remove all compaction data
- delete_compaction_files(DbNameList, Options),
-
- {ok, {Engine, FilePath}} = get_engine(Server, DbNameList),
- RootDir = Server#server.root_dir,
- case couch_db_engine:delete(Engine, RootDir, FilePath, DelOpt) of
- ok ->
- couch_event:notify(DbName, deleted),
- {reply, ok, Server2};
- {error, enoent} ->
- {reply, not_found, Server2};
- Else ->
- {reply, Else, Server2}
- end;
- Error ->
- {reply, Error, Server}
- end;
-handle_call({db_updated, Db}, _From, Server0) ->
- DbName = couch_db:name(Db),
- StartTime = couch_db:get_instance_start_time(Db),
- Server = try ets:lookup_element(couch_dbs, DbName, #entry.start_time) of
- StartTime ->
- true = ets:update_element(couch_dbs, DbName, {#entry.db, Db}),
- Lru = case couch_db:is_system_db(Db) of
- false -> couch_lru:update(DbName, Server0#server.lru);
- true -> Server0#server.lru
- end,
- Server0#server{lru = Lru};
- _ ->
- Server0
- catch _:_ ->
- Server0
- end,
- {reply, ok, Server}.
-
-handle_cast({update_lru, DbName}, #server{lru = Lru, update_lru_on_read=true} = Server) ->
- {noreply, Server#server{lru = couch_lru:update(DbName, Lru)}};
-handle_cast({update_lru, _DbName}, Server) ->
- {noreply, Server};
-handle_cast({close_db_if_idle, DbName}, Server) ->
- case ets:update_element(couch_dbs, DbName, {#entry.lock, locked}) of
- true ->
- [#entry{db = Db, db_options = DbOpts}] = ets:lookup(couch_dbs, DbName),
- case couch_db:is_idle(Db) of
- true ->
- DbPid = couch_db:get_pid(Db),
- true = ets:delete(couch_dbs, DbName),
- true = ets:delete(couch_dbs_pid_to_name, DbPid),
- exit(DbPid, kill),
- {noreply, db_closed(Server, DbOpts)};
- false ->
- true = ets:update_element(
- couch_dbs, DbName, {#entry.lock, unlocked}),
- {noreply, Server}
- end;
- false ->
- {noreply, Server}
- end;
-
-handle_cast(Msg, Server) ->
- {stop, {unknown_cast_message, Msg}, Server}.
-
-code_change(_OldVsn, #server{}=State, _Extra) ->
- {ok, State}.
-
-handle_info({'EXIT', _Pid, config_change}, Server) ->
- {stop, config_change, Server};
-handle_info({'EXIT', Pid, Reason}, Server) ->
- case ets:lookup(couch_dbs_pid_to_name, Pid) of
- [{Pid, DbName}] ->
- [#entry{waiters = Waiters} = Entry] = ets:lookup(couch_dbs, DbName),
- if Reason /= snappy_nif_not_loaded -> ok; true ->
- Msg = io_lib:format("To open the database `~s`, Apache CouchDB "
- "must be built with Erlang OTP R13B04 or higher.", [DbName]),
- couch_log:error(Msg, [])
- end,
- % We kill databases on purpose so there's no reason
- % to log that fact. So we restrict logging to "interesting"
- % reasons.
- if Reason == normal orelse Reason == killed -> ok; true ->
- couch_log:info("db ~s died with reason ~p", [DbName, Reason])
- end,
- if not is_list(Waiters) -> ok; true ->
- [gen_server:reply(Waiter, Reason) || Waiter <- Waiters]
- end,
- true = ets:delete(couch_dbs, DbName),
- true = ets:delete(couch_dbs_pid_to_name, Pid),
- {noreply, db_closed(Server, Entry#entry.db_options)};
- [] ->
- {noreply, Server}
- end;
-handle_info(restart_config_listener, State) ->
- ok = config:listen_for_changes(?MODULE, nil),
- {noreply, State};
-handle_info(Info, Server) ->
- {stop, {unknown_message, Info}, Server}.
-
-db_opened(Server, Options) ->
- case lists:member(sys_db, Options) of
- false -> Server#server{dbs_open=Server#server.dbs_open + 1};
- true -> Server
- end.
-
-db_closed(Server, Options) ->
- case lists:member(sys_db, Options) of
- false -> Server#server{dbs_open=Server#server.dbs_open - 1};
- true -> Server
- end.
-
-validate_open_or_create(DbName, Options) ->
- case check_dbname(DbName) of
- ok ->
- ok;
- DbNameError ->
- throw({?MODULE, DbNameError})
- end,
-
- case check_engine(Options) of
- ok ->
- ok;
- EngineError ->
- throw({?MODULE, EngineError})
- end,
-
- case ets:lookup(couch_dbs_locks, DbName) of
- [] ->
- ok;
- [{DbName, Reason}] ->
- throw({?MODULE, {error, {locked, Reason}}})
- end.
-
-get_configured_engines() ->
- ConfigEntries = config:get("couchdb_engines"),
- Engines = lists:flatmap(fun({Extension, ModuleStr}) ->
- try
- [{Extension, list_to_atom(ModuleStr)}]
- catch _T:_R ->
- []
- end
- end, ConfigEntries),
- case Engines of
- [] ->
- [{"couch", couch_bt_engine}];
- Else ->
- Else
- end.
-
-
-get_engine(Server, DbName, Options) ->
- #server{
- root_dir = RootDir,
- engines = Engines
- } = Server,
- case couch_util:get_value(engine, Options) of
- Ext when is_binary(Ext) ->
- ExtStr = binary_to_list(Ext),
- case lists:keyfind(ExtStr, 1, Engines) of
- {ExtStr, Engine} ->
- Path = make_filepath(RootDir, DbName, ExtStr),
- {ok, {Engine, Path}};
- false ->
- {error, {invalid_engine_extension, Ext}}
- end;
- _ ->
- get_engine(Server, DbName)
- end.
-
-
-get_engine(Server, DbName) ->
- #server{
- root_dir = RootDir,
- engines = Engines
- } = Server,
- Possible = get_possible_engines(DbName, RootDir, Engines),
- case Possible of
- [] ->
- get_default_engine(Server, DbName);
- [Engine] ->
- {ok, Engine};
- _ ->
- erlang:error(engine_conflict)
- end.
-
-
-get_possible_engines(DbName, RootDir, Engines) ->
- lists:foldl(fun({Extension, Engine}, Acc) ->
- Path = make_filepath(RootDir, DbName, Extension),
- case couch_db_engine:exists(Engine, Path) of
- true ->
- [{Engine, Path} | Acc];
- false ->
- Acc
- end
- end, [], Engines).
-
-
-get_default_engine(Server, DbName) ->
- #server{
- root_dir = RootDir,
- engines = Engines
- } = Server,
- Default = {couch_bt_engine, make_filepath(RootDir, DbName, "couch")},
- case config:get("couchdb", "default_engine") of
- Extension when is_list(Extension) ->
- case lists:keyfind(Extension, 1, Engines) of
- {Extension, Module} ->
- {ok, {Module, make_filepath(RootDir, DbName, Extension)}};
- false ->
- Fmt = "Invalid storage engine extension ~s,"
- " configured engine extensions are: ~s",
- Exts = [E || {E, _} <- Engines],
- Args = [Extension, string:join(Exts, ", ")],
- couch_log:error(Fmt, Args),
- {ok, Default}
- end;
- _ ->
- {ok, Default}
- end.
-
-
-make_filepath(RootDir, DbName, Extension) when is_binary(RootDir) ->
- make_filepath(binary_to_list(RootDir), DbName, Extension);
-make_filepath(RootDir, DbName, Extension) when is_binary(DbName) ->
- make_filepath(RootDir, binary_to_list(DbName), Extension);
-make_filepath(RootDir, DbName, Extension) when is_binary(Extension) ->
- make_filepath(RootDir, DbName, binary_to_list(Extension));
-make_filepath(RootDir, DbName, Extension) ->
- filename:join([RootDir, "./" ++ DbName ++ "." ++ Extension]).
-
-
-get_engine_extensions() ->
- case config:get("couchdb_engines") of
- [] ->
- ["couch"];
- Entries ->
- [Ext || {Ext, _Mod} <- Entries]
- end.
-
-
-check_engine(Options) ->
- case couch_util:get_value(engine, Options) of
- Ext when is_binary(Ext) ->
- ExtStr = binary_to_list(Ext),
- Extensions = get_engine_extensions(),
- case lists:member(ExtStr, Extensions) of
- true ->
- ok;
- false ->
- {error, {invalid_engine_extension, Ext}}
- end;
- _ ->
- ok
- end.
-
-
-get_engine_path(DbName, Engine) when is_binary(DbName), is_atom(Engine) ->
- RootDir = config:get("couchdb", "database_dir", "."),
- case lists:keyfind(Engine, 2, get_configured_engines()) of
- {Ext, Engine} ->
- {ok, make_filepath(RootDir, DbName, Ext)};
- false ->
- {error, {invalid_engine, Engine}}
- end.
-
-lock(DbName, Reason) when is_binary(DbName), is_binary(Reason) ->
- case ets:lookup(couch_dbs, DbName) of
- [] ->
- true = ets:insert(couch_dbs_locks, {DbName, Reason}),
- ok;
- [#entry{}] ->
- {error, already_opened}
- end.
-
-unlock(DbName) when is_binary(DbName) ->
- true = ets:delete(couch_dbs_locks, DbName),
- ok.
-
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-setup_all() ->
- ok = meck:new(config, [passthrough]),
- ok = meck:expect(config, get, fun config_get/3),
- ok.
-
-teardown_all(_) ->
- meck:unload().
-
-config_get("couchdb", "users_db_suffix", _) -> "users_db";
-config_get(_, _, _) -> undefined.
-
-maybe_add_sys_db_callbacks_pass_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- [
- fun should_add_sys_db_callbacks/0,
- fun should_not_add_sys_db_callbacks/0
- ]
- }.
-
-should_add_sys_db_callbacks() ->
- Cases = [
- "shards/00000000-3fffffff/foo/users_db.1415960794.couch",
- "shards/00000000-3fffffff/foo/users_db.1415960794",
- "shards/00000000-3fffffff/foo/users_db",
- "shards/00000000-3fffffff/users_db.1415960794.couch",
- "shards/00000000-3fffffff/users_db.1415960794",
- "shards/00000000-3fffffff/users_db",
-
- "shards/00000000-3fffffff/_users.1415960794.couch",
- "shards/00000000-3fffffff/_users.1415960794",
- "shards/00000000-3fffffff/_users",
-
- "foo/users_db.couch",
- "foo/users_db",
- "users_db.couch",
- "users_db",
- "foo/_users.couch",
- "foo/_users",
- "_users.couch",
- "_users",
-
- "shards/00000000-3fffffff/foo/_replicator.1415960794.couch",
- "shards/00000000-3fffffff/foo/_replicator.1415960794",
- "shards/00000000-3fffffff/_replicator",
- "foo/_replicator.couch",
- "foo/_replicator",
- "_replicator.couch",
- "_replicator"
- ],
- lists:foreach(fun(DbName) ->
- check_case(DbName, true),
- check_case(?l2b(DbName), true)
- end, Cases).
-
-should_not_add_sys_db_callbacks() ->
- Cases = [
- "shards/00000000-3fffffff/foo/mydb.1415960794.couch",
- "shards/00000000-3fffffff/foo/mydb.1415960794",
- "shards/00000000-3fffffff/mydb",
- "foo/mydb.couch",
- "foo/mydb",
- "mydb.couch",
- "mydb"
- ],
- lists:foreach(fun(DbName) ->
- check_case(DbName, false),
- check_case(?l2b(DbName), false)
- end, Cases).
-
-check_case(DbName, IsAdded) ->
- Options = maybe_add_sys_db_callbacks(DbName, [other_options]),
- ?assertEqual(IsAdded, lists:member(sys_db, Options)).
-
--endif.
diff --git a/src/couch/src/couch_server_int.hrl b/src/couch/src/couch_server_int.hrl
deleted file mode 100644
index 537a6abb9..000000000
--- a/src/couch/src/couch_server_int.hrl
+++ /dev/null
@@ -1,23 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
--record(entry, {
- name,
- db,
- pid,
- lock,
- waiters,
- req_type,
- db_options,
- start_time
-}).
diff --git a/src/couch/src/couch_stream.erl b/src/couch/src/couch_stream.erl
deleted file mode 100644
index d8b7e0ffe..000000000
--- a/src/couch/src/couch_stream.erl
+++ /dev/null
@@ -1,322 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_stream).
--behaviour(gen_server).
--vsn(1).
-
-
--export([
- open/1,
- open/2,
- close/1,
-
- copy/2,
- write/2,
- to_disk_term/1,
-
- foldl/3,
- foldl/4,
- foldl_decode/5,
- range_foldl/5
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3,
- format_status/2
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
-
--define(DEFAULT_BUFFER_SIZE, 4096).
-
-
--record(stream, {
- engine,
- opener_monitor,
- written_pointers=[],
- buffer_list = [],
- buffer_len = 0,
- max_buffer,
- written_len = 0,
- md5,
- % md5 of the content without any transformation applied (e.g. compression)
- % needed for the attachment upload integrity check (ticket 558)
- identity_md5,
- identity_len = 0,
- encoding_fun,
- end_encoding_fun
-}).
-
-
-open({_StreamEngine, _StreamEngineState} = Engine) ->
- open(Engine, []).
-
-
-open({_StreamEngine, _StreamEngineState} = Engine, Options) ->
- gen_server:start_link(?MODULE, {Engine, self(), erlang:get(io_priority), Options}, []).
-
-
-close(Pid) ->
- gen_server:call(Pid, close, infinity).
-
-
-copy(Src, Dst) ->
- foldl(Src, fun(Bin, _) ->
- ok = write(Dst, Bin)
- end, ok).
-
-
-write(_Pid, <<>>) ->
- ok;
-write(Pid, Bin) ->
- gen_server:call(Pid, {write, Bin}, infinity).
-
-
-to_disk_term({Engine, EngineState}) ->
- Engine:to_disk_term(EngineState).
-
-
-foldl({Engine, EngineState}, Fun, Acc) ->
- Engine:foldl(EngineState, Fun, Acc).
-
-
-foldl(Engine, <<>>, Fun, Acc) ->
- foldl(Engine, Fun, Acc);
-foldl(Engine, Md5, UserFun, UserAcc) ->
- InitAcc = {couch_hash:md5_hash_init(), UserFun, UserAcc},
- {Md5Acc, _, OutAcc} = foldl(Engine, fun foldl_md5/2, InitAcc),
- Md5 = couch_hash:md5_hash_final(Md5Acc),
- OutAcc.
-
-
-foldl_decode(Engine, Md5, Enc, UserFun, UserAcc1) ->
- {DecDataFun, DecEndFun} = case Enc of
- gzip -> ungzip_init();
- identity -> identity_enc_dec_funs()
- end,
- InitAcc = {DecDataFun, UserFun, UserAcc1},
- {_, _, UserAcc2} = foldl(Engine, Md5, fun foldl_decode/2, InitAcc),
- DecEndFun(),
- UserAcc2.
-
-
-range_foldl(Engine, From, To, UserFun, UserAcc) when To >= From ->
- NewEngine = do_seek(Engine, From),
- InitAcc = {To - From, UserFun, UserAcc},
- try
- {_, _, UserAcc2} = foldl(NewEngine, fun foldl_length/2, InitAcc),
- UserAcc2
- catch
- throw:{finished, UserAcc3} ->
- UserAcc3
- end.
-
-
-foldl_md5(Bin, {Md5Acc, UserFun, UserAcc}) ->
- NewMd5Acc = couch_hash:md5_hash_update(Md5Acc, Bin),
- {NewMd5Acc, UserFun, UserFun(Bin, UserAcc)}.
-
-
-foldl_decode(EncBin, {DecFun, UserFun, UserAcc}) ->
- case DecFun(EncBin) of
- <<>> -> {DecFun, UserFun, UserAcc};
- Dec -> {DecFun, UserFun, UserFun(Dec, UserAcc)}
- end.
-
-
-foldl_length(Bin, {Length, UserFun, UserAcc}) ->
- BinSize = size(Bin),
- case BinSize =< Length of
- true ->
- {Length - BinSize, UserFun, UserFun(Bin, UserAcc)};
- false ->
- <<Trunc:Length/binary, _/binary>> = Bin,
- throw({finished, UserFun(Trunc, UserAcc)})
- end.
-
-gzip_init(Options) ->
- case couch_util:get_value(compression_level, Options, 0) of
- Lvl when Lvl >= 1 andalso Lvl =< 9 ->
- Z = zlib:open(),
- % 15 = ?MAX_WBITS (defined in the zlib module)
- % the 16 + ?MAX_WBITS formula was obtained by inspecting zlib:gzip/1
- ok = zlib:deflateInit(Z, Lvl, deflated, 16 + 15, 8, default),
- {
- fun(Data) ->
- zlib:deflate(Z, Data)
- end,
- fun() ->
- Last = zlib:deflate(Z, [], finish),
- ok = zlib:deflateEnd(Z),
- ok = zlib:close(Z),
- Last
- end
- };
- _ ->
- identity_enc_dec_funs()
- end.
-
-ungzip_init() ->
- Z = zlib:open(),
- zlib:inflateInit(Z, 16 + 15),
- {
- fun(Data) ->
- zlib:inflate(Z, Data)
- end,
- fun() ->
- ok = zlib:inflateEnd(Z),
- ok = zlib:close(Z)
- end
- }.
-
-identity_enc_dec_funs() ->
- {
- fun(Data) -> Data end,
- fun() -> [] end
- }.
-
-
-init({Engine, OpenerPid, OpenerPriority, Options}) ->
- erlang:put(io_priority, OpenerPriority),
- {EncodingFun, EndEncodingFun} =
- case couch_util:get_value(encoding, Options, identity) of
- identity -> identity_enc_dec_funs();
- gzip -> gzip_init(Options)
- end,
- {ok, #stream{
- engine=Engine,
- opener_monitor=erlang:monitor(process, OpenerPid),
- md5=couch_hash:md5_hash_init(),
- identity_md5=couch_hash:md5_hash_init(),
- encoding_fun=EncodingFun,
- end_encoding_fun=EndEncodingFun,
- max_buffer=couch_util:get_value(
- buffer_size, Options, ?DEFAULT_BUFFER_SIZE)
- }
- }.
-
-terminate(_Reason, _Stream) ->
- ok.
-
-handle_call({write, Bin}, _From, Stream) ->
- BinSize = iolist_size(Bin),
- #stream{
- engine = Engine,
- written_len = WrittenLen,
- buffer_len = BufferLen,
- buffer_list = Buffer,
- max_buffer = Max,
- md5 = Md5,
- identity_md5 = IdenMd5,
- identity_len = IdenLen,
- encoding_fun = EncodingFun} = Stream,
- if BinSize + BufferLen > Max ->
- WriteBin = lists:reverse(Buffer, [Bin]),
- IdenMd5_2 = couch_hash:md5_hash_update(IdenMd5, WriteBin),
- case EncodingFun(WriteBin) of
- [] ->
- % case where the encoder did some internal buffering
- % (zlib does it for example)
- NewEngine = Engine,
- WrittenLen2 = WrittenLen,
- Md5_2 = Md5;
- WriteBin2 ->
- NewEngine = do_write(Engine, WriteBin2),
- WrittenLen2 = WrittenLen + iolist_size(WriteBin2),
- Md5_2 = couch_hash:md5_hash_update(Md5, WriteBin2)
- end,
-
- {reply, ok, Stream#stream{
- engine = NewEngine,
- written_len=WrittenLen2,
- buffer_list=[],
- buffer_len=0,
- md5=Md5_2,
- identity_md5=IdenMd5_2,
- identity_len=IdenLen + BinSize}, hibernate};
- true ->
- {reply, ok, Stream#stream{
- buffer_list=[Bin|Buffer],
- buffer_len=BufferLen + BinSize,
- identity_len=IdenLen + BinSize}}
- end;
-handle_call(close, _From, Stream) ->
- #stream{
- engine = Engine,
- opener_monitor = MonRef,
- written_len = WrittenLen,
- buffer_list = Buffer,
- md5 = Md5,
- identity_md5 = IdenMd5,
- identity_len = IdenLen,
- encoding_fun = EncodingFun,
- end_encoding_fun = EndEncodingFun} = Stream,
-
- WriteBin = lists:reverse(Buffer),
- IdenMd5Final = couch_hash:md5_hash_final(couch_hash:md5_hash_update(IdenMd5, WriteBin)),
- WriteBin2 = EncodingFun(WriteBin) ++ EndEncodingFun(),
- Md5Final = couch_hash:md5_hash_final(couch_hash:md5_hash_update(Md5, WriteBin2)),
- Result = case WriteBin2 of
- [] ->
- {do_finalize(Engine), WrittenLen, IdenLen, Md5Final, IdenMd5Final};
- _ ->
- NewEngine = do_write(Engine, WriteBin2),
- StreamLen = WrittenLen + iolist_size(WriteBin2),
- {do_finalize(NewEngine), StreamLen, IdenLen, Md5Final, IdenMd5Final}
- end,
- erlang:demonitor(MonRef),
- {stop, normal, Result, Stream}.
-
-handle_cast(_Msg, State) ->
- {noreply,State}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-handle_info({'DOWN', Ref, _, _, _}, #stream{opener_monitor=Ref} = State) ->
- {stop, normal, State};
-handle_info(_Info, State) ->
- {noreply, State}.
-
-
-format_status(_Opt, [_PDict, Stream]) ->
- #stream{
- written_pointers=Pointers,
- buffer_list = Buffer
- } = Stream,
- Scrubbed = Stream#stream{
- written_pointers={length, length(Pointers)},
- buffer_list = {length, length(Buffer)}
- },
- [{data, [{"State",
- ?record_to_keyval(stream, Scrubbed)
- }]}].
-
-do_seek({Engine, EngineState}, Offset) ->
- {ok, NewState} = Engine:seek(EngineState, Offset),
- {Engine, NewState}.
-
-do_write({Engine, EngineState}, Data) ->
- {ok, NewState} = Engine:write(EngineState, Data),
- {Engine, NewState}.
-
-do_finalize({Engine, EngineState}) ->
- {ok, NewState} = Engine:finalize(EngineState),
- {Engine, NewState}.
-
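For reference, the gzip framing in the removed gzip_init/ungzip_init relies on zlib's windowBits convention: adding 16 to the usual 15-bit window asks zlib to emit (or expect) a gzip header and trailer. A small, self-contained sketch of that round trip; the module and function names are illustrative:

-module(gzip_roundtrip_sketch).
-export([roundtrip/1]).

%% Returns true when gzip-compressing and re-inflating Data is lossless.
roundtrip(Data) when is_binary(Data) ->
    Z = zlib:open(),
    %% 16 + 15: gzip wrapper on top of the 15-bit deflate window,
    %% the same formula zlib:gzip/1 uses internally.
    ok = zlib:deflateInit(Z, default, deflated, 16 + 15, 8, default),
    Gz = iolist_to_binary([zlib:deflate(Z, Data),
                           zlib:deflate(Z, [], finish)]),
    ok = zlib:deflateEnd(Z),
    ok = zlib:close(Z),

    U = zlib:open(),
    ok = zlib:inflateInit(U, 16 + 15),
    Plain = iolist_to_binary(zlib:inflate(U, Gz)),
    ok = zlib:inflateEnd(U),
    ok = zlib:close(U),
    Plain =:= Data.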
diff --git a/src/couch/src/couch_sup.erl b/src/couch/src/couch_sup.erl
index 6e7ef98b7..7a1afae8b 100644
--- a/src/couch/src/couch_sup.erl
+++ b/src/couch/src/couch_sup.erl
@@ -25,6 +25,7 @@
-include_lib("couch/include/couch_db.hrl").
+-include_lib("kernel/include/logger.hrl").
start_link() ->
@@ -93,6 +94,11 @@ assert_admins() ->
couch_log:info("Preflight check: Asserting Admin Account~n", []),
case {config:get("admins"), os:getenv("COUCHDB_TEST_ADMIN_PARTY_OVERRIDE")} of
{[], false} ->
+ ?LOG_INFO(#{
+ what => admin_account_missing,
+ details => "No admin account found, aborting startup. Please configure "
+ "an admin account in your local.ini file."
+ }),
couch_log:info("~n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%~n"
++ " No Admin Account Found, aborting startup. ~n"
++ " Please configure an admin account in your local.ini file. ~n"
@@ -104,6 +110,11 @@ assert_admins() ->
end.
send_no_admin_account_error_message() ->
+ ?LOG_ERROR(#{
+ what => admin_account_missing,
+ details => "No admin account configured. Please configure an admin "
+ "account in your local.ini file and restart CouchDB."
+ }),
couch_log:error("No Admin Account configured."
++ " Please configure an Admin Account in your local.ini file and restart CouchDB.~n", []),
FiveMinutes = 5 * 1000 * 60,
@@ -118,21 +129,37 @@ maybe_launch_admin_annoyance_reporter() ->
notify_starting() ->
+ ?LOG_INFO(#{
+ what => starting_couchdb,
+ version => couch_server:get_version()
+ }),
couch_log:info("Apache CouchDB ~s is starting.~n", [
couch_server:get_version()
]).
notify_started() ->
+ ?LOG_INFO(#{
+ what => starting_couchdb_complete,
+ time_to_relax => true
+ }),
couch_log:info("Apache CouchDB has started. Time to relax.~n", []).
notify_error(Error) ->
+ ?LOG_ERROR(#{
+ what => error_on_startup,
+ details => Error
+ }),
couch_log:error("Error starting Apache CouchDB:~n~n ~p~n~n", [Error]).
notify_uris() ->
lists:foreach(fun(Uri) ->
+ ?LOG_INFO(#{
+ what => couchdb_listener_started,
+ uri => Uri
+ }),
couch_log:info("Apache CouchDB has started on ~s", [Uri])
end, get_uris()).
@@ -193,7 +220,12 @@ write_file(FileName, Contents) ->
ok ->
ok;
{error, Reason} ->
+ ?LOG_ERROR(#{
+ what => file_write_failure,
+ filename => FileName,
+ error => file:format_error(Reason)
+ }),
Args = [FileName, file:format_error(Reason)],
- couch_log:error("Failed ot write ~s :: ~s", Args),
+ couch_log:error("Failed to write ~s :: ~s", Args),
throw({error, Reason})
end.
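The pattern added throughout this file pairs a structured report for OTP's logger with the existing couch_log text message. A minimal sketch of the same dual-logging idiom; the module name and report fields below are illustrative:

-module(dual_log_sketch).
-export([notify_listener/1]).

-include_lib("kernel/include/logger.hrl").

notify_listener(Uri) ->
    %% structured report, consumable by any logger handler or filter
    ?LOG_INFO(#{what => couchdb_listener_started, uri => Uri}),
    %% legacy human-readable line kept for existing log consumers
    couch_log:info("Apache CouchDB has started on ~s", [Uri]).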
diff --git a/src/couch/src/couch_task_status.erl b/src/couch/src/couch_task_status.erl
deleted file mode 100644
index 4083c3f81..000000000
--- a/src/couch/src/couch_task_status.erl
+++ /dev/null
@@ -1,162 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_task_status).
--behaviour(gen_server).
--vsn(1).
-
-% This module is used to track the status of long running tasks.
-% Long running tasks register themselves, via a call to add_task/1, and then
-% update their status properties via update/1. The status of a task is a
-% list of properties. Each property is a tuple whose first element is either
-% an atom or a binary and whose second element is an EJSON value. When
-% a task updates its status, it can override some or all of its properties.
-% The properties {started_on, UnixTimestamp}, {updated_on, UnixTimestamp} and
-% {pid, ErlangPid} are automatically added by this module.
-% When a tracked task dies, its status will be automatically removed from
-% memory. To get the tasks list, call the all/0 function.
-
--export([start_link/0, stop/0]).
--export([all/0, add_task/1, update/1, get/1, set_update_frequency/1]).
--export([is_task_added/0]).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(set(L, K, V), lists:keystore(K, 1, L, {K, V})).
-
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-
-stop() ->
- gen_server:cast(?MODULE, stop).
-
-
-all() ->
- gen_server:call(?MODULE, all).
-
-
-add_task(Props) ->
- put(task_status_update, {{0, 0, 0}, 0}),
- Ts = timestamp(),
- TaskProps = lists:ukeysort(
- 1, [{started_on, Ts}, {updated_on, Ts} | Props]),
- put(task_status_props, TaskProps),
- gen_server:call(?MODULE, {add_task, TaskProps}).
-
-
-is_task_added() ->
- is_list(erlang:get(task_status_props)).
-
-
-set_update_frequency(Msecs) ->
- put(task_status_update, {{0, 0, 0}, Msecs * 1000}).
-
-
-update(Props) ->
- MergeProps = lists:ukeysort(1, Props),
- CurrProps = erlang:get(task_status_props),
- TaskProps = lists:ukeymerge(1, MergeProps, CurrProps),
- case TaskProps == CurrProps of
- true ->
- maybe_persist(TaskProps);
- false ->
- persist(TaskProps)
- end.
-
-
-get(Props) when is_list(Props) ->
- TaskProps = erlang:get(task_status_props),
- [couch_util:get_value(P, TaskProps) || P <- Props];
-get(Prop) ->
- TaskProps = erlang:get(task_status_props),
- couch_util:get_value(Prop, TaskProps).
-
-
-maybe_persist(TaskProps) ->
- {LastUpdateTime, Frequency} = erlang:get(task_status_update),
- case timer:now_diff(Now = os:timestamp(), LastUpdateTime) >= Frequency of
- true ->
- put(task_status_update, {Now, Frequency}),
- persist(TaskProps);
- false ->
- ok
- end.
-
-
-persist(TaskProps0) ->
- TaskProps = ?set(TaskProps0, updated_on, timestamp(os:timestamp())),
- put(task_status_props, TaskProps),
- gen_server:cast(?MODULE, {update_status, self(), TaskProps}).
-
-
-init([]) ->
- % read configuration settings and register for configuration changes
- ets:new(?MODULE, [ordered_set, protected, named_table]),
- {ok, nil}.
-
-
-terminate(_Reason,_State) ->
- ok.
-
-
-handle_call({add_task, TaskProps}, {From, _}, Server) ->
- case ets:lookup(?MODULE, From) of
- [] ->
- true = ets:insert(?MODULE, {From, TaskProps}),
- erlang:monitor(process, From),
- {reply, ok, Server};
- [_] ->
- {reply, {add_task_error, already_registered}, Server}
- end;
-handle_call(all, _, Server) ->
- All = [
- [{pid, ?l2b(pid_to_list(Pid))} | TaskProps]
- ||
- {Pid, TaskProps} <- ets:tab2list(?MODULE)
- ],
- {reply, All, Server}.
-
-
-handle_cast({update_status, Pid, NewProps}, Server) ->
- case ets:lookup(?MODULE, Pid) of
- [{Pid, _CurProps}] ->
- couch_log:debug("New task status for ~p: ~p", [Pid, NewProps]),
- true = ets:insert(?MODULE, {Pid, NewProps});
- _ ->
- % Task finished/died in the meanwhile and we must have received
- % a monitor message before this call - ignore.
- ok
- end,
- {noreply, Server};
-handle_cast(stop, State) ->
- {stop, normal, State}.
-
-handle_info({'DOWN', _MonitorRef, _Type, Pid, _Info}, Server) ->
- %% should we also erlang:demonitor(_MonitorRef), ?
- ets:delete(?MODULE, Pid),
- {noreply, Server}.
-
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-
-timestamp() ->
- timestamp(os:timestamp()).
-
-timestamp({Mega, Secs, _}) ->
- Mega * 1000000 + Secs.
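As the removed module comment explains, a long-running job registered itself with add_task/1 and then pushed property updates with update/1, throttled by set_update_frequency/1. A hedged sketch of how a caller used that (now removed) API; the task properties below are illustrative:

-module(task_status_caller_sketch).
-export([run/1]).

run(DbName) ->
    couch_task_status:add_task([
        {type, database_compaction},
        {database, DbName},
        {progress, 0}
    ]),
    %% rate-limit status writes to at most one every 500 ms
    couch_task_status:set_update_frequency(500),
    lists:foreach(fun(Pct) ->
        %% update/1 overrides only the properties it names
        couch_task_status:update([{progress, Pct}])
    end, lists:seq(10, 100, 10)).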
diff --git a/src/couch/src/couch_users_db.erl b/src/couch/src/couch_users_db.erl
deleted file mode 100644
index e6e1c4892..000000000
--- a/src/couch/src/couch_users_db.erl
+++ /dev/null
@@ -1,137 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_users_db).
-
--export([before_doc_update/3, after_doc_read/2, strip_non_public_fields/1]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(NAME, <<"name">>).
--define(PASSWORD, <<"password">>).
--define(DERIVED_KEY, <<"derived_key">>).
--define(PASSWORD_SCHEME, <<"password_scheme">>).
--define(SIMPLE, <<"simple">>).
--define(PASSWORD_SHA, <<"password_sha">>).
--define(PBKDF2, <<"pbkdf2">>).
--define(ITERATIONS, <<"iterations">>).
--define(SALT, <<"salt">>).
--define(replace(L, K, V), lists:keystore(K, 1, L, {K, V})).
-
-% If the request's userCtx identifies an admin
-% -> save_doc (see below)
-%
-% If the request's userCtx.name is null:
-% -> save_doc
-% // this is an anonymous user registering a new document
-% // in case a user doc with the same id already exists, the anonymous
-% // user will get a regular doc update conflict.
-% If the request's userCtx.name doesn't match the doc's name
-% -> 404 // Not Found
-% Else
-% -> save_doc
-before_doc_update(Doc, Db, _UpdateType) ->
- #user_ctx{name=Name} = couch_db:get_user_ctx(Db),
- DocName = get_doc_name(Doc),
- case (catch couch_db:check_is_admin(Db)) of
- ok ->
- save_doc(Doc);
- _ when Name =:= DocName orelse Name =:= null ->
- save_doc(Doc);
- _ ->
- throw(not_found)
- end.
-
-% If newDoc.password == null || newDoc.password == undefined:
-% ->
-% noop
-% Else -> // calculate password hash server side
-% newDoc.password_sha = hash_pw(newDoc.password + salt)
-% newDoc.salt = salt
-% newDoc.password = null
-save_doc(#doc{body={Body}} = Doc) ->
- %% Support both schemes to smooth migration from legacy scheme
- Scheme = config:get("couch_httpd_auth", "password_scheme", "pbkdf2"),
- case {couch_util:get_value(?PASSWORD, Body), Scheme} of
- {null, _} -> % server admins don't have a user-db password entry
- Doc;
- {undefined, _} ->
- Doc;
- {ClearPassword, "simple"} -> % deprecated
- Salt = couch_uuids:random(),
- PasswordSha = couch_passwords:simple(ClearPassword, Salt),
- Body0 = ?replace(Body, ?PASSWORD_SCHEME, ?SIMPLE),
- Body1 = ?replace(Body0, ?SALT, Salt),
- Body2 = ?replace(Body1, ?PASSWORD_SHA, PasswordSha),
- Body3 = proplists:delete(?PASSWORD, Body2),
- Doc#doc{body={Body3}};
- {ClearPassword, "pbkdf2"} ->
- Iterations = list_to_integer(config:get("couch_httpd_auth", "iterations", "1000")),
- Salt = couch_uuids:random(),
- DerivedKey = couch_passwords:pbkdf2(ClearPassword, Salt, Iterations),
- Body0 = ?replace(Body, ?PASSWORD_SCHEME, ?PBKDF2),
- Body1 = ?replace(Body0, ?ITERATIONS, Iterations),
- Body2 = ?replace(Body1, ?DERIVED_KEY, DerivedKey),
- Body3 = ?replace(Body2, ?SALT, Salt),
- Body4 = proplists:delete(?PASSWORD, Body3),
- Doc#doc{body={Body4}};
- {_ClearPassword, Scheme} ->
- couch_log:error("[couch_httpd_auth] password_scheme value of '~p' is invalid.", [Scheme]),
- throw({forbidden, "Server cannot hash passwords at this time."})
- end.
-
-% If the doc is a design doc
-% If the request's userCtx identifies an admin
-% -> return doc
-% Else
-% -> 403 // Forbidden
-% If the request's userCtx identifies an admin
-% -> return doc
-% If the request's userCtx.name doesn't match the doc's name
-% -> 404 // Not Found
-% Else
-% -> return doc
-after_doc_read(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc, Db) ->
- case (catch couch_db:check_is_admin(Db)) of
- ok ->
- Doc;
- _ ->
- throw({forbidden,
- <<"Only administrators can view design docs in the users database.">>})
- end;
-after_doc_read(Doc, Db) ->
- #user_ctx{name=Name} = couch_db:get_user_ctx(Db),
- DocName = get_doc_name(Doc),
- case (catch couch_db:check_is_admin(Db)) of
- ok ->
- Doc;
- _ when Name =:= DocName ->
- Doc;
- _ ->
- Doc1 = strip_non_public_fields(Doc),
- case Doc1 of
- #doc{body={[]}} ->
- throw(not_found);
- _ ->
- Doc1
- end
- end.
-
-get_doc_name(#doc{id= <<"org.couchdb.user:", Name/binary>>}) ->
- Name;
-get_doc_name(_) ->
- undefined.
-
-strip_non_public_fields(#doc{body={Props}}=Doc) ->
- Public = re:split(config:get("couch_httpd_auth", "public_fields", ""),
- "\\s*,\\s*", [{return, binary}]),
- Doc#doc{body={[{K, V} || {K, V} <- Props, lists:member(K, Public)]}}.
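The pbkdf2 branch of save_doc/1 above is the interesting part: the clear-text password is replaced by a salted derived key before the user doc is written. A hedged, stand-alone restatement of that transformation using the same CouchDB helpers; the module wrapper and function name are illustrative:

-module(pbkdf2_body_sketch).
-export([hash_password_fields/2]).

%% Body is the user doc body as a property list with binary keys.
hash_password_fields(ClearPassword, Body) ->
    Iterations = list_to_integer(
        config:get("couch_httpd_auth", "iterations", "1000")),
    Salt = couch_uuids:random(),
    DerivedKey = couch_passwords:pbkdf2(ClearPassword, Salt, Iterations),
    Set = fun(K, V, B) -> lists:keystore(K, 1, B, {K, V}) end,
    Body1 = Set(<<"password_scheme">>, <<"pbkdf2">>, Body),
    Body2 = Set(<<"iterations">>, Iterations, Body1),
    Body3 = Set(<<"derived_key">>, DerivedKey, Body2),
    Body4 = Set(<<"salt">>, Salt, Body3),
    proplists:delete(<<"password">>, Body4).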
diff --git a/src/couch/src/couch_util.erl b/src/couch/src/couch_util.erl
index 8d643398c..af7b7ff0a 100644
--- a/src/couch/src/couch_util.erl
+++ b/src/couch/src/couch_util.erl
@@ -28,7 +28,6 @@
-export([url_strip_password/1]).
-export([encode_doc_id/1]).
-export([normalize_ddoc_id/1]).
--export([with_db/2]).
-export([rfc1123_date/0, rfc1123_date/1]).
-export([integer_to_boolean/1, boolean_to_integer/1]).
-export([validate_positive_int/1]).
@@ -260,9 +259,9 @@ json_apply_field({Key, NewValue}, [], Acc) ->
{[{Key, NewValue}|Acc]}.
json_user_ctx(Db) ->
- ShardName = couch_db:name(Db),
- Ctx = couch_db:get_user_ctx(Db),
- {[{<<"db">>, mem3:dbname(ShardName)},
+ #{name := DbName} = Db,
+ Ctx = fabric2_db:get_user_ctx(Db),
+ {[{<<"db">>, DbName},
{<<"name">>,Ctx#user_ctx.name},
{<<"roles">>,Ctx#user_ctx.roles}]}.
@@ -565,25 +564,6 @@ normalize_ddoc_id(<<"_design/", _/binary>> = DDocId) ->
normalize_ddoc_id(DDocId) when is_binary(DDocId) ->
<<"_design/", DDocId/binary>>.
-with_db(DbName, Fun) when is_binary(DbName) ->
- case couch_db:open_int(DbName, [?ADMIN_CTX]) of
- {ok, Db} ->
- try
- Fun(Db)
- after
- catch couch_db:close(Db)
- end;
- Else ->
- throw(Else)
- end;
-with_db(Db, Fun) ->
- case couch_db:is_db(Db) of
- true ->
- Fun(Db);
- false ->
- erlang:error({invalid_db, Db})
- end.
-
rfc1123_date() ->
{{YYYY,MM,DD},{Hour,Min,Sec}} = calendar:universal_time(),
DayNumber = calendar:day_of_the_week({YYYY,MM,DD}),
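One detail worth calling out in the json_user_ctx/1 change: the record-based access on the clustered #db{} is replaced by matching a required key out of the fabric2 database map. A tiny illustration of that match semantics; the module and function names are illustrative, not CouchDB API:

-module(map_match_sketch).
-export([db_name/1]).

%% `#{name := DbName}` only matches when the key is present; a missing
%% key falls through to the next clause instead of returning a default.
db_name(#{name := DbName}) ->
    DbName;
db_name(Db) when is_map(Db) ->
    maps:get(name, Db, undefined).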
diff --git a/src/couch/src/test_util.erl b/src/couch/src/test_util.erl
index 125e76492..c95c444f5 100644
--- a/src/couch/src/test_util.erl
+++ b/src/couch/src/test_util.erl
@@ -14,8 +14,6 @@
-include_lib("couch/include/couch_eunit.hrl").
-include("couch_db.hrl").
--include("couch_db_int.hrl").
--include("couch_bt_engine.hrl").
-export([init_code_path/0]).
-export([source_file/1, build_file/1]).
@@ -36,12 +34,10 @@
-export([start/1, start/2, start/3, stop/1]).
--export([fake_db/1]).
-
-record(test_context, {mocked = [], started = [], module}).
-define(DEFAULT_APPS,
- [inets, ibrowse, ssl, config, couch_epi, couch_event, couch]).
+ [inets, ibrowse, ssl, config, couch_epi, couch]).
srcdir() ->
code:priv_dir(couch) ++ "/../../".
@@ -54,8 +50,7 @@ init_code_path() ->
"couchdb",
"jiffy",
"ibrowse",
- "mochiweb",
- "snappy"
+ "mochiweb"
],
lists:foreach(fun(Name) ->
code:add_patha(filename:join([builddir(), "src", Name]))
@@ -248,7 +243,7 @@ start(Module, ExtraApps) ->
start(Module, ExtraApps, []).
start(Module, ExtraApps, Options) ->
- Apps = start_applications([config, couch_log, ioq, couch_epi | ExtraApps]),
+ Apps = start_applications([config, couch_log, couch_epi | ExtraApps]),
ToMock = [config, couch_stats] -- proplists:get_value(dont_mock, Options, []),
mock(ToMock),
#test_context{module = Module, mocked = ToMock, started = Apps}.
@@ -257,37 +252,6 @@ stop(#test_context{mocked = Mocked, started = Apps}) ->
meck:unload(Mocked),
stop_applications(Apps).
-fake_db(Fields0) ->
- {ok, Db, Fields} = maybe_set_engine(Fields0),
- Indexes = lists:zip(
- record_info(fields, db),
- lists:seq(2, record_info(size, db))
- ),
- lists:foldl(fun({FieldName, Value}, Acc) ->
- Idx = couch_util:get_value(FieldName, Indexes),
- setelement(Idx, Acc, Value)
- end, Db, Fields).
-
-maybe_set_engine(Fields0) ->
- case lists:member(engine, Fields0) of
- true ->
- {ok, #db{}, Fields0};
- false ->
- {ok, Header, Fields} = get_engine_header(Fields0),
- Db = #db{engine = {couch_bt_engine, #st{header = Header}}},
- {ok, Db, Fields}
- end.
-
-get_engine_header(Fields) ->
- Keys = [disk_version, update_seq, unused, id_tree_state,
- seq_tree_state, local_tree_state, purge_seq, purged_docs,
- security_ptr, revs_limit, uuid, epochs, compacted_seq],
- {HeadFields, RestFields} = lists:partition(
- fun({K, _}) -> lists:member(K, Keys) end, Fields),
- Header0 = couch_bt_engine_header:new(),
- Header = couch_bt_engine_header:set(Header0, HeadFields),
- {ok, Header, RestFields}.
-
now_us() ->
{MegaSecs, Secs, MicroSecs} = os:timestamp(),
(MegaSecs * 1000000 + Secs) * 1000000 + MicroSecs.
diff --git a/src/couch/test/eunit/chttpd_endpoints_tests.erl b/src/couch/test/eunit/chttpd_endpoints_tests.erl
index 3c8586a14..f164ae684 100644
--- a/src/couch/test/eunit/chttpd_endpoints_tests.erl
+++ b/src/couch/test/eunit/chttpd_endpoints_tests.erl
@@ -47,10 +47,10 @@ url_handlers() ->
{<<"_replicate">>, chttpd_misc, handle_replicate_req},
{<<"_uuids">>, chttpd_misc, handle_uuids_req},
{<<"_session">>, chttpd_auth, handle_session_req},
- {<<"_up">>, chttpd_misc, handle_up_req},
- {<<"_membership">>, mem3_httpd, handle_membership_req},
- {<<"_db_updates">>, global_changes_httpd, handle_global_changes_req},
- {<<"_cluster_setup">>, setup_httpd, handle_setup_req}
+ {<<"_membership">>, chttpd_httpd_handlers, not_supported},
+ {<<"_db_updates">>, chttpd_httpd_handlers, not_implemented},
+ {<<"_cluster_setup">>, chttpd_httpd_handlers, not_implemented},
+ {<<"_up">>, chttpd_misc, handle_up_req}
],
lists:foreach(fun({Path, Mod, Fun}) ->
@@ -67,9 +67,9 @@ db_handlers() ->
{<<"_view_cleanup">>, chttpd_db, handle_view_cleanup_req},
{<<"_compact">>, chttpd_db, handle_compact_req},
{<<"_design">>, chttpd_db, handle_design_req},
- {<<"_temp_view">>, chttpd_view, handle_temp_view_req},
+ {<<"_temp_view">>, chttpd_httpd_handlers, not_supported},
{<<"_changes">>, chttpd_db, handle_changes_req},
- {<<"_shards">>, mem3_httpd, handle_shards_req},
+ {<<"_shards">>, chttpd_httpd_handlers, not_supported},
{<<"_index">>, mango_httpd, handle_req},
{<<"_explain">>, mango_httpd, handle_req},
{<<"_find">>, mango_httpd, handle_req}
@@ -87,11 +87,11 @@ db_handlers() ->
design_handlers() ->
Handlers = [
{<<"_view">>, chttpd_view, handle_view_req},
- {<<"_show">>, chttpd_show, handle_doc_show_req},
- {<<"_list">>, chttpd_show, handle_view_list_req},
+ {<<"_show">>, chttpd_httpd_handlers, not_supported},
+ {<<"_list">>, chttpd_httpd_handlers, not_supported},
{<<"_update">>, chttpd_show, handle_doc_update_req},
{<<"_info">>, chttpd_db, handle_design_info_req},
- {<<"_rewrite">>, chttpd_rewrite, handle_rewrite_req}
+ {<<"_rewrite">>, chttpd_httpd_handlers, not_supported}
],
lists:foreach(fun({Path, Mod, Fun}) ->
diff --git a/src/couch/test/eunit/couch_auth_cache_tests.erl b/src/couch/test/eunit/couch_auth_cache_tests.erl
deleted file mode 100644
index 71faf77d6..000000000
--- a/src/couch/test/eunit/couch_auth_cache_tests.erl
+++ /dev/null
@@ -1,349 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_auth_cache_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(SALT, <<"SALT">>).
--define(DB_TIMEOUT, 15000).
-
-start() ->
- test_util:start_couch([ioq]).
-
-
-setup() ->
- DbName = ?tempdb(),
- config:set("couch_httpd_auth", "authentication_db",
- ?b2l(DbName), false),
- DbName.
-
-teardown(DbName) ->
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- ok.
-
-
-couch_auth_cache_test_() ->
- {
- "CouchDB auth cache tests",
- {
- setup,
- fun start/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_get_nil_on_missed_cache/1,
- fun should_get_right_password_hash/1,
- fun should_ensure_doc_hash_equals_cached_one/1,
- fun should_update_password/1,
- fun should_cleanup_cache_after_userdoc_deletion/1,
- fun should_restore_cache_after_userdoc_recreation/1,
- fun should_drop_cache_on_auth_db_change/1,
- fun should_restore_cache_on_auth_db_change/1,
- fun should_recover_cache_after_shutdown/1,
- fun should_get_admin_from_config/1
- ]
- }
- }
- }.
-
-auth_vdu_test_() ->
- Cases = [
- %% Old , New , Result
- %% [Roles, Type] , [Roles, Type] ,
-
- %% Updating valid user doc with valid one
- {[custom, user], [custom, user], "ok"},
-
- %% Updating invalid doc (missing type or roles field) with valid one
- {[missing, missing], [custom, user], "ok"},
- {[missing, user], [custom, user], "ok"},
- {[custom, missing], [custom, user], "ok"},
-
- %% Updating invalid doc (wrong type) with valid one
- {[missing, other], [custom, user], "ok"},
- {[custom, other], [custom, user], "ok"},
-
- %% Updating valid document with invalid one
- {[custom, user], [missing, missing], "doc.type must be user"},
- {[custom, user], [missing, user], "doc.roles must exist"},
- {[custom, user], [custom, missing], "doc.type must be user"},
- {[custom, user], [missing, other], "doc.type must be user"},
- {[custom, user], [custom, other], "doc.type must be user"},
-
- %% Updating invalid doc with invalid one
- {[missing, missing], [missing, missing], "doc.type must be user"},
- {[missing, missing], [missing, user], "doc.roles must exist"},
- {[missing, missing], [custom, missing], "doc.type must be user"},
- {[missing, missing], [missing, other], "doc.type must be user"},
- {[missing, missing], [custom, other], "doc.type must be user"},
-
- {[missing, user], [missing, missing], "doc.type must be user"},
- {[missing, user], [missing, user], "doc.roles must exist"},
- {[missing, user], [custom, missing], "doc.type must be user"},
- {[missing, user], [missing, other], "doc.type must be user"},
- {[missing, user], [custom, other], "doc.type must be user"},
-
- {[missing, other], [missing, missing], "doc.type must be user"},
- {[missing, other], [missing, user], "doc.roles must exist"},
- {[missing, other], [custom, missing], "doc.type must be user"},
- {[missing, other], [missing, other], "doc.type must be user"},
- {[missing, other], [custom, other], "doc.type must be user"},
-
- {[custom, missing], [missing, missing], "doc.type must be user"},
- {[custom, missing], [missing, user], "doc.roles must exist"},
- {[custom, missing], [custom, missing], "doc.type must be user"},
- {[custom, missing], [missing, other], "doc.type must be user"},
- {[custom, missing], [custom, other], "doc.type must be user"},
-
- {[custom, other], [missing, missing], "doc.type must be user"},
- {[custom, other], [missing, user], "doc.roles must exist"},
- {[custom, other], [custom, missing], "doc.type must be user"},
- {[custom, other], [missing, other], "doc.type must be user"},
- {[custom, other], [custom, other], "doc.type must be user"}
- ],
-
- %% Make sure we covered all combinations
- AllPossibleDocs = couch_tests_combinatorics:product([
- [missing, custom],
- [missing, user, other]
- ]),
- AllPossibleCases = couch_tests_combinatorics:product(
- [AllPossibleDocs, AllPossibleDocs]),
- ?assertEqual([], AllPossibleCases -- [[A, B] || {A, B, _} <- Cases]),
-
- {
- "Check User doc validation",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- [
- make_validate_test(Case) || Case <- Cases
- ]
- }
- }.
-
-should_get_nil_on_missed_cache(_) ->
- ?_assertEqual(nil, couch_auth_cache:get_user_creds("joe")).
-
-should_get_right_password_hash(DbName) ->
- ?_test(begin
- PasswordHash = hash_password("pass1"),
- {ok, _} = update_user_doc(DbName, "joe", "pass1"),
- {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"),
- ?assertEqual(PasswordHash,
- couch_util:get_value(<<"password_sha">>, Creds))
- end).
-
-should_ensure_doc_hash_equals_cached_one(DbName) ->
- ?_test(begin
- {ok, _} = update_user_doc(DbName, "joe", "pass1"),
- {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"),
-
- CachedHash = couch_util:get_value(<<"password_sha">>, Creds),
- StoredHash = get_user_doc_password_sha(DbName, "joe"),
- ?assertEqual(StoredHash, CachedHash)
- end).
-
-should_update_password(DbName) ->
- ?_test(begin
- PasswordHash = hash_password("pass2"),
- {ok, Rev} = update_user_doc(DbName, "joe", "pass1"),
- {ok, _} = update_user_doc(DbName, "joe", "pass2", Rev),
- {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"),
- ?assertEqual(PasswordHash,
- couch_util:get_value(<<"password_sha">>, Creds))
- end).
-
-should_cleanup_cache_after_userdoc_deletion(DbName) ->
- ?_test(begin
- {ok, _} = update_user_doc(DbName, "joe", "pass1"),
- delete_user_doc(DbName, "joe"),
- ?assertEqual(nil, couch_auth_cache:get_user_creds("joe"))
- end).
-
-should_restore_cache_after_userdoc_recreation(DbName) ->
- ?_test(begin
- PasswordHash = hash_password("pass5"),
- {ok, _} = update_user_doc(DbName, "joe", "pass1"),
- delete_user_doc(DbName, "joe"),
- ?assertEqual(nil, couch_auth_cache:get_user_creds("joe")),
-
- {ok, _} = update_user_doc(DbName, "joe", "pass5"),
- {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"),
-
- ?assertEqual(PasswordHash,
- couch_util:get_value(<<"password_sha">>, Creds))
- end).
-
-should_drop_cache_on_auth_db_change(DbName) ->
- ?_test(begin
- {ok, _} = update_user_doc(DbName, "joe", "pass1"),
- config:set("couch_httpd_auth", "authentication_db",
- ?b2l(?tempdb()), false),
- ?assertEqual(nil, couch_auth_cache:get_user_creds("joe"))
- end).
-
-should_restore_cache_on_auth_db_change(DbName) ->
- ?_test(begin
- PasswordHash = hash_password("pass1"),
- {ok, _} = update_user_doc(DbName, "joe", "pass1"),
- {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"),
-
- DbName1 = ?tempdb(),
- config:set("couch_httpd_auth", "authentication_db",
- ?b2l(DbName1), false),
-
- {ok, _} = update_user_doc(DbName1, "joe", "pass5"),
-
- config:set("couch_httpd_auth", "authentication_db",
- ?b2l(DbName), false),
-
- {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"),
- ?assertEqual(PasswordHash,
- couch_util:get_value(<<"password_sha">>, Creds))
- end).
-
-should_recover_cache_after_shutdown(DbName) ->
- ?_test(begin
- PasswordHash = hash_password("pass2"),
- {ok, Rev0} = update_user_doc(DbName, "joe", "pass1"),
- {ok, Rev1} = update_user_doc(DbName, "joe", "pass2", Rev0),
- shutdown_db(DbName),
- {ok, Rev1} = get_doc_rev(DbName, "joe"),
- ?assertEqual(PasswordHash, get_user_doc_password_sha(DbName, "joe"))
- end).
-
-
-should_get_admin_from_config(_DbName) ->
- ?_test(begin
- config:set("admins", "testadmin", "password", false),
- Creds = test_util:wait(fun() ->
- case couch_auth_cache:get_user_creds("testadmin") of
- {ok, Creds0, _} -> Creds0;
- nil -> wait
- end
- end),
- Roles = couch_util:get_value(<<"roles">>, Creds),
- ?assertEqual([<<"_admin">>], Roles)
- end).
-
-update_user_doc(DbName, UserName, Password) ->
- update_user_doc(DbName, UserName, Password, nil).
-
-update_user_doc(DbName, UserName, Password, Rev) ->
- ok = couch_auth_cache:ensure_users_db_exists(),
- User = iolist_to_binary(UserName),
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"org.couchdb.user:", User/binary>>},
- {<<"name">>, User},
- {<<"type">>, <<"user">>},
- {<<"salt">>, ?SALT},
- {<<"password_sha">>, hash_password(Password)},
- {<<"roles">>, []}
- ] ++ case Rev of
- nil -> [];
- _ -> [{<<"_rev">>, Rev}]
- end
- }),
- {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- {ok, NewRev} = couch_db:update_doc(AuthDb, Doc, []),
- ok = couch_db:close(AuthDb),
- {ok, couch_doc:rev_to_str(NewRev)}.
-
-hash_password(Password) ->
- ?l2b(couch_util:to_hex(crypto:hash(sha, iolist_to_binary([Password, ?SALT])))).
-
-shutdown_db(DbName) ->
- {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(AuthDb),
- couch_util:shutdown_sync(couch_db:get_pid(AuthDb)),
- ok = timer:sleep(1000).
-
-get_doc_rev(DbName, UserName) ->
- DocId = iolist_to_binary([<<"org.couchdb.user:">>, UserName]),
- {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- UpdateRev =
- case couch_db:open_doc(AuthDb, DocId, []) of
- {ok, Doc} ->
- {Props} = couch_doc:to_json_obj(Doc, []),
- couch_util:get_value(<<"_rev">>, Props);
- {not_found, missing} ->
- nil
- end,
- ok = couch_db:close(AuthDb),
- {ok, UpdateRev}.
-
-get_user_doc_password_sha(DbName, UserName) ->
- DocId = iolist_to_binary([<<"org.couchdb.user:">>, UserName]),
- {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- {ok, Doc} = couch_db:open_doc(AuthDb, DocId, []),
- ok = couch_db:close(AuthDb),
- {Props} = couch_doc:to_json_obj(Doc, []),
- couch_util:get_value(<<"password_sha">>, Props).
-
-delete_user_doc(DbName, UserName) ->
- DocId = iolist_to_binary([<<"org.couchdb.user:">>, UserName]),
- {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- {ok, Doc} = couch_db:open_doc(AuthDb, DocId, []),
- {Props} = couch_doc:to_json_obj(Doc, []),
- DeletedDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DocId},
- {<<"_rev">>, couch_util:get_value(<<"_rev">>, Props)},
- {<<"_deleted">>, true}
- ]}),
- {ok, _} = couch_db:update_doc(AuthDb, DeletedDoc, []),
- ok = couch_db:close(AuthDb).
-
-
-make_validate_test({Old, New, "ok"} = Case) ->
- {test_id(Case), ?_assertEqual(ok, validate(doc(Old), doc(New)))};
-make_validate_test({Old, New, Reason} = Case) ->
- Failure = ?l2b(Reason),
- {test_id(Case), ?_assertThrow({forbidden, Failure}, validate(doc(Old), doc(New)))}.
-
-test_id({[OldRoles, OldType], [NewRoles, NewType], Result}) ->
- lists:flatten(io_lib:format(
- "(roles: ~w, type: ~w) -> (roles: ~w, type: ~w) ==> \"~s\"",
- [OldRoles, OldType, NewRoles, NewType, Result])).
-
-doc([Roles, Type]) ->
- couch_doc:from_json_obj({[
- {<<"_id">>,<<"org.couchdb.user:foo">>},
- {<<"_rev">>,<<"1-281c81adb1bf10927a6160f246dc0468">>},
- {<<"name">>,<<"foo">>},
- {<<"password_scheme">>,<<"simple">>},
- {<<"salt">>,<<"00000000000000000000000000000000">>},
- {<<"password_sha">>, <<"111111111111111111111111111111111111">>}]
- ++ type(Type) ++ roles(Roles)}).
-
-roles(custom) -> [{<<"roles">>, [<<"custom">>]}];
-roles(missing) -> [].
-
-type(user) -> [{<<"type">>, <<"user">>}];
-type(other) -> [{<<"type">>, <<"other">>}];
-type(missing) -> [].
-
-validate(DiskDoc, NewDoc) ->
- JSONCtx = {[
- {<<"db">>, <<"foo/bar">>},
- {<<"name">>, <<"foo">>},
- {<<"roles">>, [<<"_admin">>]}
- ]},
- validate(DiskDoc, NewDoc, JSONCtx).
-
-validate(DiskDoc, NewDoc, JSONCtx) ->
- {ok, DDoc0} = couch_auth_cache:auth_design_doc(<<"_design/anything">>),
- DDoc = DDoc0#doc{revs = {1, [<<>>]}},
- couch_query_servers:validate_doc_update(DDoc, NewDoc, DiskDoc, JSONCtx, []).
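The removed auth_vdu_test_/0 guarded its case table with a cartesian-product check so that every (roles, type) combination was covered. A minimal sketch of the product helper it relied on (couch_tests_combinatorics:product/1), rewritten with plain list comprehensions; the module name below is illustrative:

-module(product_sketch).
-export([product/1]).

%% product([[a, b], [1, 2]]) -> [[a,1], [a,2], [b,1], [b,2]]
product([]) ->
    [[]];
product([Choices | Rest]) ->
    [[C | Tail] || C <- Choices, Tail <- product(Rest)].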
diff --git a/src/couch/test/eunit/couch_bt_engine_compactor_tests.erl b/src/couch/test/eunit/couch_bt_engine_compactor_tests.erl
deleted file mode 100644
index 4c4c43958..000000000
--- a/src/couch/test/eunit/couch_bt_engine_compactor_tests.erl
+++ /dev/null
@@ -1,129 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine_compactor_tests).
-
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
--define(DELAY, 100).
--define(WAIT_DELAY_COUNT, 50).
-
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- create_docs(DbName),
- DbName.
-
-
-teardown(DbName) when is_binary(DbName) ->
- couch_server:delete(DbName, [?ADMIN_CTX]),
- ok.
-
-
-compaction_resume_test_() ->
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun compaction_resume/1
- ]
- }
- }.
-
-
-compaction_resume(DbName) ->
- ?_test(begin
- check_db_validity(DbName),
- compact_db(DbName),
- check_db_validity(DbName),
-
- % Force an error when copying document ids
- with_mecked_emsort(fun() ->
- compact_db(DbName)
- end),
-
- check_db_validity(DbName),
- compact_db(DbName),
- check_db_validity(DbName)
- end).
-
-
-check_db_validity(DbName) ->
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)),
- ?assertEqual(3, couch_db:count_changes_since(Db, 0))
- end).
-
-
-with_mecked_emsort(Fun) ->
- meck:new(couch_emsort, [passthrough]),
- meck:expect(couch_emsort, iter, fun(_) -> erlang:error(kaboom) end),
- try
- Fun()
- after
- meck:unload()
- end.
-
-
-create_docs(DbName) ->
- couch_util:with_db(DbName, fun(Db) ->
- Doc1 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc1">>},
- {<<"value">>, 1}
-
- ]}),
- Doc2 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc2">>},
- {<<"value">>, 2}
-
- ]}),
- Doc3 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc3">>},
- {<<"value">>, 3}
-
- ]}),
- {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3])
- end).
-
-
-compact_db(DbName) ->
- couch_util:with_db(DbName, fun(Db) ->
- {ok, _} = couch_db:start_compact(Db)
- end),
- wait_db_compact_done(DbName, ?WAIT_DELAY_COUNT).
-
-
-wait_db_compact_done(_DbName, 0) ->
- Failure = [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, "DB compaction failed to finish"}
- ],
- erlang:error({assertion_failed, Failure});
-wait_db_compact_done(DbName, N) ->
- IsDone = couch_util:with_db(DbName, fun(Db) ->
- not is_pid(couch_db:get_compactor_pid(Db))
- end),
- if IsDone -> ok; true ->
- timer:sleep(?DELAY),
- wait_db_compact_done(DbName, N - 1)
- end.
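with_mecked_emsort/1 above is a compact example of meck-based fault injection: wrap one function so it crashes, run the code under test, and always unload the mock. A generic restatement of the same idiom; the target module my_worker and its iter function are hypothetical, and [non_strict] lets meck create the module on the fly:

-module(fault_injection_sketch).
-export([with_faulty_worker/1]).

with_faulty_worker(Fun) ->
    meck:new(my_worker, [non_strict]),
    meck:expect(my_worker, iter, fun(_) -> erlang:error(kaboom) end),
    try
        Fun()
    after
        meck:unload(my_worker)
    end.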
diff --git a/src/couch/test/eunit/couch_bt_engine_tests.erl b/src/couch/test/eunit/couch_bt_engine_tests.erl
deleted file mode 100644
index 3e3ecbf25..000000000
--- a/src/couch/test/eunit/couch_bt_engine_tests.erl
+++ /dev/null
@@ -1,20 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine_tests).
-
-
--include_lib("eunit/include/eunit.hrl").
-
-
-couch_bt_engine_test_()->
- cpse_util:create_tests(couch, couch_bt_engine, "couch").
diff --git a/src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl b/src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl
deleted file mode 100644
index a2a972caf..000000000
--- a/src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl
+++ /dev/null
@@ -1,244 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_bt_engine_upgrade_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 60). % seconds
-
-setup(_) ->
- Ctx = test_util:start_couch(),
- DbDir = config:get("couchdb", "database_dir"),
- DbFileNames = [
- "db_v6_without_purge_req.couch",
- "db_v6_with_1_purge_req.couch",
- "db_v6_with_2_purge_req.couch",
- "db_v6_with_1_purge_req_for_2_docs.couch",
- "db_v7_without_purge_req.couch",
- "db_v7_with_1_purge_req.couch",
- "db_v7_with_2_purge_req.couch",
- "db_v7_with_1_purge_req_for_2_docs.couch"
- ],
- NewPaths = lists:map(fun(DbFileName) ->
- OldDbFilePath = filename:join([?FIXTURESDIR, DbFileName]),
- NewDbFilePath = filename:join([DbDir, DbFileName]),
- ok = filelib:ensure_dir(NewDbFilePath),
- file:delete(NewDbFilePath),
- {ok, _} = file:copy(OldDbFilePath, NewDbFilePath),
- NewDbFilePath
- end, DbFileNames),
- {Ctx, NewPaths}.
-
-
-teardown(_, {Ctx, Paths}) ->
- test_util:stop_couch(Ctx),
- lists:foreach(fun(Path) ->
- file:delete(Path)
- end, Paths).
-
-
-upgrade_test_() ->
- From = [6, 7],
- {
- "Couch Bt Engine Upgrade tests",
- {
- foreachx,
- fun setup/1, fun teardown/2,
- [{F, fun t_upgrade_without_purge_req/2} || F <- From] ++
- [{F, fun t_upgrade_with_1_purge_req/2} || F <- From] ++
- [{F, fun t_upgrade_with_N_purge_req/2} || F <- From] ++
- [{F, fun t_upgrade_with_1_purge_req_for_2_docs/2} || F <- From]
- }
- }.
-
-
-t_upgrade_without_purge_req(VersionFrom, {_Ctx, _NewPaths}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- % There are three documents in the fixture
- % db with zero purge entries
- DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom)
- ++ "_without_purge_req"),
-
- ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)),
- {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual(0, couch_db:get_purge_seq(Db)),
- couch_db:fold_purge_infos(Db, 0, fun fold_fun/2, [])
- end),
- ?assertEqual([], UpgradedPurged),
- ?assertEqual(8, get_disk_version_from_header(DbName)),
- {ok, Rev} = save_doc(
- DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]}
- ),
- {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 5}, couch_db:get_doc_count(Db)),
- ?assertEqual(0, couch_db:get_purge_seq(Db))
- end),
-
- PurgeReqs = [
- {couch_uuids:random(), <<"doc4">>, [Rev]}
- ],
-
- {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) ->
- couch_db:purge_docs(Db, PurgeReqs)
- end),
- ?assertEqual(PRevs, [Rev]),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)),
- ?assertEqual(1, couch_db:get_purge_seq(Db))
- end)
- end)}.
-
-
-t_upgrade_with_1_purge_req(VersionFrom, {_Ctx, _NewPaths}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- % There are two documents in the fixture database
- % with a single purge entry
- DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom)
- ++ "_with_1_purge_req"),
-
- ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)),
- {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual(1, couch_db:get_purge_seq(Db)),
- couch_db:fold_purge_infos(Db, 0, fun fold_fun/2, [])
- end),
- ?assertEqual(8, get_disk_version_from_header(DbName)),
- ?assertEqual([{1, <<"doc1">>}], UpgradedPurged),
-
- {ok, Rev} = save_doc(
- DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]}
- ),
- {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)),
- ?assertEqual(1, couch_db:get_purge_seq(Db))
- end),
-
- PurgeReqs = [
- {couch_uuids:random(), <<"doc4">>, [Rev]}
- ],
-
- {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) ->
- couch_db:purge_docs(Db, PurgeReqs)
- end),
- ?assertEqual(PRevs, [Rev]),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)),
- ?assertEqual(2, couch_db:get_purge_seq(Db))
- end)
- end)}.
-
-
-t_upgrade_with_N_purge_req(VersionFrom, {_Ctx, _NewPaths}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- % There is one document in the fixture database
- % with two docs that have been purged
- DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom)
- ++ "_with_2_purge_req"),
-
- ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)),
- {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual(2, couch_db:get_purge_seq(Db)),
- couch_db:fold_purge_infos(Db, 1, fun fold_fun/2, [])
- end),
- ?assertEqual(8, get_disk_version_from_header(DbName)),
- ?assertEqual([{2, <<"doc2">>}], UpgradedPurged),
-
- {ok, Rev} = save_doc(DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]}),
- {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)),
- ?assertEqual(2, couch_db:get_purge_seq(Db))
- end),
-
- PurgeReqs = [
- {couch_uuids:random(), <<"doc4">>, [Rev]}
- ],
-
- {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) ->
- couch_db:purge_docs(Db, PurgeReqs)
- end),
- ?assertEqual(PRevs, [Rev]),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 2}, couch_db:get_doc_count(Db)),
- ?assertEqual(3, couch_db:get_purge_seq(Db))
- end)
- end)}.
-
-
-t_upgrade_with_1_purge_req_for_2_docs(VersionFrom, {_Ctx, _NewPaths}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- % There are two documents (Doc4 and Doc5) in the fixture database
- % with three docs (Doc1, Doc2 and Doc3) that have been purged, and
- % with one purge req for Doc1 and another purge req for Doc 2 and Doc3
- DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom)
- ++ "_with_1_purge_req_for_2_docs"),
-
- ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)),
- {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual(3, couch_db:get_purge_seq(Db)),
- couch_db:fold_purge_infos(Db, 1, fun fold_fun/2, [])
- end),
- ?assertEqual(8, get_disk_version_from_header(DbName)),
- ?assertEqual([{3,<<"doc2">>},{2,<<"doc3">>}], UpgradedPurged),
-
- {ok, Rev} = save_doc(DbName, {[{<<"_id">>, <<"doc6">>}, {<<"v">>, 1}]}),
- {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc7">>}, {<<"v">>, 2}]}),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)),
- ?assertEqual(3, couch_db:get_purge_seq(Db))
- end),
-
- PurgeReqs = [
- {couch_uuids:random(), <<"doc6">>, [Rev]}
- ],
-
- {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) ->
- couch_db:purge_docs(Db, PurgeReqs)
- end),
- ?assertEqual(PRevs, [Rev]),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)),
- ?assertEqual(4, couch_db:get_purge_seq(Db))
- end)
- end)}.
-
-
-save_doc(DbName, Json) ->
- Doc = couch_doc:from_json_obj(Json),
- couch_util:with_db(DbName, fun(Db) ->
- couch_db:update_doc(Db, Doc, [])
- end).
-
-
-fold_fun({PSeq, _UUID, Id, _Revs}, Acc) ->
- {ok, [{PSeq, Id} | Acc]}.
-
-
-get_disk_version_from_header(DbFileName) ->
- DbDir = config:get("couchdb", "database_dir"),
- DbFilePath = filename:join([DbDir, ?l2b(?b2l(DbFileName) ++ ".couch")]),
- {ok, Fd} = couch_file:open(DbFilePath, []),
- {ok, Header} = couch_file:read_header(Fd),
- DiskVersion = couch_bt_engine_header:disk_version(Header),
- couch_file:close(Fd),
- DiskVersion.
diff --git a/src/couch/test/eunit/couch_btree_tests.erl b/src/couch/test/eunit/couch_btree_tests.erl
deleted file mode 100644
index c9b791d2c..000000000
--- a/src/couch/test/eunit/couch_btree_tests.erl
+++ /dev/null
@@ -1,572 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_btree_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(ROWS, 1000).
--define(TIMEOUT, 60). % seconds
-
-
-setup() ->
- {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
- {ok, Btree} = couch_btree:open(nil, Fd, [{compression, none},
- {reduce, fun reduce_fun/2}]),
- {Fd, Btree}.
-
-setup_kvs(_) ->
- setup().
-
-setup_red() ->
- {_, EvenOddKVs} = lists:foldl(
- fun(Idx, {Key, Acc}) ->
- case Key of
- "even" -> {"odd", [{{Key, Idx}, 1} | Acc]};
- _ -> {"even", [{{Key, Idx}, 1} | Acc]}
- end
- end, {"odd", []}, lists:seq(1, ?ROWS)),
- {Fd, Btree} = setup(),
- {ok, Btree1} = couch_btree:add_remove(Btree, EvenOddKVs, []),
- {Fd, Btree1}.
-setup_red(_) ->
- setup_red().
-
-teardown(Fd) when is_pid(Fd) ->
- ok = couch_file:close(Fd);
-teardown({Fd, _}) ->
- teardown(Fd).
-teardown(_, {Fd, _}) ->
- teardown(Fd).
-
-
-kvs_test_funs() ->
- [
- fun should_set_fd_correctly/2,
- fun should_set_root_correctly/2,
- fun should_create_zero_sized_btree/2,
- fun should_set_reduce_option/2,
- fun should_fold_over_empty_btree/2,
- fun should_add_all_keys/2,
- fun should_continuously_add_new_kv/2,
- fun should_continuously_remove_keys/2,
- fun should_insert_keys_in_reversed_order/2,
- fun should_add_every_odd_key_remove_every_even/2,
- fun should_add_every_even_key_remove_every_old/2
- ].
-
-red_test_funs() ->
- [
- fun should_reduce_whole_range/2,
- fun should_reduce_first_half/2,
- fun should_reduce_second_half/2
- ].
-
-
-btree_open_test_() ->
- {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
- {ok, Btree} = couch_btree:open(nil, Fd, [{compression, none}]),
- {
- "Ensure that created btree is really a btree record",
- ?_assert(is_record(Btree, btree))
- }.
-
-sorted_kvs_test_() ->
- Funs = kvs_test_funs(),
- Sorted = [{Seq, couch_rand:uniform()} || Seq <- lists:seq(1, ?ROWS)],
- {
- "BTree with sorted keys",
- {
- setup,
- fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
- {
- foreachx,
- fun setup_kvs/1, fun teardown/2,
- [{Sorted, Fun} || Fun <- Funs]
- }
- }
- }.
-
-rsorted_kvs_test_() ->
- Sorted = [{Seq, couch_rand:uniform()} || Seq <- lists:seq(1, ?ROWS)],
- Funs = kvs_test_funs(),
- Reversed = Sorted,
- {
- "BTree with backward sorted keys",
- {
- setup,
- fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
- {
- foreachx,
- fun setup_kvs/1, fun teardown/2,
- [{Reversed, Fun} || Fun <- Funs]
- }
- }
- }.
-
-shuffled_kvs_test_() ->
- Funs = kvs_test_funs(),
- Sorted = [{Seq, couch_rand:uniform()} || Seq <- lists:seq(1, ?ROWS)],
- Shuffled = shuffle(Sorted),
- {
- "BTree with shuffled keys",
- {
- setup,
- fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
- {
- foreachx,
- fun setup_kvs/1, fun teardown/2,
- [{Shuffled, Fun} || Fun <- Funs]
- }
- }
- }.
-
-reductions_test_() ->
- {
- "BTree reductions",
- {
- setup,
- fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
- [
- {
- "Common tests",
- {
- foreach,
- fun setup_red/0, fun teardown/1,
- [
- fun should_reduce_without_specified_direction/1,
- fun should_reduce_forward/1,
- fun should_reduce_backward/1
- ]
- }
- },
- {
- "Range requests",
- [
- {
- "Forward direction",
- {
- foreachx,
- fun setup_red/1, fun teardown/2,
- [{fwd, F} || F <- red_test_funs()]
- }
- },
- {
- "Backward direction",
- {
- foreachx,
- fun setup_red/1, fun teardown/2,
- [{rev, F} || F <- red_test_funs()]
- }
- }
- ]
- }
- ]
- }
- }.
-
-
-should_set_fd_correctly(_, {Fd, Btree}) ->
- ?_assertMatch(Fd, Btree#btree.fd).
-
-should_set_root_correctly(_, {_, Btree}) ->
- ?_assertMatch(nil, Btree#btree.root).
-
-should_create_zero_sized_btree(_, {_, Btree}) ->
- ?_assertMatch(0, couch_btree:size(Btree)).
-
-should_set_reduce_option(_, {_, Btree}) ->
- ReduceFun = fun reduce_fun/2,
- Btree1 = couch_btree:set_options(Btree, [{reduce, ReduceFun}]),
- ?_assertMatch(ReduceFun, Btree1#btree.reduce).
-
-should_fold_over_empty_btree(_, {_, Btree}) ->
- {ok, _, EmptyRes} = couch_btree:foldl(Btree, fun(_, X) -> {ok, X+1} end, 0),
- ?_assertEqual(EmptyRes, 0).
-
-should_add_all_keys(KeyValues, {Fd, Btree}) ->
- {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []),
- [
- should_return_complete_btree_on_adding_all_keys(KeyValues, Btree1),
- should_have_non_zero_size(Btree1),
- should_have_lesser_size_than_file(Fd, Btree1),
- should_keep_root_pointer_to_kp_node(Fd, Btree1),
- should_remove_all_keys(KeyValues, Btree1)
- ].
-
-should_return_complete_btree_on_adding_all_keys(KeyValues, Btree) ->
- ?_assert(test_btree(Btree, KeyValues)).
-
-should_have_non_zero_size(Btree) ->
- ?_assert(couch_btree:size(Btree) > 0).
-
-should_have_lesser_size_than_file(Fd, Btree) ->
- ?_assert((couch_btree:size(Btree) =< couch_file:bytes(Fd))).
-
-should_keep_root_pointer_to_kp_node(Fd, Btree) ->
- ?_assertMatch({ok, {kp_node, _}},
- couch_file:pread_term(Fd, element(1, Btree#btree.root))).
-
-should_remove_all_keys(KeyValues, Btree) ->
- Keys = keys(KeyValues),
- {ok, Btree1} = couch_btree:add_remove(Btree, [], Keys),
- {
- "Should remove all the keys",
- [
- should_produce_valid_btree(Btree1, []),
- should_be_empty(Btree1)
- ]
- }.
-
-should_continuously_add_new_kv(KeyValues, {_, Btree}) ->
- {Btree1, _} = lists:foldl(
- fun(KV, {BtAcc, PrevSize}) ->
- {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []),
- ?assert(couch_btree:size(BtAcc2) > PrevSize),
- {BtAcc2, couch_btree:size(BtAcc2)}
- end, {Btree, couch_btree:size(Btree)}, KeyValues),
- {
- "Should continuously add key-values to btree",
- [
- should_produce_valid_btree(Btree1, KeyValues),
- should_not_be_empty(Btree1)
- ]
- }.
-
-should_continuously_remove_keys(KeyValues, {_, Btree}) ->
- {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []),
- {Btree2, _} = lists:foldl(
- fun({K, _}, {BtAcc, PrevSize}) ->
- {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [], [K]),
- ?assert(couch_btree:size(BtAcc2) < PrevSize),
- {BtAcc2, couch_btree:size(BtAcc2)}
- end, {Btree1, couch_btree:size(Btree1)}, KeyValues),
- {
- "Should continuously remove keys from btree",
- [
- should_produce_valid_btree(Btree2, []),
- should_be_empty(Btree2)
- ]
- }.
-
-should_insert_keys_in_reversed_order(KeyValues, {_, Btree}) ->
- KeyValuesRev = lists:reverse(KeyValues),
- {Btree1, _} = lists:foldl(
- fun(KV, {BtAcc, PrevSize}) ->
- {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []),
- ?assert(couch_btree:size(BtAcc2) > PrevSize),
- {BtAcc2, couch_btree:size(BtAcc2)}
- end, {Btree, couch_btree:size(Btree)}, KeyValuesRev),
- should_produce_valid_btree(Btree1, KeyValues).
-
-should_add_every_odd_key_remove_every_even(KeyValues, {_, Btree}) ->
- {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []),
- {_, Rem2Keys0, Rem2Keys1} = lists:foldl(fun(X, {Count, Left, Right}) ->
- case Count rem 2 == 0 of
- true -> {Count + 1, [X | Left], Right};
- false -> {Count + 1, Left, [X | Right]}
- end
- end, {0, [], []}, KeyValues),
- {timeout, ?TIMEOUT,
- ?_assert(test_add_remove(Btree1, Rem2Keys0, Rem2Keys1))
- }.
-
-should_add_every_even_key_remove_every_old(KeyValues, {_, Btree}) ->
- {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []),
- {_, Rem2Keys0, Rem2Keys1} = lists:foldl(fun(X, {Count, Left, Right}) ->
- case Count rem 2 == 0 of
- true -> {Count + 1, [X | Left], Right};
- false -> {Count + 1, Left, [X | Right]}
- end
- end, {0, [], []}, KeyValues),
- {timeout, ?TIMEOUT,
- ?_assert(test_add_remove(Btree1, Rem2Keys1, Rem2Keys0))
- }.
-
-
-should_reduce_without_specified_direction({_, Btree}) ->
- ?_assertMatch(
- {ok, [{{"odd", _}, ?ROWS div 2}, {{"even", _}, ?ROWS div 2}]},
- fold_reduce(Btree, [])).
-
-should_reduce_forward({_, Btree}) ->
- ?_assertMatch(
- {ok, [{{"odd", _}, ?ROWS div 2}, {{"even", _}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, fwd}])).
-
-should_reduce_backward({_, Btree}) ->
- ?_assertMatch(
- {ok, [{{"even", _}, ?ROWS div 2}, {{"odd", _}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, rev}])).
-
-should_reduce_whole_range(fwd, {_, Btree}) ->
- {SK, EK} = {{"even", 0}, {"odd", ?ROWS - 1}},
- [
- {
- "include endkey",
- ?_assertMatch(
- {ok, [{{"odd", 1}, ?ROWS div 2},
- {{"even", 2}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, fwd},
- {start_key, SK},
- {end_key, EK}]))
- },
- {
- "exclude endkey",
- ?_assertMatch(
- {ok, [{{"odd", 1}, (?ROWS div 2) - 1},
- {{"even", 2}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, fwd},
- {start_key, SK},
- {end_key_gt, EK}]))
- }
- ];
-should_reduce_whole_range(rev, {_, Btree}) ->
- {SK, EK} = {{"odd", ?ROWS - 1}, {"even", 2}},
- [
- {
- "include endkey",
- ?_assertMatch(
- {ok, [{{"even", ?ROWS}, ?ROWS div 2},
- {{"odd", ?ROWS - 1}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, rev},
- {start_key, SK},
- {end_key, EK}]))
- },
- {
- "exclude endkey",
- ?_assertMatch(
- {ok, [{{"even", ?ROWS}, (?ROWS div 2) - 1},
- {{"odd", ?ROWS - 1}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, rev},
- {start_key, SK},
- {end_key_gt, EK}]))
- }
- ].
-
-should_reduce_first_half(fwd, {_, Btree}) ->
- {SK, EK} = {{"even", 0}, {"odd", (?ROWS div 2) - 1}},
- [
- {
- "include endkey",
- ?_assertMatch(
- {ok, [{{"odd", 1}, ?ROWS div 4},
- {{"even", 2}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, fwd},
- {start_key, SK}, {end_key, EK}]))
- },
- {
- "exclude endkey",
- ?_assertMatch(
- {ok, [{{"odd", 1}, (?ROWS div 4) - 1},
- {{"even", 2}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, fwd},
- {start_key, SK},
- {end_key_gt, EK}]))
- }
- ];
-should_reduce_first_half(rev, {_, Btree}) ->
- {SK, EK} = {{"odd", ?ROWS - 1}, {"even", ?ROWS div 2}},
- [
- {
- "include endkey",
- ?_assertMatch(
- {ok, [{{"even", ?ROWS}, (?ROWS div 4) + 1},
- {{"odd", ?ROWS - 1}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, rev},
- {start_key, SK},
- {end_key, EK}]))
- },
- {
- "exclude endkey",
- ?_assertMatch(
- {ok, [{{"even", ?ROWS}, ?ROWS div 4},
- {{"odd", ?ROWS - 1}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, rev},
- {start_key, SK},
- {end_key_gt, EK}]))
- }
- ].
-
-should_reduce_second_half(fwd, {_, Btree}) ->
- {SK, EK} = {{"even", ?ROWS div 2}, {"odd", ?ROWS - 1}},
- [
- {
- "include endkey",
- ?_assertMatch(
- {ok, [{{"odd", 1}, ?ROWS div 2},
- {{"even", ?ROWS div 2}, (?ROWS div 4) + 1}]},
- fold_reduce(Btree, [{dir, fwd},
- {start_key, SK},
- {end_key, EK}]))
- },
- {
- "exclude endkey",
- ?_assertMatch(
- {ok, [{{"odd", 1}, (?ROWS div 2) - 1},
- {{"even", ?ROWS div 2}, (?ROWS div 4) + 1}]},
- fold_reduce(Btree, [{dir, fwd},
- {start_key, SK},
- {end_key_gt, EK}]))
- }
- ];
-should_reduce_second_half(rev, {_, Btree}) ->
- {SK, EK} = {{"odd", (?ROWS div 2) + 1}, {"even", 2}},
- [
- {
- "include endkey",
- ?_assertMatch(
- {ok, [{{"even", ?ROWS}, ?ROWS div 2},
- {{"odd", (?ROWS div 2) + 1}, (?ROWS div 4) + 1}]},
- fold_reduce(Btree, [{dir, rev},
- {start_key, SK},
- {end_key, EK}]))
- },
- {
- "exclude endkey",
- ?_assertMatch(
- {ok, [{{"even", ?ROWS}, (?ROWS div 2) - 1},
- {{"odd", (?ROWS div 2) + 1}, (?ROWS div 4) + 1}]},
- fold_reduce(Btree, [{dir, rev},
- {start_key, SK},
- {end_key_gt, EK}]))
- }
- ].
-
-should_produce_valid_btree(Btree, KeyValues) ->
- ?_assert(test_btree(Btree, KeyValues)).
-
-should_be_empty(Btree) ->
- ?_assertEqual(couch_btree:size(Btree), 0).
-
-should_not_be_empty(Btree) ->
- ?_assert(couch_btree:size(Btree) > 0).
-
-fold_reduce(Btree, Opts) ->
- GroupFun = fun({K1, _}, {K2, _}) ->
- K1 == K2
- end,
- FoldFun = fun(GroupedKey, Unreduced, Acc) ->
- {ok, [{GroupedKey, couch_btree:final_reduce(Btree, Unreduced)} | Acc]}
- end,
- couch_btree:fold_reduce(Btree, FoldFun, [],
- [{key_group_fun, GroupFun}] ++ Opts).
-
-
-keys(KVs) ->
- [K || {K, _} <- KVs].
-
-reduce_fun(reduce, KVs) ->
- length(KVs);
-reduce_fun(rereduce, Reds) ->
- lists:sum(Reds).
-
-
-shuffle(List) ->
- randomize(round(math:log(length(List)) + 0.5), List).
-
-randomize(1, List) ->
- randomize(List);
-randomize(T, List) ->
- lists:foldl(
- fun(_E, Acc) ->
- randomize(Acc)
- end, randomize(List), lists:seq(1, (T - 1))).
-
-randomize(List) ->
- D = lists:map(fun(A) -> {couch_rand:uniform(), A} end, List),
- {_, D1} = lists:unzip(lists:keysort(1, D)),
- D1.
-
-test_btree(Btree, KeyValues) ->
- ok = test_key_access(Btree, KeyValues),
- ok = test_lookup_access(Btree, KeyValues),
- ok = test_final_reductions(Btree, KeyValues),
- ok = test_traversal_callbacks(Btree, KeyValues),
- true.
-
-test_add_remove(Btree, OutKeyValues, RemainingKeyValues) ->
- Btree2 = lists:foldl(
- fun({K, _}, BtAcc) ->
- {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [], [K]),
- BtAcc2
- end, Btree, OutKeyValues),
- true = test_btree(Btree2, RemainingKeyValues),
-
- Btree3 = lists:foldl(
- fun(KV, BtAcc) ->
- {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []),
- BtAcc2
- end, Btree2, OutKeyValues),
- true = test_btree(Btree3, OutKeyValues ++ RemainingKeyValues).
-
-test_key_access(Btree, List) ->
- FoldFun = fun(Element, {[HAcc|TAcc], Count}) ->
-        % Entries must arrive in sorted order; assert on each element rather
-        % than discarding the comparison result.
-        ?assertEqual(HAcc, Element),
-        {ok, {TAcc, Count + 1}}
- end,
- Length = length(List),
- Sorted = lists:sort(List),
- {ok, _, {[], Length}} = couch_btree:foldl(Btree, FoldFun, {Sorted, 0}),
- {ok, _, {[], Length}} = couch_btree:fold(Btree, FoldFun,
- {Sorted, 0}, [{dir, rev}]),
- ok.
-
-test_lookup_access(Btree, KeyValues) ->
- FoldFun = fun({Key, Value}, {Key, Value}) -> {stop, true} end,
- lists:foreach(
- fun({Key, Value}) ->
- [{ok, {Key, Value}}] = couch_btree:lookup(Btree, [Key]),
- {ok, _, true} = couch_btree:foldl(Btree, FoldFun,
- {Key, Value}, [{start_key, Key}])
- end, KeyValues).
-
-test_final_reductions(Btree, KeyValues) ->
- KVLen = length(KeyValues),
- FoldLFun = fun(_X, LeadingReds, Acc) ->
- CountToStart = KVLen div 3 + Acc,
- CountToStart = couch_btree:final_reduce(Btree, LeadingReds),
- {ok, Acc + 1}
- end,
- FoldRFun = fun(_X, LeadingReds, Acc) ->
- CountToEnd = KVLen - KVLen div 3 + Acc,
- CountToEnd = couch_btree:final_reduce(Btree, LeadingReds),
- {ok, Acc + 1}
- end,
- {LStartKey, _} = case KVLen of
- 0 -> {nil, nil};
- _ -> lists:nth(KVLen div 3 + 1, lists:sort(KeyValues))
- end,
- {RStartKey, _} = case KVLen of
- 0 -> {nil, nil};
- _ -> lists:nth(KVLen div 3, lists:sort(KeyValues))
- end,
- {ok, _, FoldLRed} = couch_btree:foldl(Btree, FoldLFun, 0,
- [{start_key, LStartKey}]),
- {ok, _, FoldRRed} = couch_btree:fold(Btree, FoldRFun, 0,
- [{dir, rev}, {start_key, RStartKey}]),
- KVLen = FoldLRed + FoldRRed,
- ok.
-
-test_traversal_callbacks(Btree, _KeyValues) ->
- FoldFun = fun
- (visit, _GroupedKey, _Unreduced, Acc) ->
- {ok, Acc andalso false};
- (traverse, _LK, _Red, Acc) ->
- {skip, Acc andalso true}
- end,
-    % With 250 items the root is a kp_node. Always skipping should reduce to true.
- {ok, _, true} = couch_btree:fold(Btree, FoldFun, true, [{dir, fwd}]),
- ok.
diff --git a/src/couch/test/eunit/couch_changes_tests.erl b/src/couch/test/eunit/couch_changes_tests.erl
deleted file mode 100644
index 848b471f9..000000000
--- a/src/couch/test/eunit/couch_changes_tests.erl
+++ /dev/null
@@ -1,962 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_changes_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 6000).
--define(TEST_TIMEOUT, 10000).
-
--record(row, {
- id,
- seq,
- deleted = false,
- doc = nil
-}).
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = create_db(DbName),
- Revs = [R || {ok, R} <- [
- save_doc(Db, {[{<<"_id">>, <<"doc1">>}]}),
- save_doc(Db, {[{<<"_id">>, <<"doc2">>}]}),
- save_doc(Db, {[{<<"_id">>, <<"doc3">>}]}),
- save_doc(Db, {[{<<"_id">>, <<"doc4">>}]}),
- save_doc(Db, {[{<<"_id">>, <<"doc5">>}]})
- ]],
- Rev = lists:nth(3, Revs),
- {ok, Db1} = couch_db:reopen(Db),
-
- {ok, Rev1} = save_doc(Db1, {[{<<"_id">>, <<"doc3">>}, {<<"_rev">>, Rev}]}),
- Revs1 = Revs ++ [Rev1],
- Revs2 = Revs1 ++ [R || {ok, R} <- [
- save_doc(Db1, {[{<<"_id">>, <<"doc6">>}]}),
- save_doc(Db1, {[{<<"_id">>, <<"_design/foo">>}]}),
- save_doc(Db1, {[{<<"_id">>, <<"doc7">>}]}),
- save_doc(Db1, {[{<<"_id">>, <<"doc8">>}]})
- ]],
- config:set("native_query_servers", "erlang", "{couch_native_process, start_link, []}", _Persist=false),
- {DbName, list_to_tuple(Revs2)}.
-
-teardown({DbName, _}) ->
- config:delete("native_query_servers", "erlang", _Persist=false),
- delete_db(DbName),
- ok.
-
-
-changes_test_() ->
- {
- "Changes feed",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- [
- filter_by_selector(),
- filter_by_doc_id(),
- filter_by_design(),
- continuous_feed(),
- %%filter_by_custom_function()
- filter_by_filter_function(),
- filter_by_view()
- ]
- }
- }.
-
-filter_by_doc_id() ->
- {
- "Filter _doc_id",
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_filter_by_specific_doc_ids/1,
- fun should_filter_by_specific_doc_ids_descending/1,
- fun should_filter_by_specific_doc_ids_with_since/1,
- fun should_filter_by_specific_doc_ids_no_result/1,
- fun should_handle_deleted_docs/1
- ]
- }
- }.
-
-filter_by_selector() ->
- {
- "Filter _selector",
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_select_basic/1,
- fun should_select_with_since/1,
- fun should_select_when_no_result/1,
- fun should_select_with_deleted_docs/1,
- fun should_select_with_continuous/1,
- fun should_stop_selector_when_db_deleted/1,
- fun should_select_with_empty_fields/1,
- fun should_select_with_fields/1
- ]
- }
- }.
-
-
-filter_by_design() ->
- {
- "Filter _design",
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_emit_only_design_documents/1
- ]
- }
- }.
-
-%% filter_by_custom_function() ->
-%% {
-%% "Filter function",
-%% {
-%% foreach,
-%% fun setup/0, fun teardown/1,
-%% [
-%% fun should_receive_heartbeats/1
-%% ]
-%% }
-%% }.
-
-filter_by_filter_function() ->
- {
- "Filter by filters",
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_filter_by_doc_attribute/1,
- fun should_filter_by_user_ctx/1
- ]
- }
- }.
-
-filter_by_view() ->
- {
- "Filter _view",
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_filter_by_view/1,
- fun should_filter_by_erlang_view/1
- ]
- }
- }.
-
-continuous_feed() ->
- {
- "Continuous Feed",
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_filter_continuous_feed_by_specific_doc_ids/1,
- fun should_end_changes_when_db_deleted/1
- ]
- }
- }.
-
-
-should_filter_by_specific_doc_ids({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{
- filter = "_doc_ids"
- },
- DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
- Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
-
- ?assertEqual(2, length(Rows)),
- [#row{seq = Seq1, id = Id1}, #row{seq = Seq2, id = Id2}] = Rows,
- ?assertEqual(<<"doc4">>, Id1),
- ?assertEqual(4, Seq1),
- ?assertEqual(<<"doc3">>, Id2),
- ?assertEqual(6, Seq2),
- ?assertEqual(UpSeq, LastSeq)
- end).
-
-should_filter_by_specific_doc_ids_descending({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{
- filter = "_doc_ids",
- dir = rev
- },
- DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
- Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
- {Rows, LastSeq, _} = run_changes_query(DbName, ChArgs, Req),
-
- ?assertEqual(2, length(Rows)),
- [#row{seq = Seq1, id = Id1}, #row{seq = Seq2, id = Id2}] = Rows,
- ?assertEqual(<<"doc3">>, Id1),
- ?assertEqual(6, Seq1),
- ?assertEqual(<<"doc4">>, Id2),
- ?assertEqual(4, Seq2),
- ?assertEqual(4, LastSeq)
- end).
-
-should_filter_by_specific_doc_ids_with_since({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{
- filter = "_doc_ids",
- since = 5
- },
- DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
- Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
-
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq1, id = Id1}] = Rows,
- ?assertEqual(<<"doc3">>, Id1),
- ?assertEqual(6, Seq1),
- ?assertEqual(UpSeq, LastSeq)
- end).
-
-should_filter_by_specific_doc_ids_no_result({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{
- filter = "_doc_ids",
- since = 6
- },
- DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
- Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
-
- ?assertEqual(0, length(Rows)),
- ?assertEqual(UpSeq, LastSeq)
- end).
-
-should_handle_deleted_docs({DbName, Revs}) ->
- ?_test(
- begin
- Rev3_2 = element(6, Revs),
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, _} = save_doc(
- Db,
- {[{<<"_id">>, <<"doc3">>},
- {<<"_deleted">>, true},
- {<<"_rev">>, Rev3_2}]}),
-
- ChArgs = #changes_args{
- filter = "_doc_ids",
- since = 9
- },
- DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
- Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
- {Rows, LastSeq, _} = run_changes_query(DbName, ChArgs, Req),
-
- ?assertEqual(1, length(Rows)),
- ?assertMatch(
- [#row{seq = LastSeq, id = <<"doc3">>, deleted = true}],
- Rows
- ),
- ?assertEqual(11, LastSeq)
- end).
-
-should_filter_continuous_feed_by_specific_doc_ids({DbName, Revs}) ->
- ?_test(
- begin
- {ok, Db} = couch_db:open_int(DbName, []),
- ChangesArgs = #changes_args{
- filter = "_doc_ids",
- feed = "continuous"
- },
- DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
- Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
- reset_row_notifications(),
- Consumer = spawn_consumer(DbName, ChangesArgs, Req),
- ?assertEqual(ok, wait_row_notifications(2)),
- ok = pause(Consumer),
-
- Rows = get_rows(Consumer),
- ?assertEqual(2, length(Rows)),
- [#row{seq = Seq1, id = Id1}, #row{seq = Seq2, id = Id2}] = Rows,
- ?assertEqual(<<"doc4">>, Id1),
- ?assertEqual(4, Seq1),
- ?assertEqual(<<"doc3">>, Id2),
- ?assertEqual(6, Seq2),
-
- clear_rows(Consumer),
- {ok, _Rev9} = save_doc(Db, {[{<<"_id">>, <<"doc9">>}]}),
- {ok, _Rev10} = save_doc(Db, {[{<<"_id">>, <<"doc10">>}]}),
- ok = unpause(Consumer),
- timer:sleep(100),
- ok = pause(Consumer),
- ?assertEqual([], get_rows(Consumer)),
-
- Rev4 = element(4, Revs),
- Rev3_2 = element(6, Revs),
- {ok, Rev4_2} = save_doc(Db, {[{<<"_id">>, <<"doc4">>},
- {<<"_rev">>, Rev4}]}),
- {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc11">>}]}),
- {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc4">>},
- {<<"_rev">>, Rev4_2}]}),
- {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc12">>}]}),
- {ok, Rev3_3} = save_doc(Db, {[{<<"_id">>, <<"doc3">>},
- {<<"_rev">>, Rev3_2}]}),
- reset_row_notifications(),
- ok = unpause(Consumer),
- ?assertEqual(ok, wait_row_notifications(2)),
- ok = pause(Consumer),
-
- NewRows = get_rows(Consumer),
- ?assertEqual(2, length(NewRows)),
- [Row14, Row16] = NewRows,
- ?assertEqual(<<"doc4">>, Row14#row.id),
- ?assertEqual(15, Row14#row.seq),
- ?assertEqual(<<"doc3">>, Row16#row.id),
- ?assertEqual(17, Row16#row.seq),
-
- clear_rows(Consumer),
- {ok, _Rev3_4} = save_doc(Db, {[{<<"_id">>, <<"doc3">>},
- {<<"_rev">>, Rev3_3}]}),
- reset_row_notifications(),
- ok = unpause(Consumer),
- ?assertEqual(ok, wait_row_notifications(1)),
- ok = pause(Consumer),
-
- FinalRows = get_rows(Consumer),
-
- ok = unpause(Consumer),
- stop_consumer(Consumer),
-
- ?assertMatch([#row{seq = 18, id = <<"doc3">>}], FinalRows)
- end).
-
-
-should_end_changes_when_db_deleted({DbName, _Revs}) ->
- ?_test(begin
- {ok, _Db} = couch_db:open_int(DbName, []),
- ChangesArgs = #changes_args{
- filter = "_doc_ids",
- feed = "continuous"
- },
- DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>],
- Req = {json_req, {[{<<"doc_ids">>, DocIds}]}},
- Consumer = spawn_consumer(DbName, ChangesArgs, Req),
- ok = pause(Consumer),
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- ok = unpause(Consumer),
- {_Rows, _LastSeq} = wait_finished(Consumer),
- stop_consumer(Consumer),
- ok
- end).
-
-
-should_select_basic({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{filter = "_selector"},
- Selector = {[{<<"_id">>, <<"doc3">>}]},
- Req = {json_req, {[{<<"selector">>, Selector}]}},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq, id = Id}] = Rows,
- ?assertEqual(<<"doc3">>, Id),
- ?assertEqual(6, Seq),
- ?assertEqual(UpSeq, LastSeq)
- end).
-
-should_select_with_since({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{filter = "_selector", since = 9},
- GteDoc2 = {[{<<"$gte">>, <<"doc1">>}]},
- Selector = {[{<<"_id">>, GteDoc2}]},
- Req = {json_req, {[{<<"selector">>, Selector}]}},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq, id = Id}] = Rows,
- ?assertEqual(<<"doc8">>, Id),
- ?assertEqual(10, Seq),
- ?assertEqual(UpSeq, LastSeq)
- end).
-
-should_select_when_no_result({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{filter = "_selector"},
- Selector = {[{<<"_id">>, <<"nopers">>}]},
- Req = {json_req, {[{<<"selector">>, Selector}]}},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(0, length(Rows)),
- ?assertEqual(UpSeq, LastSeq)
- end).
-
-should_select_with_deleted_docs({DbName, Revs}) ->
- ?_test(
- begin
- Rev3_2 = element(6, Revs),
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, _} = save_doc(
- Db,
- {[{<<"_id">>, <<"doc3">>},
- {<<"_deleted">>, true},
- {<<"_rev">>, Rev3_2}]}),
- ChArgs = #changes_args{filter = "_selector"},
- Selector = {[{<<"_id">>, <<"doc3">>}]},
- Req = {json_req, {[{<<"selector">>, Selector}]}},
- {Rows, LastSeq, _} = run_changes_query(DbName, ChArgs, Req),
- ?assertMatch(
- [#row{seq = LastSeq, id = <<"doc3">>, deleted = true}],
- Rows
- ),
- ?assertEqual(11, LastSeq)
- end).
-
-should_select_with_continuous({DbName, Revs}) ->
- ?_test(
- begin
- {ok, Db} = couch_db:open_int(DbName, []),
- ChArgs = #changes_args{filter = "_selector", feed = "continuous"},
- GteDoc8 = {[{<<"$gte">>, <<"doc8">>}]},
- Selector = {[{<<"_id">>, GteDoc8}]},
- Req = {json_req, {[{<<"selector">>, Selector}]}},
- reset_row_notifications(),
- Consumer = spawn_consumer(DbName, ChArgs, Req),
- ?assertEqual(ok, wait_row_notifications(1)),
- ok = pause(Consumer),
- Rows = get_rows(Consumer),
- ?assertMatch(
- [#row{seq = 10, id = <<"doc8">>, deleted = false}],
- Rows
- ),
- clear_rows(Consumer),
- {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc01">>}]}),
- ok = unpause(Consumer),
- timer:sleep(100),
- ok = pause(Consumer),
- ?assertEqual([], get_rows(Consumer)),
- Rev4 = element(4, Revs),
- Rev8 = element(10, Revs),
- {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc8">>},
- {<<"_rev">>, Rev8}]}),
- {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc4">>},
- {<<"_rev">>, Rev4}]}),
- reset_row_notifications(),
- ok = unpause(Consumer),
- ?assertEqual(ok, wait_row_notifications(1)),
- ok = pause(Consumer),
- NewRows = get_rows(Consumer),
- ?assertMatch(
- [#row{seq = _, id = <<"doc8">>, deleted = false}],
- NewRows
- )
- end).
-
-should_stop_selector_when_db_deleted({DbName, _Revs}) ->
- ?_test(
- begin
- {ok, _Db} = couch_db:open_int(DbName, []),
- ChArgs = #changes_args{filter = "_selector", feed = "continuous"},
- Selector = {[{<<"_id">>, <<"doc3">>}]},
- Req = {json_req, {[{<<"selector">>, Selector}]}},
- Consumer = spawn_consumer(DbName, ChArgs, Req),
- ok = pause(Consumer),
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- ok = unpause(Consumer),
- {_Rows, _LastSeq} = wait_finished(Consumer),
- stop_consumer(Consumer),
- ok
- end).
-
-
-should_select_with_empty_fields({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{filter = "_selector", include_docs=true},
- Selector = {[{<<"_id">>, <<"doc3">>}]},
- Req = {json_req, {[{<<"selector">>, Selector},
- {<<"fields">>, []}]}},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq, id = Id, doc = Doc}] = Rows,
- ?assertEqual(<<"doc3">>, Id),
- ?assertEqual(6, Seq),
- ?assertEqual(UpSeq, LastSeq),
- ?assertMatch({[{_K1, _V1}, {_K2, _V2}]}, Doc)
- end).
-
-should_select_with_fields({DbName, _}) ->
- ?_test(
- begin
- ChArgs = #changes_args{filter = "_selector", include_docs=true},
- Selector = {[{<<"_id">>, <<"doc3">>}]},
- Req = {json_req, {[{<<"selector">>, Selector},
- {<<"fields">>, [<<"_id">>, <<"nope">>]}]}},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq, id = Id, doc = Doc}] = Rows,
- ?assertEqual(<<"doc3">>, Id),
- ?assertEqual(6, Seq),
- ?assertEqual(UpSeq, LastSeq),
- ?assertMatch(Doc, {[{<<"_id">>, <<"doc3">>}]})
- end).
-
-
-should_emit_only_design_documents({DbName, Revs}) ->
- ?_test(
- begin
- ChArgs = #changes_args{
- filter = "_design"
- },
- Req = {json_req, null},
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
-
- ?assertEqual(1, length(Rows)),
- ?assertEqual(UpSeq, LastSeq),
- ?assertEqual([#row{seq = 8, id = <<"_design/foo">>}], Rows),
-
-
- {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- {ok, _} = save_doc(Db, {[{<<"_id">>, <<"_design/foo">>},
- {<<"_rev">>, element(8, Revs)},
- {<<"_deleted">>, true}]}),
-
- couch_db:close(Db),
- {Rows2, LastSeq2, _} = run_changes_query(DbName, ChArgs, Req),
-
- UpSeq2 = UpSeq + 1,
-
- ?assertEqual(1, length(Rows2)),
- ?assertEqual(UpSeq2, LastSeq2),
- ?assertEqual([#row{seq = 11,
- id = <<"_design/foo">>,
- deleted = true}],
- Rows2)
- end).
-
-%% should_receive_heartbeats(_) ->
-%% {timeout, ?TEST_TIMEOUT div 1000,
-%% ?_test(
-%% begin
-%% DbName = ?tempdb(),
-%% Timeout = 100,
-%% {ok, Db} = create_db(DbName),
-
-%% {ok, _} = save_doc(Db, {[
-%% {<<"_id">>, <<"_design/filtered">>},
-%% {<<"language">>, <<"javascript">>},
-%% {<<"filters">>, {[
-%% {<<"foo">>, <<"function(doc) {
-%% return ['doc10', 'doc11', 'doc12'].indexOf(doc._id) != -1;}">>
-%% }]}}
-%% ]}),
-
-%% ChangesArgs = #changes_args{
-%% filter = "filtered/foo",
-%% feed = "continuous",
-%% timeout = 10000,
-%% heartbeat = 1000
-%% },
-%% Consumer = spawn_consumer(DbName, ChangesArgs, {json_req, null}),
-
-%% {ok, _Rev1} = save_doc(Db, {[{<<"_id">>, <<"doc1">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev2} = save_doc(Db, {[{<<"_id">>, <<"doc2">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev3} = save_doc(Db, {[{<<"_id">>, <<"doc3">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev4} = save_doc(Db, {[{<<"_id">>, <<"doc4">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev5} = save_doc(Db, {[{<<"_id">>, <<"doc5">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev6} = save_doc(Db, {[{<<"_id">>, <<"doc6">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev7} = save_doc(Db, {[{<<"_id">>, <<"doc7">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev8} = save_doc(Db, {[{<<"_id">>, <<"doc8">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev9} = save_doc(Db, {[{<<"_id">>, <<"doc9">>}]}),
-
-%% Heartbeats = get_heartbeats(Consumer),
-%% ?assert(Heartbeats > 0),
-
-%% {ok, _Rev10} = save_doc(Db, {[{<<"_id">>, <<"doc10">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev11} = save_doc(Db, {[{<<"_id">>, <<"doc11">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev12} = save_doc(Db, {[{<<"_id">>, <<"doc12">>}]}),
-
-%% Heartbeats2 = get_heartbeats(Consumer),
-%% ?assert(Heartbeats2 > Heartbeats),
-
-%% Rows = get_rows(Consumer),
-%% ?assertEqual(3, length(Rows)),
-
-%% {ok, _Rev13} = save_doc(Db, {[{<<"_id">>, <<"doc13">>}]}),
-%% timer:sleep(Timeout),
-%% {ok, _Rev14} = save_doc(Db, {[{<<"_id">>, <<"doc14">>}]}),
-%% timer:sleep(Timeout),
-
-%% Heartbeats3 = get_heartbeats(Consumer),
-%% ?assert(Heartbeats3 > Heartbeats2)
-%% end)}.
-
-should_filter_by_doc_attribute({DbName, _}) ->
- ?_test(
- begin
- DDocId = <<"_design/app">>,
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDocId},
- {<<"language">>, <<"javascript">>},
- {<<"filters">>, {[
- {<<"valid">>, <<"function(doc, req) {"
- " if (doc._id == 'doc3') {"
- " return true; "
- "} }">>}
- ]}}
- ]}),
- ChArgs = #changes_args{filter = "app/valid"},
- Req = {json_req, null},
- ok = update_ddoc(DbName, DDoc),
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq, id = Id}] = Rows,
- ?assertEqual(<<"doc3">>, Id),
- ?assertEqual(6, Seq),
- ?assertEqual(UpSeq, LastSeq)
- end).
-
-should_filter_by_user_ctx({DbName, _}) ->
- ?_test(
- begin
- DDocId = <<"_design/app">>,
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDocId},
- {<<"language">>, <<"javascript">>},
- {<<"filters">>, {[
- {<<"valid">>, <<"function(doc, req) {"
- " if (req.userCtx.name == doc._id) {"
- " return true; "
- "} }">>}
- ]}}
- ]}),
- ChArgs = #changes_args{filter = "app/valid"},
- UserCtx = #user_ctx{name = <<"doc3">>, roles = []},
- {ok, DbRec} = couch_db:clustered_db(DbName, UserCtx),
- Req = {json_req, {[{
- <<"userCtx">>, couch_util:json_user_ctx(DbRec)
- }]}},
- ok = update_ddoc(DbName, DDoc),
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq, id = Id}] = Rows,
- ?assertEqual(<<"doc3">>, Id),
- ?assertEqual(6, Seq),
- ?assertEqual(UpSeq, LastSeq)
- end).
-
-should_filter_by_view({DbName, _}) ->
- ?_test(
- begin
- DDocId = <<"_design/app">>,
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDocId},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {<<"valid">>, {[
- {<<"map">>, <<"function(doc) {"
- " if (doc._id == 'doc3') {"
- " emit(doc); "
- "} }">>}
- ]}}
- ]}}
- ]}),
- ChArgs = #changes_args{filter = "_view"},
- Req = {json_req, {[{
- <<"query">>, {[
- {<<"view">>, <<"app/valid">>}
- ]}
- }]}},
- ok = update_ddoc(DbName, DDoc),
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq, id = Id}] = Rows,
- ?assertEqual(<<"doc3">>, Id),
- ?assertEqual(6, Seq),
- ?assertEqual(UpSeq, LastSeq)
- end).
-
-should_filter_by_erlang_view({DbName, _}) ->
- ?_test(
- begin
- DDocId = <<"_design/app">>,
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDocId},
- {<<"language">>, <<"erlang">>},
- {<<"views">>, {[
- {<<"valid">>, {[
- {<<"map">>, <<"fun({Doc}) ->"
- " case lists:keyfind(<<\"_id\">>, 1, Doc) of"
- " {<<\"_id\">>, <<\"doc3\">>} -> Emit(Doc, null); "
- " false -> ok"
- " end "
- "end.">>}
- ]}}
- ]}}
- ]}),
- ChArgs = #changes_args{filter = "_view"},
- Req = {json_req, {[{
- <<"query">>, {[
- {<<"view">>, <<"app/valid">>}
- ]}
- }]}},
- ok = update_ddoc(DbName, DDoc),
- {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
- ?assertEqual(1, length(Rows)),
- [#row{seq = Seq, id = Id}] = Rows,
- ?assertEqual(<<"doc3">>, Id),
- ?assertEqual(6, Seq),
- ?assertEqual(UpSeq, LastSeq)
- end).
-
-update_ddoc(DbName, DDoc) ->
- {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- {ok, _} = couch_db:update_doc(Db, DDoc, []),
- couch_db:close(Db).
-
-run_changes_query(DbName, ChangesArgs, Opts) ->
- Consumer = spawn_consumer(DbName, ChangesArgs, Opts),
- {Rows, LastSeq} = wait_finished(Consumer),
- {ok, Db} = couch_db:open_int(DbName, []),
- UpSeq = couch_db:get_update_seq(Db),
- couch_db:close(Db),
- stop_consumer(Consumer),
- {Rows, LastSeq, UpSeq}.
-
-save_doc(Db, Json) ->
- Doc = couch_doc:from_json_obj(Json),
- {ok, Rev} = couch_db:update_doc(Db, Doc, []),
- {ok, couch_doc:rev_to_str(Rev)}.
-
-get_rows({Consumer, _}) ->
- Ref = make_ref(),
- Consumer ! {get_rows, Ref},
- Resp = receive
- {rows, Ref, Rows} ->
- Rows
- after ?TIMEOUT ->
- timeout
- end,
- ?assertNotEqual(timeout, Resp),
- Resp.
-
-%% get_heartbeats({Consumer, _}) ->
-%% Ref = make_ref(),
-%% Consumer ! {get_heartbeats, Ref},
-%% Resp = receive
-%%         {heartbeats, Ref, HeartBeats} ->
-%% HeartBeats
-%% after ?TIMEOUT ->
-%% timeout
-%% end,
-%% ?assertNotEqual(timeout, Resp),
-%% Resp.
-
-clear_rows({Consumer, _}) ->
- Ref = make_ref(),
- Consumer ! {reset, Ref},
- Resp = receive
- {ok, Ref} ->
- ok
- after ?TIMEOUT ->
- timeout
- end,
- ?assertNotEqual(timeout, Resp),
- Resp.
-
-stop_consumer({Consumer, _}) ->
- Ref = make_ref(),
- Consumer ! {stop, Ref},
- Resp = receive
- {ok, Ref} ->
- ok
- after ?TIMEOUT ->
- timeout
- end,
- ?assertNotEqual(timeout, Resp),
- Resp.
-
-pause({Consumer, _}) ->
- Ref = make_ref(),
- Consumer ! {pause, Ref},
- Resp = receive
- {paused, Ref} ->
- ok
- after ?TIMEOUT ->
- timeout
- end,
- ?assertNotEqual(timeout, Resp),
- Resp.
-
-unpause({Consumer, _}) ->
- Ref = make_ref(),
- Consumer ! {continue, Ref},
- Resp = receive
- {ok, Ref} ->
- ok
- after ?TIMEOUT ->
- timeout
- end,
- ?assertNotEqual(timeout, Resp),
- Resp.
-
-wait_finished({_, ConsumerRef}) ->
- receive
- {consumer_finished, Rows, LastSeq} ->
- {Rows, LastSeq};
- {'DOWN', ConsumerRef, _, _, Msg} when Msg == normal; Msg == ok ->
- ok;
- {'DOWN', ConsumerRef, _, _, Msg} ->
- erlang:error({consumer_died, [
- {module, ?MODULE},
- {line, ?LINE},
- {value, Msg}
- ]})
- after ?TIMEOUT ->
- erlang:error({consumer_died, [
- {module, ?MODULE},
- {line, ?LINE},
- {value, timeout}
- ]})
- end.
-
-
-reset_row_notifications() ->
- receive
- row ->
- reset_row_notifications()
- after 0 ->
- ok
- end.
-
-
-wait_row_notifications(N) ->
- receive
- row when N == 1 ->
- ok;
- row when N > 1 ->
- wait_row_notifications(N - 1)
- after ?TIMEOUT ->
- timeout
- end.
-
-
-spawn_consumer(DbName, ChangesArgs0, Req) ->
- Parent = self(),
- spawn_monitor(fun() ->
- put(heartbeat_count, 0),
- Callback = fun
- ({change, {Change}, _}, _, Acc) ->
- Id = couch_util:get_value(<<"id">>, Change),
- Seq = couch_util:get_value(<<"seq">>, Change),
- Del = couch_util:get_value(<<"deleted">>, Change, false),
- Doc = couch_util:get_value(doc, Change, nil),
- Parent ! row,
- [#row{id = Id, seq = Seq, deleted = Del, doc = Doc} | Acc];
- ({stop, LastSeq}, _, Acc) ->
- Parent ! {consumer_finished, lists:reverse(Acc), LastSeq},
- stop_loop(Parent, Acc);
- (timeout, _, Acc) ->
- put(heartbeat_count, get(heartbeat_count) + 1),
- maybe_pause(Parent, Acc);
- (_, _, Acc) ->
- maybe_pause(Parent, Acc)
- end,
- {ok, Db} = couch_db:open_int(DbName, []),
- ChangesArgs = case (ChangesArgs0#changes_args.timeout =:= undefined)
- andalso (ChangesArgs0#changes_args.heartbeat =:= undefined) of
- true ->
- ChangesArgs0#changes_args{timeout = 1000, heartbeat = 100};
- false ->
- ChangesArgs0
- end,
- FeedFun = couch_changes:handle_db_changes(ChangesArgs, Req, Db),
- try
- FeedFun({Callback, []})
- catch
- throw:{stop, _} -> ok;
- _:Error -> exit(Error)
- after
- couch_db:close(Db)
- end
- end).
-
-maybe_pause(Parent, Acc) ->
- receive
- {get_rows, Ref} ->
- Parent ! {rows, Ref, lists:reverse(Acc)},
- maybe_pause(Parent, Acc);
- {get_heartbeats, Ref} ->
-            Parent ! {heartbeats, Ref, get(heartbeat_count)},
- maybe_pause(Parent, Acc);
- {reset, Ref} ->
- Parent ! {ok, Ref},
- maybe_pause(Parent, []);
- {pause, Ref} ->
- Parent ! {paused, Ref},
- pause_loop(Parent, Acc);
- {stop, Ref} ->
- Parent ! {ok, Ref},
- throw({stop, Acc});
- V when V /= updated ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {value, V},
- {reason, "Received unexpected message"}]})
- after 0 ->
- Acc
- end.
-
-pause_loop(Parent, Acc) ->
- receive
- {stop, Ref} ->
- Parent ! {ok, Ref},
- throw({stop, Acc});
- {reset, Ref} ->
- Parent ! {ok, Ref},
- pause_loop(Parent, []);
- {continue, Ref} ->
- Parent ! {ok, Ref},
- Acc;
- {get_rows, Ref} ->
- Parent ! {rows, Ref, lists:reverse(Acc)},
- pause_loop(Parent, Acc)
- end.
-
-stop_loop(Parent, Acc) ->
- receive
- {get_rows, Ref} ->
- Parent ! {rows, Ref, lists:reverse(Acc)},
- stop_loop(Parent, Acc);
- {stop, Ref} ->
- Parent ! {ok, Ref},
- Acc
- end.
-
-create_db(DbName) ->
- couch_db:create(DbName, [?ADMIN_CTX, overwrite]).
-
-delete_db(DbName) ->
- couch_server:delete(DbName, [?ADMIN_CTX]).
diff --git a/src/couch/test/eunit/couch_db_doc_tests.erl b/src/couch/test/eunit/couch_db_doc_tests.erl
deleted file mode 100644
index 916b63207..000000000
--- a/src/couch/test/eunit/couch_db_doc_tests.erl
+++ /dev/null
@@ -1,121 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_doc_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-start() ->
- test_util:start_couch([ioq]).
-
-
-setup() ->
- DbName = ?tempdb(),
- config:set("couchdb", "stem_interactive_updates", "false", false),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- couch_db:close(Db),
- DbName.
-
-
-teardown(DbName) ->
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- ok.
-
-
-couch_db_doc_test_() ->
- {
- "CouchDB doc tests",
- {
- setup,
- fun start/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_truncate_number_of_revisions/1,
- fun should_raise_bad_request_on_invalid_rev/1,
- fun should_allow_access_in_doc_keys_test/1
- ]
- }
- }
- }.
-
-
-should_truncate_number_of_revisions(DbName) ->
- DocId = <<"foo">>,
- Db = open_db(DbName),
- couch_db:set_revs_limit(Db, 5),
- Rev = create_doc(Db, DocId),
- Rev10 = add_revisions(Db, DocId, Rev, 10),
- {ok, [{ok, #doc{revs = {11, Revs}}}]} = open_doc_rev(Db, DocId, Rev10),
- ?_assertEqual(5, length(Revs)).
-
-
-should_raise_bad_request_on_invalid_rev(DbName) ->
- DocId = <<"foo">>,
- InvalidRev1 = <<"foo">>,
- InvalidRev2 = <<"a-foo">>,
- InvalidRev3 = <<"1-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx">>,
- Expect = {bad_request, <<"Invalid rev format">>},
- Db = open_db(DbName),
- create_doc(Db, DocId),
- [
- {InvalidRev1,
- ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev1, 1))},
- {InvalidRev2,
- ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev2, 1))},
- {InvalidRev3,
- ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev3, 1))}
- ].
-
-should_allow_access_in_doc_keys_test(_DbName) ->
- Json = <<"{\"_id\":\"foo\",\"_access\":[\"test\"]}">>,
- EJson = couch_util:json_decode(Json),
- Expected = {[{<<"_id">>,<<"foo">>}, {<<"_access">>, [<<"test">>]}]},
- EJson = Expected,
- Doc = couch_doc:from_json_obj(EJson),
- NewEJson = couch_doc:to_json_obj(Doc, []),
- ?_assertEqual(NewEJson, Expected).
-
-open_db(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- Db.
-
-
-create_doc(Db, DocId) ->
- add_revision(Db, DocId, undefined).
-
-
-open_doc_rev(Db0, DocId, Rev) ->
- {ok, Db} = couch_db:reopen(Db0),
- couch_db:open_doc_revs(Db, DocId, [couch_doc:parse_rev(Rev)], []).
-
-
-add_revision(Db, DocId, undefined) ->
- add_revision(Db, DocId, []);
-add_revision(Db, DocId, Rev) when is_binary(Rev) ->
- add_revision(Db, DocId, [{<<"_rev">>, Rev}]);
-add_revision(Db0, DocId, Rev) ->
- {ok, Db} = couch_db:reopen(Db0),
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, DocId},
- {<<"value">>, DocId}
- ] ++ Rev}),
- {ok, NewRev} = couch_db:update_doc(Db, Doc, []),
- couch_doc:rev_to_str(NewRev).
-
-
-add_revisions(Db, DocId, Rev, N) ->
- lists:foldl(fun(_, OldRev) ->
- add_revision(Db, DocId, OldRev)
- end, Rev, lists:seq(1, N)).
diff --git a/src/couch/test/eunit/couch_db_mpr_tests.erl b/src/couch/test/eunit/couch_db_mpr_tests.erl
index bb97c66d7..37137975e 100644
--- a/src/couch/test/eunit/couch_db_mpr_tests.erl
+++ b/src/couch/test/eunit/couch_db_mpr_tests.erl
@@ -31,8 +31,8 @@ setup() ->
Hashed = couch_passwords:hash_admin_password(?PASS),
ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
TmpDb = ?tempdb(),
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(couch_httpd, port),
+ Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
+ Port = mochiweb_socket_server:get(chttpd, port),
Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
Url.
@@ -64,8 +64,12 @@ couch_db_mpr_test_() ->
"multi-part attachment tests",
{
setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
+ fun() ->
+ test_util:start_couch([chttpd])
+ end,
+ fun(Ctx) ->
+ test_util:stop_couch(Ctx)
+ end,
{
foreach,
fun setup/0,
diff --git a/src/couch/test/eunit/couch_db_plugin_tests.erl b/src/couch/test/eunit/couch_db_plugin_tests.erl
deleted file mode 100644
index 93551adbc..000000000
--- a/src/couch/test/eunit/couch_db_plugin_tests.erl
+++ /dev/null
@@ -1,205 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_plugin_tests).
-
--export([
- validate_dbname/2,
- before_doc_update/3,
- after_doc_read/2,
- validate_docid/1,
- check_is_admin/1,
- on_delete/2
-]).
-
--export([ %% couch_epi_plugin behaviour
- app/0,
- providers/0,
- services/0,
- data_providers/0,
- data_subscriptions/0,
- processes/0,
- notify/3
-]).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-%% couch_epi_plugin behaviour
-
-app() -> test_app.
-providers() -> [{couch_db, ?MODULE}].
-services() -> [].
-data_providers() -> [].
-data_subscriptions() -> [].
-processes() -> [].
-notify(_, _, _) -> ok.
-fake_db() -> test_util:fake_db([]).
-
-setup() ->
- couch_tests:setup([
- couch_epi_dispatch:dispatch(chttpd, ?MODULE)
- ]).
-
-teardown(Ctx) ->
- couch_tests:teardown(Ctx).
-
-validate_dbname({true, _Db}, _) -> {decided, true};
-validate_dbname({false, _Db}, _) -> {decided, false};
-validate_dbname({fail, _Db}, _) -> throw(validate_dbname);
-validate_dbname({pass, _Db}, _) -> no_decision.
-
-before_doc_update({fail, _Doc}, _Db, interactive_edit) -> throw(before_doc_update);
-before_doc_update({true, Doc}, Db, interactive_edit) -> [{true, [before_doc_update|Doc]}, Db, interactive_edit];
-before_doc_update({false, Doc}, Db, interactive_edit) -> [{false, Doc}, Db, interactive_edit].
-
-after_doc_read({fail, _Doc}, _Db) -> throw(after_doc_read);
-after_doc_read({true, Doc}, Db) -> [{true, [after_doc_read|Doc]}, Db];
-after_doc_read({false, Doc}, Db) -> [{false, Doc}, Db].
-
-validate_docid({true, _Id}) -> true;
-validate_docid({false, _Id}) -> false;
-validate_docid({fail, _Id}) -> throw(validate_docid).
-
-check_is_admin({true, _Db}) -> true;
-check_is_admin({false, _Db}) -> false;
-check_is_admin({fail, _Db}) -> throw(check_is_admin).
-
-on_delete(true, _Opts) -> true;
-on_delete(false, _Opts) -> false;
-on_delete(fail, _Opts) -> throw(on_delete).
-
-callback_test_() ->
- {
- "callback tests",
- {
- setup, fun setup/0, fun teardown/1,
- [
- {"validate_dbname_match", fun validate_dbname_match/0},
- {"validate_dbname_no_match", fun validate_dbname_no_match/0},
- {"validate_dbname_throw", fun validate_dbname_throw/0},
- {"validate_dbname_pass", fun validate_dbname_pass/0},
-
- {"before_doc_update_match", fun before_doc_update_match/0},
- {"before_doc_update_no_match", fun before_doc_update_no_match/0},
- {"before_doc_update_throw", fun before_doc_update_throw/0},
-
- {"after_doc_read_match", fun after_doc_read_match/0},
- {"after_doc_read_no_match", fun after_doc_read_no_match/0},
- {"after_doc_read_throw", fun after_doc_read_throw/0},
-
- {"validate_docid_match", fun validate_docid_match/0},
- {"validate_docid_no_match", fun validate_docid_no_match/0},
- {"validate_docid_throw", fun validate_docid_throw/0},
-
- {"check_is_admin_match", fun check_is_admin_match/0},
- {"check_is_admin_no_match", fun check_is_admin_no_match/0},
- {"check_is_admin_throw", fun check_is_admin_throw/0},
-
- {"on_delete_match", fun on_delete_match/0},
- {"on_delete_no_match", fun on_delete_no_match/0},
- {"on_delete_throw", fun on_delete_throw/0}
- ]
- }
- }.
-
-
-validate_dbname_match() ->
- ?assert(couch_db_plugin:validate_dbname(
- {true, [db]}, db, fun(_, _) -> pass end)).
-
-validate_dbname_no_match() ->
- ?assertNot(couch_db_plugin:validate_dbname(
- {false, [db]}, db, fun(_, _) -> pass end)).
-
-validate_dbname_throw() ->
- ?assertThrow(
- validate_dbname,
- couch_db_plugin:validate_dbname(
- {fail, [db]}, db, fun(_, _) -> pass end)).
-
-validate_dbname_pass() ->
- ?assertEqual(pass, couch_db_plugin:validate_dbname(
- {pass, [db]}, db, fun(_, _) -> pass end)).
-
-before_doc_update_match() ->
- ?assertMatch(
- {true, [before_doc_update, doc]},
- couch_db_plugin:before_doc_update(
- fake_db(), {true, [doc]}, interactive_edit)).
-
-before_doc_update_no_match() ->
- ?assertMatch(
- {false, [doc]},
- couch_db_plugin:before_doc_update(
- fake_db(), {false, [doc]}, interactive_edit)).
-
-before_doc_update_throw() ->
- ?assertThrow(
- before_doc_update,
- couch_db_plugin:before_doc_update(
- fake_db(), {fail, [doc]}, interactive_edit)).
-
-
-after_doc_read_match() ->
- ?assertMatch(
- {true, [after_doc_read, doc]},
- couch_db_plugin:after_doc_read(fake_db(), {true, [doc]})).
-
-after_doc_read_no_match() ->
- ?assertMatch(
- {false, [doc]},
- couch_db_plugin:after_doc_read(fake_db(), {false, [doc]})).
-
-after_doc_read_throw() ->
- ?assertThrow(
- after_doc_read,
- couch_db_plugin:after_doc_read(fake_db(), {fail, [doc]})).
-
-
-validate_docid_match() ->
- ?assert(couch_db_plugin:validate_docid({true, [doc]})).
-
-validate_docid_no_match() ->
- ?assertNot(couch_db_plugin:validate_docid({false, [doc]})).
-
-validate_docid_throw() ->
- ?assertThrow(
- validate_docid,
- couch_db_plugin:validate_docid({fail, [doc]})).
-
-
-check_is_admin_match() ->
- ?assert(couch_db_plugin:check_is_admin({true, [db]})).
-
-check_is_admin_no_match() ->
- ?assertNot(couch_db_plugin:check_is_admin({false, [db]})).
-
-check_is_admin_throw() ->
- ?assertThrow(
- check_is_admin,
- couch_db_plugin:check_is_admin({fail, [db]})).
-
-on_delete_match() ->
- ?assertMatch(
- [true],
- couch_db_plugin:on_delete(true, [])).
-
-on_delete_no_match() ->
- ?assertMatch(
- [false],
- couch_db_plugin:on_delete(false, [])).
-
-on_delete_throw() ->
- ?assertThrow(
- on_delete,
- couch_db_plugin:on_delete(fail, [])).
diff --git a/src/couch/test/eunit/couch_db_props_upgrade_tests.erl b/src/couch/test/eunit/couch_db_props_upgrade_tests.erl
deleted file mode 100644
index 40ad283cf..000000000
--- a/src/couch/test/eunit/couch_db_props_upgrade_tests.erl
+++ /dev/null
@@ -1,83 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_props_upgrade_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-
-setup() ->
- DbName = <<"test">>,
- DbFileName = "test.couch",
- OldDbFilePath = filename:join([?FIXTURESDIR, DbFileName]),
-
- DbDir = config:get("couchdb", "database_dir"),
- NewDbFilePath = filename:join([DbDir, DbFileName]),
-
- file:delete(NewDbFilePath),
- {ok, _} = file:copy(OldDbFilePath, NewDbFilePath),
-
- DbName.
-
-
-teardown(DbName) when is_binary(DbName) ->
- couch_server:delete(DbName, [?ADMIN_CTX]),
- ok.
-
-
-old_db_info_test_() ->
- {
- "Old database versions work",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun can_get_props/1,
- fun can_get_db_info/1,
- fun can_compact_db/1
- ]
- }
- }
- }.
-
-
-can_get_props(DbName) ->
- ?_test(begin
- {ok, Db} = couch_db:open_int(DbName, []),
- Props = couch_db_engine:get_props(Db),
- ?assert(is_list(Props))
- end).
-
-
-can_get_db_info(DbName) ->
- ?_test(begin
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, Info} = couch_db:get_db_info(Db),
- Props = couch_util:get_value(props, Info),
- ?assertEqual({[]}, Props)
- end).
-
-
-can_compact_db(DbName) ->
- ?_test(begin
- couch_util:with_db(DbName, fun(Db) ->
- couch_db:start_compact(Db),
- couch_db:wait_for_compaction(Db)
- end)
- end).
diff --git a/src/couch/test/eunit/couch_db_split_tests.erl b/src/couch/test/eunit/couch_db_split_tests.erl
deleted file mode 100644
index 6e24c36ee..000000000
--- a/src/couch/test/eunit/couch_db_split_tests.erl
+++ /dev/null
@@ -1,331 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_split_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(RINGTOP, 2 bsl 31).
--define(TIMEOUT, 60). % seconds
-
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- DbName.
-
-
-teardown(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- FilePath = couch_db:get_filepath(Db),
- ok = couch_db:close(Db),
- ok = file:delete(FilePath).
-
-
-split_test_() ->
- Cases = [
- {"Should split an empty shard", 0, 2},
- {"Should split shard in half", 100, 2},
- {"Should split shard in three", 99, 3},
- {"Should split shard in four", 100, 4}
- ],
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop/1,
- [
- {
- foreachx,
- fun(_) -> setup() end, fun(_, St) -> teardown(St) end,
- [{Case, fun should_split_shard/2} || Case <- Cases]
- },
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_fail_on_missing_source/1,
- fun should_fail_on_existing_target/1,
- fun should_fail_on_invalid_target_name/1,
- fun should_crash_on_invalid_tmap/1,
- fun should_fail_on_opened_target/1
- ]
- }
- ]
- }.
-
-
-should_split_shard({Desc, TotalDocs, Q}, DbName) ->
- {ok, ExpectSeq} = create_docs(DbName, TotalDocs),
- Ranges = make_ranges(Q),
- TMap = make_targets(Ranges),
- DocsPerRange = TotalDocs div Q,
- PickFun = make_pickfun(DocsPerRange),
- {Desc, timeout, ?TIMEOUT, ?_test(begin
- {ok, UpdateSeq} = couch_db_split:split(DbName, TMap, PickFun),
- ?assertEqual(ExpectSeq, UpdateSeq),
- maps:map(fun(Range, Name) ->
- {ok, Db} = couch_db:open_int(Name, []),
- FilePath = couch_db:get_filepath(Db),
- %% target actually exists
- ?assertMatch({ok, _}, file:read_file_info(FilePath)),
- %% target's update seq is the same as source's update seq
- USeq = couch_db:get_update_seq(Db),
- ?assertEqual(ExpectSeq, USeq),
-        %% target shard has all the docs expected in its range
- {ok, DocsInShard} = couch_db:fold_docs(Db, fun(FDI, Acc) ->
- DocId = FDI#full_doc_info.id,
- ExpectedRange = PickFun(DocId, Ranges, undefined),
- ?assertEqual(ExpectedRange, Range),
- {ok, Acc + 1}
- end, 0),
- ?assertEqual(DocsPerRange, DocsInShard),
- ok = couch_db:close(Db),
- ok = file:delete(FilePath)
- end, TMap)
- end)}.
-
-
-should_fail_on_missing_source(_DbName) ->
- DbName = ?tempdb(),
- Ranges = make_ranges(2),
- TMap = make_targets(Ranges),
- Response = couch_db_split:split(DbName, TMap, fun fake_pickfun/3),
- ?_assertEqual({error, missing_source}, Response).
-
-
-should_fail_on_existing_target(DbName) ->
- Ranges = make_ranges(2),
- TMap = maps:map(fun(_, TName) ->
- % We create the target but make sure to remove it from the cache so we
-        % hit the eexist error instead of already_opened
- {ok, Db} = couch_db:create(TName, [?ADMIN_CTX]),
- Pid = couch_db:get_pid(Db),
- ok = couch_db:close(Db),
- exit(Pid, kill),
- test_util:wait(fun() ->
- case ets:lookup(couch_dbs, TName) of
- [] -> ok;
- [_ | _] -> wait
- end
- end),
- TName
- end, make_targets(Ranges)),
- Response = couch_db_split:split(DbName, TMap, fun fake_pickfun/3),
- ?_assertMatch({error, {target_create_error, _, eexist}}, Response).
-
-
-should_fail_on_invalid_target_name(DbName) ->
- Ranges = make_ranges(2),
- TMap = maps:map(fun([B, _], _) ->
- iolist_to_binary(["_$", couch_util:to_hex(<<B:32/integer>>)])
- end, make_targets(Ranges)),
- Expect = {error, {target_create_error, <<"_$00000000">>,
- {illegal_database_name, <<"_$00000000">>}}},
- Response = couch_db_split:split(DbName, TMap, fun fake_pickfun/3),
- ?_assertMatch(Expect, Response).
-
-
-should_crash_on_invalid_tmap(DbName) ->
- Ranges = make_ranges(1),
- TMap = make_targets(Ranges),
- ?_assertError(function_clause,
- couch_db_split:split(DbName, TMap, fun fake_pickfun/3)).
-
-
-should_fail_on_opened_target(DbName) ->
- Ranges = make_ranges(2),
- TMap = maps:map(fun(_, TName) ->
- % We create and keep the target open but delete
- % its file on disk so we don't fail with eexist
- {ok, Db} = couch_db:create(TName, [?ADMIN_CTX]),
- FilePath = couch_db:get_filepath(Db),
- ok = file:delete(FilePath),
- TName
- end, make_targets(Ranges)),
- ?_assertMatch({error, {target_create_error, _, already_opened}},
- couch_db_split:split(DbName, TMap, fun fake_pickfun/3)).
-
-
-copy_local_docs_test_() ->
- Cases = [
- {"Should work with no docs", 0, 2},
- {"Should copy local docs after split in two", 100, 2},
- {"Should copy local docs after split in three", 99, 3},
- {"Should copy local docs after split in four", 100, 4}
- ],
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop/1,
- [
- {
- foreachx,
- fun(_) -> setup() end, fun(_, St) -> teardown(St) end,
- [{Case, fun should_copy_local_docs/2} || Case <- Cases]
- },
- {"Should return error on missing source",
- fun should_fail_copy_local_on_missing_source/0}
- ]
- }.
-
-
-should_copy_local_docs({Desc, TotalDocs, Q}, DbName) ->
- {ok, ExpectSeq} = create_docs(DbName, TotalDocs),
- Ranges = make_ranges(Q),
- TMap = make_targets(Ranges),
- DocsPerRange = TotalDocs div Q,
- PickFun = make_pickfun(DocsPerRange),
- {Desc, timeout, ?TIMEOUT, ?_test(begin
- {ok, UpdateSeq} = couch_db_split:split(DbName, TMap, PickFun),
- ?assertEqual(ExpectSeq, UpdateSeq),
- Response = couch_db_split:copy_local_docs(DbName, TMap, PickFun),
- ?assertEqual(ok, Response),
- maps:map(fun(Range, Name) ->
- {ok, Db} = couch_db:open_int(Name, []),
- FilePath = couch_db:get_filepath(Db),
-        %% target shard has all the docs expected in its range
- {ok, DocsInShard} = couch_db:fold_local_docs(Db, fun(Doc, Acc) ->
- DocId = Doc#doc.id,
- ExpectedRange = PickFun(DocId, Ranges, undefined),
- ?assertEqual(ExpectedRange, Range),
- {ok, Acc + 1}
- end, 0, []),
- ?assertEqual(DocsPerRange, DocsInShard),
- ok = couch_db:close(Db),
- ok = file:delete(FilePath)
- end, TMap)
- end)}.
-
-
-should_fail_copy_local_on_missing_source() ->
- DbName = ?tempdb(),
- Ranges = make_ranges(2),
- TMap = make_targets(Ranges),
- PickFun = fun fake_pickfun/3,
- Response = couch_db_split:copy_local_docs(DbName, TMap, PickFun),
- ?assertEqual({error, missing_source}, Response).
-
-
-cleanup_target_test_() ->
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop/1,
- [
- {
- setup,
- fun setup/0, fun teardown/1,
- fun should_delete_existing_targets/1
- },
- {"Should return error on missing source",
- fun should_fail_cleanup_target_on_missing_source/0}
- ]
- }.
-
-
-should_delete_existing_targets(SourceName) ->
- {ok, ExpectSeq} = create_docs(SourceName, 100),
- Ranges = make_ranges(2),
- TMap = make_targets(Ranges),
- PickFun = make_pickfun(50),
- ?_test(begin
- {ok, UpdateSeq} = couch_db_split:split(SourceName, TMap, PickFun),
- ?assertEqual(ExpectSeq, UpdateSeq),
- maps:map(fun(_Range, TargetName) ->
- FilePath = couch_util:with_db(TargetName, fun(Db) ->
- couch_db:get_filepath(Db)
- end),
- ?assertMatch({ok, _}, file:read_file_info(FilePath)),
- Response = couch_db_split:cleanup_target(SourceName, TargetName),
- ?assertEqual(ok, Response),
- ?assertEqual({error, enoent}, file:read_file_info(FilePath))
- end, TMap)
- end).
-
-
-should_fail_cleanup_target_on_missing_source() ->
- SourceName = ?tempdb(),
- TargetName = ?tempdb(),
- Response = couch_db_split:cleanup_target(SourceName, TargetName),
- ?assertEqual({error, missing_source}, Response).
-
-
-make_pickfun(DocsPerRange) ->
- fun(DocId, Ranges, _HashFun) ->
- Id = docid_to_integer(DocId),
- case {Id div DocsPerRange, Id rem DocsPerRange} of
- {N, 0} ->
- lists:nth(N, Ranges);
- {N, _} ->
- lists:nth(N + 1, Ranges)
- end
- end.
-
-
-fake_pickfun(_, Ranges, _) ->
- hd(Ranges).
-
-
-make_targets([]) ->
- maps:new();
-make_targets(Ranges) ->
- Targets = lists:map(fun(Range) ->
- {Range, ?tempdb()}
- end, Ranges),
- maps:from_list(Targets).
-
-
-make_ranges(Q) when Q > 0 ->
- Incr = (2 bsl 31) div Q,
- lists:map(fun
- (End) when End >= ?RINGTOP - 1 ->
- [End - Incr, ?RINGTOP - 1];
- (End) ->
- [End - Incr, End - 1]
- end, lists:seq(Incr, ?RINGTOP, Incr));
-make_ranges(_) ->
- [].
-
-
-create_docs(DbName, 0) ->
- couch_util:with_db(DbName, fun(Db) ->
- UpdateSeq = couch_db:get_update_seq(Db),
- {ok, UpdateSeq}
- end);
-create_docs(DbName, DocNum) ->
- Docs = lists:foldl(fun(I, Acc) ->
- [create_doc(I), create_local_doc(I) | Acc]
- end, [], lists:seq(DocNum, 1, -1)),
- couch_util:with_db(DbName, fun(Db) ->
- {ok, _Result} = couch_db:update_docs(Db, Docs),
- {ok, Db1} = couch_db:reopen(Db),
- UpdateSeq = couch_db:get_update_seq(Db1),
- {ok, UpdateSeq}
- end).
-
-
-create_doc(I) ->
- create_prefix_id_doc(I, "").
-
-
-create_local_doc(I) ->
- create_prefix_id_doc(I, "_local/").
-
-
-create_prefix_id_doc(I, Prefix) ->
- Id = iolist_to_binary(io_lib:format(Prefix ++ "~3..0B", [I])),
- couch_doc:from_json_obj({[{<<"_id">>, Id}, {<<"value">>, I}]}).
-
-
-docid_to_integer(<<"_local/", DocId/binary>>) ->
- docid_to_integer(DocId);
-docid_to_integer(DocId) ->
- list_to_integer(binary_to_list(DocId)).
diff --git a/src/couch/test/eunit/couch_db_tests.erl b/src/couch/test/eunit/couch_db_tests.erl
deleted file mode 100644
index dd2cb427d..000000000
--- a/src/couch/test/eunit/couch_db_tests.erl
+++ /dev/null
@@ -1,198 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
--define(TIMEOUT, 120).
-
-
-
-create_delete_db_test_()->
- {
- "Database create/delete tests",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun() -> ?tempdb() end,
- [
- fun should_create_db/1,
- fun should_delete_db/1
- ]
- }
- }
- }.
-
-create_delete_multiple_dbs_test_()->
- {
- "Multiple database create/delete tests",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun() -> [?tempdb() || _ <- lists:seq(1, 6)] end,
- [
- fun should_create_multiple_dbs/1,
- fun should_delete_multiple_dbs/1
- ]
- }
- }
- }.
-
-create_delete_database_continuously_test_() ->
- {
- "Continious database create/delete tests",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreachx,
- fun(_) -> ?tempdb() end,
- [
- {10, fun should_create_delete_database_continuously/2},
- {100, fun should_create_delete_database_continuously/2}
- ]
- }
- }
- }.
-
-open_db_test_()->
- {
- "Database open tests",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun() -> ?tempdb() end,
- [
- fun should_create_db_if_missing/1,
- fun should_open_db_if_exists/1,
- fun locking_should_work/1
- ]
- }
- }
- }.
-
-
-should_create_db(DbName) ->
- ?_test(begin
- {ok, Before} = couch_server:all_databases(),
- ?assertNot(lists:member(DbName, Before)),
- ?assert(create_db(DbName)),
- {ok, After} = couch_server:all_databases(),
- ?assert(lists:member(DbName, After))
- end).
-
-should_delete_db(DbName) ->
- ?_test(begin
- ?assert(create_db(DbName)),
- {ok, Before} = couch_server:all_databases(),
- ?assert(lists:member(DbName, Before)),
- couch_server:delete(DbName, []),
- {ok, After} = couch_server:all_databases(),
- ?assertNot(lists:member(DbName, After))
- end).
-
-should_create_multiple_dbs(DbNames) ->
- ?_test(begin
- gen_server:call(couch_server, {set_max_dbs_open, 3}),
- {ok, Before} = couch_server:all_databases(),
- [?assertNot(lists:member(DbName, Before)) || DbName <- DbNames],
- [?assert(create_db(DbName)) || DbName <- DbNames],
- {ok, After} = couch_server:all_databases(),
- [?assert(lists:member(DbName, After)) || DbName <- DbNames]
- end).
-
-should_delete_multiple_dbs(DbNames) ->
- ?_test(begin
- [?assert(create_db(DbName)) || DbName <- DbNames],
- {ok, Before} = couch_server:all_databases(),
- [?assert(lists:member(DbName, Before)) || DbName <- DbNames],
- [?assert(delete_db(DbName)) || DbName <- DbNames],
- {ok, After} = couch_server:all_databases(),
- [?assertNot(lists:member(DbName, After)) || DbName <- DbNames]
- end).
-
-should_create_delete_database_continuously(Times, DbName) ->
- {lists:flatten(io_lib:format("~b times", [Times])),
- {timeout, ?TIMEOUT, ?_test(begin
- ?assert(create_db(DbName)),
- lists:foreach(fun(_) ->
- ?assert(delete_db(DbName)),
- ?assert(create_db(DbName))
- end, lists:seq(1, Times))
- end)}}.
-
-should_create_db_if_missing(DbName) ->
- ?_test(begin
- {ok, Before} = couch_server:all_databases(),
- ?assertNot(lists:member(DbName, Before)),
- {ok, Db} = couch_db:open(DbName, [{create_if_missing, true}]),
- ok = couch_db:close(Db),
- {ok, After} = couch_server:all_databases(),
- ?assert(lists:member(DbName, After))
- end).
-
-should_open_db_if_exists(DbName) ->
- ?_test(begin
- ?assert(create_db(DbName)),
- {ok, Before} = couch_server:all_databases(),
- ?assert(lists:member(DbName, Before)),
- {ok, Db} = couch_db:open(DbName, [{create_if_missing, true}]),
- ok = couch_db:close(Db),
- {ok, After} = couch_server:all_databases(),
- ?assert(lists:member(DbName, After))
- end).
-
-locking_should_work(DbName) ->
- ?_test(begin
- ?assertEqual(ok, couch_server:lock(DbName, <<"x">>)),
- ?assertEqual({error, {locked, <<"x">>}}, couch_db:create(DbName, [])),
- ?assertEqual(ok, couch_server:unlock(DbName)),
- {ok, Db} = couch_db:create(DbName, []),
- ?assertEqual({error, already_opened},
- couch_server:lock(DbName, <<>>)),
-
- ok = couch_db:close(Db),
- catch exit(couch_db:get_pid(Db), kill),
- test_util:wait(fun() ->
- case ets:lookup(couch_dbs, DbName) of
- [] -> ok;
- [_ | _] -> wait
- end
- end),
-
- ?assertEqual(ok, couch_server:lock(DbName, <<"y">>)),
- ?assertEqual({error, {locked, <<"y">>}},
- couch_db:open(DbName, [])),
-
- couch_server:unlock(DbName),
- {ok, Db1} = couch_db:open(DbName, [{create_if_missing, true}]),
- ok = couch_db:close(Db1)
- end).
-
-create_db(DbName) ->
- create_db(DbName, []).
-
-create_db(DbName, Opts) ->
- {ok, Db} = couch_db:create(DbName, Opts),
- ok = couch_db:close(Db),
- true.
-
-delete_db(DbName) ->
- ok = couch_server:delete(DbName, []),
- true.
diff --git a/src/couch/test/eunit/couch_doc_json_tests.erl b/src/couch/test/eunit/couch_doc_json_tests.erl
index 51f228900..3a07642de 100644
--- a/src/couch/test/eunit/couch_doc_json_tests.erl
+++ b/src/couch/test/eunit/couch_doc_json_tests.erl
@@ -19,19 +19,19 @@
setup() ->
mock(couch_log),
mock(config),
- mock(couch_db_plugin),
+ mock(fabric2_db_plugin),
ok.
teardown(_) ->
meck:unload(couch_log),
meck:unload(config),
- meck:unload(couch_db_plugin),
+ meck:unload(fabric2_db_plugin),
ok.
-mock(couch_db_plugin) ->
- ok = meck:new(couch_db_plugin, [passthrough]),
- ok = meck:expect(couch_db_plugin, validate_docid, fun(_) -> false end),
- ok;
+mock(fabric2_db_plugin) ->
+ ok = meck:new(fabric2_db_plugin, [passthrough]),
+ ok = meck:expect(fabric2_db_plugin, validate_docid, fun(_) -> false end),
+ ok;
mock(couch_log) ->
ok = meck:new(couch_log, [passthrough]),
ok = meck:expect(couch_log, debug, fun(_, _) -> ok end),
@@ -52,7 +52,6 @@ json_doc_test_() ->
fun(_) ->
[{"Document from JSON", [
from_json_with_dbname_error_cases(),
- from_json_with_db_name_success_cases(),
from_json_success_cases(),
from_json_error_cases()
]},
@@ -113,7 +112,9 @@ from_json_success_cases() ->
{type, <<"application/awesome">>},
{att_len, 45},
{disk_len, 45},
- {revpos, undefined}
+ {revpos, undefined},
+ {encoding, identity},
+ {md5, <<>>}
]),
couch_att:new([
{name, <<"noahs_private_key.gpg">>},
@@ -121,7 +122,9 @@ from_json_success_cases() ->
{type, <<"application/pgp-signature">>},
{att_len, 18},
{disk_len, 18},
- {revpos, 0}
+ {revpos, 0},
+ {encoding, undefined},
+ {md5, undefined}
])
]},
"Attachments are parsed correctly."
@@ -173,44 +176,6 @@ from_json_success_cases() ->
end,
Cases).
-from_json_with_db_name_success_cases() ->
- Cases = [
- {
- {[]},
- <<"_dbs">>,
- #doc{},
- "DbName _dbs is acceptable with no docid"
- },
- {
- {[{<<"_id">>, <<"zing!">>}]},
- <<"_dbs">>,
- #doc{id = <<"zing!">>},
- "DbName _dbs is acceptable with a normal docid"
- },
- {
- {[{<<"_id">>, <<"_users">>}]},
- <<"_dbs">>,
- #doc{id = <<"_users">>},
- "_dbs/_users is acceptable"
- },
- {
- {[{<<"_id">>, <<"_replicator">>}]},
- <<"_dbs">>,
- #doc{id = <<"_replicator">>},
- "_dbs/_replicator is acceptable"
- },
- {
- {[{<<"_id">>, <<"_global_changes">>}]},
- <<"_dbs">>,
- #doc{id = <<"_global_changes">>},
- "_dbs/_global_changes is acceptable"
- }
- ],
- lists:map(
- fun({EJson, DbName, Expect, Msg}) ->
- {Msg, ?_assertMatch(Expect, couch_doc:from_json_obj_validate(EJson, DbName))}
- end,
- Cases).
from_json_error_cases() ->
Cases = [
@@ -308,13 +273,6 @@ from_json_with_dbname_error_cases() ->
Cases = [
{
{[{<<"_id">>, <<"_random">>}]},
- <<"_dbs">>,
- {illegal_docid,
- <<"Only reserved document ids may start with underscore.">>},
- "Disallow non-system-DB underscore prefixed docids in _dbs database."
- },
- {
- {[{<<"_id">>, <<"_random">>}]},
<<"foobar">>,
{illegal_docid,
<<"Only reserved document ids may start with underscore.">>},
@@ -418,7 +376,9 @@ to_json_success_cases() ->
{data, fun() -> ok end},
{revpos, 1},
{att_len, 400},
- {disk_len, 400}
+ {disk_len, 400},
+ {md5, <<>>},
+ {encoding, identity}
]),
couch_att:new([
{name, <<"fast.json">>},
@@ -426,7 +386,9 @@ to_json_success_cases() ->
{data, <<"{\"so\": \"there!\"}">>},
{revpos, 1},
{att_len, 16},
- {disk_len, 16}
+ {disk_len, 16},
+ {md5, <<>>},
+ {encoding, identity}
])
]},
{[
@@ -457,13 +419,17 @@ to_json_success_cases() ->
{data, fun() -> <<"diet pepsi">> end},
{revpos, 1},
{att_len, 10},
- {disk_len, 10}
+ {disk_len, 10},
+ {md5, <<>>},
+ {encoding, identity}
]),
couch_att:new([
{name, <<"food.now">>},
{type, <<"application/food">>},
{revpos, 1},
- {data, <<"sammich">>}
+ {data, <<"sammich">>},
+ {md5, <<>>},
+ {encoding, identity}
])
]},
{[
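For context, a minimal sketch (not part of this commit) of the mock pattern the updated setup/teardown above relies on, assuming meck is on the code path; the wrapper function names are hypothetical, and the passthrough option plus the always-false validate_docid/1 stub mirror the calls shown in the hunk.

mock_fabric2_db_plugin() ->
    ok = meck:new(fabric2_db_plugin, [passthrough]),
    %% Stub validate_docid/1 to return false, as in the setup above.
    ok = meck:expect(fabric2_db_plugin, validate_docid, fun(_DocId) -> false end).

unmock_fabric2_db_plugin() ->
    meck:unload(fabric2_db_plugin).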
diff --git a/src/couch/test/eunit/couch_doc_tests.erl b/src/couch/test/eunit/couch_doc_tests.erl
index cf41df61d..079b13f2b 100644
--- a/src/couch/test/eunit/couch_doc_tests.erl
+++ b/src/couch/test/eunit/couch_doc_tests.erl
@@ -42,7 +42,7 @@ doc_to_multi_part_stream_test() ->
AttLength = size(AttData),
Atts = [couch_att:new([
{name, <<"test">>}, {data, AttData}, {type, <<"text/plain">>},
- {att_len, AttLength}, {disk_len, AttLength}])],
+ {att_len, AttLength}, {disk_len, AttLength}, {encoding, identity}])],
couch_doc:doc_to_multi_part_stream(Boundary, JsonBytes, Atts, fun send/1, true),
AttLengthStr = integer_to_binary(AttLength),
BoundaryLen = size(Boundary),
@@ -69,51 +69,11 @@ len_doc_to_multi_part_stream_test() ->
AttLength = size(AttData),
Atts = [couch_att:new([
{name, <<"test">>}, {data, AttData}, {type, <<"text/plain">>},
- {att_len, AttLength}, {disk_len, AttLength}])],
+ {att_len, AttLength}, {disk_len, AttLength}, {encoding, identity}])],
{ContentType, 258} = %% 258 is expected size of the document
couch_doc:len_doc_to_multi_part_stream(Boundary, JsonBytes, Atts, true),
ok.
-validate_docid_test_() ->
- {setup,
- fun() ->
- mock_config(),
- ok = meck:new(couch_db_plugin, [passthrough]),
- meck:expect(couch_db_plugin, validate_docid, fun(_) -> false end)
- end,
- fun(_) ->
- meck:unload(config),
- meck:unload(couch_db_plugin)
- end,
- [
- ?_assertEqual(ok, couch_doc:validate_docid(<<"idx">>)),
- ?_assertEqual(ok, couch_doc:validate_docid(<<"_design/idx">>)),
- ?_assertEqual(ok, couch_doc:validate_docid(<<"_local/idx">>)),
- ?_assertEqual(ok, couch_doc:validate_docid(large_id(1024))),
- ?_assertEqual(ok, couch_doc:validate_docid(<<"_users">>, <<"_dbs">>)),
- ?_assertEqual(ok, couch_doc:validate_docid(<<"_replicator">>, <<"_dbs">>)),
- ?_assertEqual(ok, couch_doc:validate_docid(<<"_global_changes">>, <<"_dbs">>)),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(<<>>)),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(<<16#80>>)),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(<<"_idx">>)),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(<<"_">>)),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(<<"_design/">>)),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(<<"_local/">>)),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(large_id(1025))),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(<<"_users">>, <<"foo">>)),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(<<"_weeee">>, <<"_dbs">>))
- ]
- }.
-
large_id(N) ->
<< <<"x">> || _ <- lists:seq(1, N) >>.
@@ -139,7 +99,6 @@ mock_config() ->
meck:expect(config, get,
fun("couchdb", "max_document_id_length", "infinity") -> "1024";
("couchdb", "max_attachment_size", "infinity") -> "infinity";
- ("mem3", "shards_db", "_dbs") -> "_dbs";
(Key, Val, Default) -> meck:passthrough([Key, Val, Default])
end
).
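A hedged sketch of constructing an attachment with the encoding and md5 fields that the updated tests above now pass explicitly to couch_att:new/1; the wrapper name, attachment name, type, and lengths are illustrative values only.

att_with_explicit_encoding() ->
    %% <<"hello world">> is 11 bytes, so att_len and disk_len match the data.
    couch_att:new([
        {name, <<"example.txt">>},
        {type, <<"text/plain">>},
        {data, <<"hello world">>},
        {revpos, 1},
        {att_len, 11},
        {disk_len, 11},
        {encoding, identity},
        {md5, <<>>}
    ]).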
diff --git a/src/couch/test/eunit/couch_file_tests.erl b/src/couch/test/eunit/couch_file_tests.erl
deleted file mode 100644
index 606f4bbf4..000000000
--- a/src/couch/test/eunit/couch_file_tests.erl
+++ /dev/null
@@ -1,551 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_file_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
--define(BLOCK_SIZE, 4096).
--define(setup(F), {setup, fun setup/0, fun teardown/1, F}).
--define(foreach(Fs), {foreach, fun setup/0, fun teardown/1, Fs}).
-
-
-setup() ->
- {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
- Fd.
-
-teardown(Fd) ->
- case is_process_alive(Fd) of
- true -> ok = couch_file:close(Fd);
- false -> ok
- end.
-
-open_close_test_() ->
- {
- "Test for proper file open and close",
- {
- setup,
- fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
- [
- should_return_enoent_if_missed(),
- should_ignore_invalid_flags_with_open(),
- ?setup(fun should_return_pid_on_file_open/1),
- should_close_file_properly(),
- ?setup(fun should_create_empty_new_files/1)
- ]
- }
- }.
-
-should_return_enoent_if_missed() ->
- ?_assertEqual({error, enoent}, couch_file:open("not a real file")).
-
-should_ignore_invalid_flags_with_open() ->
- ?_assertMatch({ok, _},
- couch_file:open(?tempfile(), [create, invalid_option])).
-
-should_return_pid_on_file_open(Fd) ->
- ?_assert(is_pid(Fd)).
-
-should_close_file_properly() ->
- {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
- ok = couch_file:close(Fd),
- ?_assert(true).
-
-should_create_empty_new_files(Fd) ->
- ?_assertMatch({ok, 0}, couch_file:bytes(Fd)).
-
-
-read_write_test_() ->
- {
- "Common file read/write tests",
- {
- setup,
- fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
- ?foreach([
- fun should_increase_file_size_on_write/1,
- fun should_return_current_file_size_on_write/1,
- fun should_write_and_read_term/1,
- fun should_write_and_read_binary/1,
- fun should_write_and_read_large_binary/1,
- fun should_return_term_as_binary_for_reading_binary/1,
- fun should_read_term_written_as_binary/1,
- fun should_read_iolist/1,
- fun should_fsync/1,
- fun should_not_read_beyond_eof/1,
- fun should_truncate/1
- ])
- }
- }.
-
-
-should_increase_file_size_on_write(Fd) ->
- {ok, 0, _} = couch_file:append_term(Fd, foo),
- {ok, Size} = couch_file:bytes(Fd),
- ?_assert(Size > 0).
-
-should_return_current_file_size_on_write(Fd) ->
- {ok, 0, _} = couch_file:append_term(Fd, foo),
- {ok, Size} = couch_file:bytes(Fd),
- ?_assertMatch({ok, Size, _}, couch_file:append_term(Fd, bar)).
-
-should_write_and_read_term(Fd) ->
- {ok, Pos, _} = couch_file:append_term(Fd, foo),
- ?_assertMatch({ok, foo}, couch_file:pread_term(Fd, Pos)).
-
-should_write_and_read_binary(Fd) ->
- {ok, Pos, _} = couch_file:append_binary(Fd, <<"fancy!">>),
- ?_assertMatch({ok, <<"fancy!">>}, couch_file:pread_binary(Fd, Pos)).
-
-should_return_term_as_binary_for_reading_binary(Fd) ->
- {ok, Pos, _} = couch_file:append_term(Fd, foo),
- Foo = couch_compress:compress(foo, snappy),
- ?_assertMatch({ok, Foo}, couch_file:pread_binary(Fd, Pos)).
-
-should_read_term_written_as_binary(Fd) ->
- {ok, Pos, _} = couch_file:append_binary(Fd, <<131,100,0,3,102,111,111>>),
- ?_assertMatch({ok, foo}, couch_file:pread_term(Fd, Pos)).
-
-should_write_and_read_large_binary(Fd) ->
- BigBin = list_to_binary(lists:duplicate(100000, 0)),
- {ok, Pos, _} = couch_file:append_binary(Fd, BigBin),
- ?_assertMatch({ok, BigBin}, couch_file:pread_binary(Fd, Pos)).
-
-should_read_iolist(Fd) ->
- %% append_binary == append_iolist?
- %% Possible bug in pread_iolist or iolist() -> append_binary
- {ok, Pos, _} = couch_file:append_binary(Fd, ["foo", $m, <<"bam">>]),
- {ok, IoList} = couch_file:pread_iolist(Fd, Pos),
- ?_assertMatch(<<"foombam">>, iolist_to_binary(IoList)).
-
-should_fsync(Fd) ->
- {"How does on test fsync?", ?_assertMatch(ok, couch_file:sync(Fd))}.
-
-should_not_read_beyond_eof(Fd) ->
- BigBin = list_to_binary(lists:duplicate(100000, 0)),
- DoubleBin = round(byte_size(BigBin) * 2),
- {ok, Pos, _Size} = couch_file:append_binary(Fd, BigBin),
- {_, Filepath} = couch_file:process_info(Fd),
- %% corrupt db file
- {ok, Io} = file:open(Filepath, [read, write, binary]),
- ok = file:pwrite(Io, Pos, <<0:1/integer, DoubleBin:31/integer>>),
- file:close(Io),
- unlink(Fd),
- ExpectedError = {badmatch, {'EXIT', {bad_return_value,
- {read_beyond_eof, Filepath}}}},
- ?_assertError(ExpectedError, couch_file:pread_binary(Fd, Pos)).
-
-should_truncate(Fd) ->
- {ok, 0, _} = couch_file:append_term(Fd, foo),
- {ok, Size} = couch_file:bytes(Fd),
- BigBin = list_to_binary(lists:duplicate(100000, 0)),
- {ok, _, _} = couch_file:append_binary(Fd, BigBin),
- ok = couch_file:truncate(Fd, Size),
- ?_assertMatch({ok, foo}, couch_file:pread_term(Fd, 0)).
-
-pread_limit_test_() ->
- {
- "Read limit tests",
- {
- setup,
- fun() ->
- Ctx = test_util:start(?MODULE),
- config:set("couchdb", "max_pread_size", "50000"),
- Ctx
- end,
- fun(Ctx) ->
- config:delete("couchdb", "max_pread_size"),
- test_util:stop(Ctx)
- end,
- ?foreach([
- fun should_increase_file_size_on_write/1,
- fun should_return_current_file_size_on_write/1,
- fun should_write_and_read_term/1,
- fun should_write_and_read_binary/1,
- fun should_not_read_more_than_pread_limit/1
- ])
- }
- }.
-
-should_not_read_more_than_pread_limit(Fd) ->
- {_, Filepath} = couch_file:process_info(Fd),
- BigBin = list_to_binary(lists:duplicate(100000, 0)),
- {ok, Pos, _Size} = couch_file:append_binary(Fd, BigBin),
- unlink(Fd),
- ExpectedError = {badmatch, {'EXIT', {bad_return_value,
- {exceed_pread_limit, Filepath, 50000}}}},
- ?_assertError(ExpectedError, couch_file:pread_binary(Fd, Pos)).
-
-
-header_test_() ->
- {
- "File header read/write tests",
- {
- setup,
- fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
- [
- ?foreach([
- fun should_write_and_read_atom_header/1,
- fun should_write_and_read_tuple_header/1,
- fun should_write_and_read_second_header/1,
- fun should_truncate_second_header/1,
- fun should_produce_same_file_size_on_rewrite/1,
- fun should_save_headers_larger_than_block_size/1
- ]),
- should_recover_header_marker_corruption(),
- should_recover_header_size_corruption(),
- should_recover_header_md5sig_corruption(),
- should_recover_header_data_corruption()
- ]
- }
- }.
-
-
-should_write_and_read_atom_header(Fd) ->
- ok = couch_file:write_header(Fd, hello),
- ?_assertMatch({ok, hello}, couch_file:read_header(Fd)).
-
-should_write_and_read_tuple_header(Fd) ->
- ok = couch_file:write_header(Fd, {<<"some_data">>, 32}),
- ?_assertMatch({ok, {<<"some_data">>, 32}}, couch_file:read_header(Fd)).
-
-should_write_and_read_second_header(Fd) ->
- ok = couch_file:write_header(Fd, {<<"some_data">>, 32}),
- ok = couch_file:write_header(Fd, [foo, <<"more">>]),
- ?_assertMatch({ok, [foo, <<"more">>]}, couch_file:read_header(Fd)).
-
-should_truncate_second_header(Fd) ->
- ok = couch_file:write_header(Fd, {<<"some_data">>, 32}),
- {ok, Size} = couch_file:bytes(Fd),
- ok = couch_file:write_header(Fd, [foo, <<"more">>]),
- ok = couch_file:truncate(Fd, Size),
- ?_assertMatch({ok, {<<"some_data">>, 32}}, couch_file:read_header(Fd)).
-
-should_produce_same_file_size_on_rewrite(Fd) ->
- ok = couch_file:write_header(Fd, {<<"some_data">>, 32}),
- {ok, Size1} = couch_file:bytes(Fd),
- ok = couch_file:write_header(Fd, [foo, <<"more">>]),
- {ok, Size2} = couch_file:bytes(Fd),
- ok = couch_file:truncate(Fd, Size1),
- ok = couch_file:write_header(Fd, [foo, <<"more">>]),
- ?_assertMatch({ok, Size2}, couch_file:bytes(Fd)).
-
-should_save_headers_larger_than_block_size(Fd) ->
- Header = erlang:make_tuple(5000, <<"CouchDB">>),
- couch_file:write_header(Fd, Header),
- {"COUCHDB-1319", ?_assertMatch({ok, Header}, couch_file:read_header(Fd))}.
-
-
-should_recover_header_marker_corruption() ->
- ?_assertMatch(
- ok,
- check_header_recovery(
- fun(CouchFd, RawFd, Expect, HeaderPos) ->
- ?assertNotMatch(Expect, couch_file:read_header(CouchFd)),
- file:pwrite(RawFd, HeaderPos, <<0>>),
- ?assertMatch(Expect, couch_file:read_header(CouchFd))
- end)
- ).
-
-should_recover_header_size_corruption() ->
- ?_assertMatch(
- ok,
- check_header_recovery(
- fun(CouchFd, RawFd, Expect, HeaderPos) ->
- ?assertNotMatch(Expect, couch_file:read_header(CouchFd)),
- % +1 for 0x1 byte marker
- file:pwrite(RawFd, HeaderPos + 1, <<10/integer>>),
- ?assertMatch(Expect, couch_file:read_header(CouchFd))
- end)
- ).
-
-should_recover_header_md5sig_corruption() ->
- ?_assertMatch(
- ok,
- check_header_recovery(
- fun(CouchFd, RawFd, Expect, HeaderPos) ->
- ?assertNotMatch(Expect, couch_file:read_header(CouchFd)),
- % +5 = +1 for 0x1 byte and +4 for term size.
- file:pwrite(RawFd, HeaderPos + 5, <<"F01034F88D320B22">>),
- ?assertMatch(Expect, couch_file:read_header(CouchFd))
- end)
- ).
-
-should_recover_header_data_corruption() ->
- ?_assertMatch(
- ok,
- check_header_recovery(
- fun(CouchFd, RawFd, Expect, HeaderPos) ->
- ?assertNotMatch(Expect, couch_file:read_header(CouchFd)),
- % +21 = +1 for 0x1 byte, +4 for term size and +16 for MD5 sig
- file:pwrite(RawFd, HeaderPos + 21, <<"some data goes here!">>),
- ?assertMatch(Expect, couch_file:read_header(CouchFd))
- end)
- ).
-
-
-check_header_recovery(CheckFun) ->
- Path = ?tempfile(),
- {ok, Fd} = couch_file:open(Path, [create, overwrite]),
- {ok, RawFd} = file:open(Path, [read, write, raw, binary]),
-
- {ok, _} = write_random_data(Fd),
- ExpectHeader = {some_atom, <<"a binary">>, 756},
- ok = couch_file:write_header(Fd, ExpectHeader),
-
- {ok, HeaderPos} = write_random_data(Fd),
- ok = couch_file:write_header(Fd, {2342, <<"corruption! greed!">>}),
-
- CheckFun(Fd, RawFd, {ok, ExpectHeader}, HeaderPos),
-
- ok = file:close(RawFd),
- ok = couch_file:close(Fd),
- ok.
-
-write_random_data(Fd) ->
- write_random_data(Fd, 100 + couch_rand:uniform(1000)).
-
-write_random_data(Fd, 0) ->
- {ok, Bytes} = couch_file:bytes(Fd),
- {ok, (1 + Bytes div ?BLOCK_SIZE) * ?BLOCK_SIZE};
-write_random_data(Fd, N) ->
- Choices = [foo, bar, <<"bizzingle">>, "bank", ["rough", stuff]],
- Term = lists:nth(couch_rand:uniform(4) + 1, Choices),
- {ok, _, _} = couch_file:append_term(Fd, Term),
- write_random_data(Fd, N - 1).
-
-
-delete_test_() ->
- {
- "File delete tests",
- {
- setup,
- fun() ->
- meck:new(config, [passthrough])
- end,
- fun(_) ->
- meck:unload()
- end,
- {
- foreach,
- fun() ->
- meck:reset([config]),
- File = ?tempfile() ++ ".couch",
- RootDir = filename:dirname(File),
- ok = couch_file:init_delete_dir(RootDir),
- ok = file:write_file(File, <<>>),
- {RootDir, File}
- end,
- fun({_, File}) ->
- file:delete(File)
- end,
- [
- fun(Cfg) ->
- {"enable_database_recovery = false, context = delete",
- make_enable_recovery_test_case(Cfg, false, delete)}
- end,
- fun(Cfg) ->
- {"enable_database_recovery = true, context = delete",
- make_enable_recovery_test_case(Cfg, true, delete)}
- end,
- fun(Cfg) ->
- {"enable_database_recovery = false, context = compaction",
- make_enable_recovery_test_case(Cfg, false, compaction)}
- end,
- fun(Cfg) ->
- {"enable_database_recovery = true, context = compaction",
- make_enable_recovery_test_case(Cfg, true, compaction)}
- end,
- fun(Cfg) ->
- {"delete_after_rename = true",
- make_delete_after_rename_test_case(Cfg, true)}
- end,
- fun(Cfg) ->
- {"delete_after_rename = false",
- make_delete_after_rename_test_case(Cfg, false)}
- end
- ]
- }
- }
- }.
-
-
-make_enable_recovery_test_case({RootDir, File}, EnableRecovery, Context) ->
- meck:expect(config, get_boolean, fun
- ("couchdb", "enable_database_recovery", _) -> EnableRecovery;
- ("couchdb", "delete_after_rename", _) -> false
- end),
- FileExistsBefore = filelib:is_regular(File),
- couch_file:delete(RootDir, File, [{context, Context}]),
- FileExistsAfter = filelib:is_regular(File),
- RenamedFiles = filelib:wildcard(filename:rootname(File) ++ "*.deleted.*"),
- DeletedFiles = filelib:wildcard(RootDir ++ "/.delete/*"),
- {ExpectRenamedCount, ExpectDeletedCount} = if
- EnableRecovery andalso Context =:= delete -> {1, 0};
- true -> {0, 1}
- end,
- [
- ?_assert(FileExistsBefore),
- ?_assertNot(FileExistsAfter),
- ?_assertEqual(ExpectRenamedCount, length(RenamedFiles)),
- ?_assertEqual(ExpectDeletedCount, length(DeletedFiles))
- ].
-
-make_delete_after_rename_test_case({RootDir, File}, DeleteAfterRename) ->
- meck:expect(config, get_boolean, fun
- ("couchdb", "enable_database_recovery", _) -> false;
- ("couchdb", "delete_after_rename", _) -> DeleteAfterRename
- end),
- FileExistsBefore = filelib:is_regular(File),
- couch_file:delete(RootDir, File),
- FileExistsAfter = filelib:is_regular(File),
- RenamedFiles = filelib:wildcard(filename:join([RootDir, ".delete", "*"])),
- ExpectRenamedCount = if DeleteAfterRename -> 0; true -> 1 end,
- [
- ?_assert(FileExistsBefore),
- ?_assertNot(FileExistsAfter),
- ?_assertEqual(ExpectRenamedCount, length(RenamedFiles))
- ].
-
-
-nuke_dir_test_() ->
- {
- "Nuke directory tests",
- {
- setup,
- fun() ->
- meck:new(config, [passthrough])
- end,
- fun(_) ->
- meck:unload()
- end,
- {
- foreach,
- fun() ->
- meck:reset([config]),
- File0 = ?tempfile() ++ ".couch",
- RootDir = filename:dirname(File0),
- BaseName = filename:basename(File0),
- Seed = couch_rand:uniform(8999999999) + 999999999,
- DDocDir = io_lib:format("db.~b_design", [Seed]),
- ViewDir = filename:join([RootDir, DDocDir]),
- file:make_dir(ViewDir),
- File = filename:join([ViewDir, BaseName]),
- file:rename(File0, File),
- ok = couch_file:init_delete_dir(RootDir),
- ok = file:write_file(File, <<>>),
- {RootDir, ViewDir}
- end,
- fun({RootDir, ViewDir}) ->
- remove_dir(ViewDir),
- Ext = filename:extension(ViewDir),
- case filelib:wildcard(RootDir ++ "/*.deleted" ++ Ext) of
- [DelDir] -> remove_dir(DelDir);
- _ -> ok
- end
- end,
- [
- fun(Cfg) ->
- {"enable_database_recovery = false",
- make_rename_dir_test_case(Cfg, false)}
- end,
- fun(Cfg) ->
- {"enable_database_recovery = true",
- make_rename_dir_test_case(Cfg, true)}
- end,
- fun(Cfg) ->
- {"delete_after_rename = true",
- make_delete_dir_test_case(Cfg, true)}
- end,
- fun(Cfg) ->
- {"delete_after_rename = false",
- make_delete_dir_test_case(Cfg, false)}
- end
- ]
- }
- }
- }.
-
-
-make_rename_dir_test_case({RootDir, ViewDir}, EnableRecovery) ->
- meck:expect(config, get_boolean, fun
- ("couchdb", "enable_database_recovery", _) -> EnableRecovery;
- ("couchdb", "delete_after_rename", _) -> true;
- (_, _, Default) -> Default
- end),
- DirExistsBefore = filelib:is_dir(ViewDir),
- couch_file:nuke_dir(RootDir, ViewDir),
- DirExistsAfter = filelib:is_dir(ViewDir),
- Ext = filename:extension(ViewDir),
- RenamedDirs = filelib:wildcard(RootDir ++ "/*.deleted" ++ Ext),
- ExpectRenamedCount = if EnableRecovery -> 1; true -> 0 end,
- [
- ?_assert(DirExistsBefore),
- ?_assertNot(DirExistsAfter),
- ?_assertEqual(ExpectRenamedCount, length(RenamedDirs))
- ].
-
-make_delete_dir_test_case({RootDir, ViewDir}, DeleteAfterRename) ->
- meck:expect(config, get_boolean, fun
- ("couchdb", "enable_database_recovery", _) -> false;
- ("couchdb", "delete_after_rename", _) -> DeleteAfterRename;
- (_, _, Default) -> Default
- end),
- DirExistsBefore = filelib:is_dir(ViewDir),
- couch_file:nuke_dir(RootDir, ViewDir),
- DirExistsAfter = filelib:is_dir(ViewDir),
- Ext = filename:extension(ViewDir),
- RenamedDirs = filelib:wildcard(RootDir ++ "/*.deleted" ++ Ext),
- RenamedFiles = filelib:wildcard(RootDir ++ "/.delete/*"),
- ExpectRenamedCount = if DeleteAfterRename -> 0; true -> 1 end,
- [
- ?_assert(DirExistsBefore),
- ?_assertNot(DirExistsAfter),
- ?_assertEqual(0, length(RenamedDirs)),
- ?_assertEqual(ExpectRenamedCount, length(RenamedFiles))
- ].
-
-remove_dir(Dir) ->
- [file:delete(File) || File <- filelib:wildcard(filename:join([Dir, "*"]))],
- file:del_dir(Dir).
-
-
-fsync_error_test_() ->
- {
- "Test fsync raises errors",
- {
- setup,
- fun() ->
- test_util:start(?MODULE, [ioq])
- end,
- fun(Ctx) ->
- test_util:stop(Ctx)
- end,
- [
- fun fsync_raises_errors/0
- ]
- }
- }.
-
-
-fsync_raises_errors() ->
- Fd = spawn(fun() -> fake_fsync_fd() end),
- ?assertError({fsync_error, eio}, couch_file:sync(Fd)).
-
-
-fake_fsync_fd() ->
- % Mocking gen_server did not go very
- % well so faking the couch_file pid
- % will have to do.
- receive
- {'$gen_call', From, sync} ->
- gen:reply(From, {error, eio})
- end.
diff --git a/src/couch/test/eunit/couch_flags_config_tests.erl b/src/couch/test/eunit/couch_flags_config_tests.erl
index ed7df1123..63fabfdde 100644
--- a/src/couch/test/eunit/couch_flags_config_tests.erl
+++ b/src/couch/test/eunit/couch_flags_config_tests.erl
@@ -98,10 +98,8 @@ test_config() ->
parse_flags_term_test_() ->
LongBinary = binary:copy(<<"a">>, ?MAX_FLAG_NAME_LENGTH + 1),
- ExpectedError = {error, {"Cannot parse list of tags: ~n~p",
- [{too_long, LongBinary}]}},
- ExpectedUnknownError = {error,{"Cannot parse list of tags: ~n~p",
- [{invalid_flag,<<"dddddddd">>}]}},
+ ExpectedError = {error, [{too_long, LongBinary}]},
+ ExpectedUnknownError = {error, [{invalid_flag,<<"dddddddd">>}]},
[
{"empty binary", ?_assertEqual(
[], couch_flags_config:parse_flags_term(<<>>))},
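A sketch, under the assumption that an over-long or undefined flag name in the input binary yields the simplified error tuples introduced above; the wrapper function name is hypothetical, ?MAX_FLAG_NAME_LENGTH comes from the module under test, and the eunit assert macros are assumed to be included.

parse_flags_term_error_shapes() ->
    LongBinary = binary:copy(<<"a">>, ?MAX_FLAG_NAME_LENGTH + 1),
    %% Error reasons are now returned directly, without the format string wrapper.
    ?assertEqual({error, [{too_long, LongBinary}]},
        couch_flags_config:parse_flags_term(LongBinary)),
    ?assertEqual({error, [{invalid_flag, <<"dddddddd">>}]},
        couch_flags_config:parse_flags_term(<<"dddddddd">>)).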
diff --git a/src/couch/test/eunit/couch_index_tests.erl b/src/couch/test/eunit/couch_index_tests.erl
deleted file mode 100644
index 23c857d6c..000000000
--- a/src/couch/test/eunit/couch_index_tests.erl
+++ /dev/null
@@ -1,232 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
--include_lib("stdlib/include/ms_transform.hrl").
-
--define(TIMEOUT, 1000).
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- create_design_doc(DbName, <<"_design/foo">>, <<"bar">>),
- tracer_new(),
- DbName.
-
-teardown(DbName) ->
- tracer_delete(),
- couch_server:delete(DbName, [?ADMIN_CTX]).
-
-couch_index_ioq_priority_test_() ->
- {
- "Test ioq_priority for views",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun check_io_priority_for_updater/1,
- fun check_io_priority_for_compactor/1
- ]
- }
- }
- }.
-
-
-check_io_priority_for_updater(DbName) ->
- ?_test(begin
- {ok, IndexerPid} = couch_index_server:get_index(
- couch_mrview_index, DbName, <<"_design/foo">>),
- CouchIndexUpdaterPid = updater_pid(IndexerPid),
- tracer_record(CouchIndexUpdaterPid),
-
- create_docs(DbName),
-
- CommittedSeq = couch_util:with_db(DbName, fun(Db) -> couch_db:get_update_seq(Db) end),
- couch_index:get_state(IndexerPid, CommittedSeq),
- [UpdaterPid] = wait_spawn_event_for_pid(CouchIndexUpdaterPid),
-
- [UpdaterMapProcess] = wait_spawn_by_anonymous_fun(
- UpdaterPid, '-start_update/4-fun-0-'),
-
- ?assert(wait_set_io_priority(
- UpdaterMapProcess, {view_update, DbName, <<"_design/foo">>})),
-
- [UpdaterWriterProcess] = wait_spawn_by_anonymous_fun(
- UpdaterPid, '-start_update/4-fun-1-'),
- ?assert(wait_set_io_priority(
- UpdaterWriterProcess, {view_update, DbName, <<"_design/foo">>})),
-
- ok
- end).
-
-check_io_priority_for_compactor(DbName) ->
- ?_test(begin
- {ok, IndexerPid} = couch_index_server:get_index(
- couch_mrview_index, DbName, <<"_design/foo">>),
- {ok, CompactorPid} = couch_index:get_compactor_pid(IndexerPid),
- tracer_record(CompactorPid),
-
- create_docs(DbName),
-
- couch_index:compact(IndexerPid),
- wait_spawn_event_for_pid(CompactorPid),
-
- [CompactorProcess] = wait_spawn_by_anonymous_fun(
- CompactorPid, '-handle_call/3-fun-0-'),
- ?assert(wait_set_io_priority(
- CompactorProcess, {view_compact, DbName, <<"_design/foo">>})),
- ok
- end).
-
-create_docs(DbName) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- Doc1 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc1">>},
- {<<"value">>, 1}
-
- ]}),
- Doc2 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc2">>},
- {<<"value">>, 2}
-
- ]}),
- Doc3 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc3">>},
- {<<"value">>, 3}
-
- ]}),
- {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3]),
- couch_db:close(Db).
-
-create_design_doc(DbName, DDName, ViewName) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDName},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {ViewName, {[
- {<<"map">>, <<"function(doc) { emit(doc.value, null); }">>}
- ]}}
- ]}}
- ]}),
- {ok, Rev} = couch_db:update_doc(Db, DDoc, []),
- couch_db:close(Db),
- Rev.
-
-wait_set_io_priority(Pid, IOPriority) ->
- test_util:wait_value(fun() ->
- does_process_set_io_priority(Pid, IOPriority)
- end, true).
-
-does_process_set_io_priority(Pid, IOPriority) ->
- PutCallsArgs = find_calls_to_fun(Pid, {erlang, put, 2}),
- lists:any(fun([_, Priority]) -> Priority =:= IOPriority end, PutCallsArgs).
-
-wait_events(MatchSpec) ->
- test_util:wait_other_value(fun() -> select(MatchSpec) end, []).
-
-find_spawned_by_anonymous_fun(ParentPid, Name) ->
- AnonymousFuns = select(ets:fun2ms(fun
- ({spawned, Pid, _TS, _Name, _Dict, [PPid, {erlang, apply, [Fun, _]}]})
- when is_function(Fun) andalso PPid =:= ParentPid -> {Pid, Fun}
- end)),
- lists:filtermap(fun({Pid, Fun}) ->
- case erlang:fun_info(Fun, name) of
- {name, Name} -> {true, Pid};
- _ -> false
- end
- end, AnonymousFuns).
-
-find_calls_to_fun(Pid, {Module, Function, Arity}) ->
- select(ets:fun2ms(fun
- ({call, P, _TS, _Name, _Dict, [{M, F, Args}]})
- when length(Args) =:= Arity
- andalso M =:= Module
- andalso F =:= Function
- andalso P =:= Pid
- -> Args
- end)).
-
-wait_spawn_event_for_pid(ParentPid) ->
- wait_events(ets:fun2ms(fun
- ({spawned, Pid, _TS, _Name, _Dict, [P, _]}) when P =:= ParentPid -> Pid
- end)).
-
-wait_spawn_by_anonymous_fun(ParentPid, Name) ->
- test_util:wait_other_value(fun() ->
- find_spawned_by_anonymous_fun(ParentPid, Name)
- end, []).
-
-updater_pid(IndexerPid) ->
- {links, Links} = process_info(IndexerPid, links),
- [Pid] = select_process_by_name_prefix(Links, "couch_index_updater:init/1"),
- Pid.
-
-select_process_by_name_prefix(Pids, Name) ->
- lists:filter(fun(Pid) ->
- Key = couch_debug:process_name(Pid),
- string:str(Key, Name) =:= 1
- end, Pids).
-
-select(MatchSpec) ->
- lists:filtermap(fun(Event) ->
- case ets:test_ms(Event, MatchSpec) of
- {ok, false} -> false;
- {ok, Result} -> {true, Result};
- _ -> false
- end
- end, tracer_events()).
-
-
-%% ========================
-%% Tracer related functions
-%% ------------------------
-tracer_new() ->
- ets:new(?MODULE, [public, named_table]),
- {ok, _Tracer} = dbg:tracer(process, {fun tracer_collector/2, 0}),
- ok.
-
-tracer_delete() ->
- dbg:stop_clear(),
- (catch ets:delete(?MODULE)),
- ok.
-
-tracer_record(Pid) ->
- {ok, _} = dbg:tp(erlang, put, x),
- {ok, _} = dbg:p(Pid, [c, p, sos]),
- ok.
-
-tracer_events() ->
- Events = [{Idx, E} || [Idx, E] <- ets:match(?MODULE, {{trace, '$1'}, '$2'})],
- {_, Sorted} = lists:unzip(lists:keysort(1, Events)),
- Sorted.
-
-tracer_collector(Msg, Seq) ->
- ets:insert(?MODULE, {{trace, Seq}, normalize_trace_msg(Msg)}),
- Seq + 1.
-
-normalize_trace_msg(TraceMsg) ->
- case tuple_to_list(TraceMsg) of
- [trace_ts, Pid, Type | Info] ->
- {TraceInfo, [Timestamp]} = lists:split(length(Info)-1, Info),
- {Type, Pid, Timestamp, couch_debug:process_name(Pid), process_info(Pid), TraceInfo};
- [trace, Pid, Type | TraceInfo] ->
- {Type, Pid, os:timestamp(), couch_debug:process_name(Pid), process_info(Pid), TraceInfo}
- end.
diff --git a/src/couch/test/eunit/couch_js_tests.erl b/src/couch/test/eunit/couch_js_tests.erl
index 693cd9772..c68d60125 100644
--- a/src/couch/test/eunit/couch_js_tests.erl
+++ b/src/couch/test/eunit/couch_js_tests.erl
@@ -150,8 +150,9 @@ should_exit_on_oom() ->
"var state = [];\n"
"function(doc) {\n"
" var val = \"0123456789ABCDEF\";\n"
- " for(var i = 0; i < 165535; i++) {\n"
+ " for(var i = 0; i < 665535; i++) {\n"
" state.push([val, val]);\n"
+ " emit(null, null);\n"
" }\n"
"}\n"
>>,
diff --git a/src/couch/test/eunit/couch_query_servers_tests.erl b/src/couch/test/eunit/couch_query_servers_tests.erl
index f8df896c4..46a8474cc 100644
--- a/src/couch/test/eunit/couch_query_servers_tests.erl
+++ b/src/couch/test/eunit/couch_query_servers_tests.erl
@@ -12,6 +12,7 @@
-module(couch_query_servers_tests).
+-include_lib("couch/include/couch_db.hrl").
-include_lib("couch/include/couch_eunit.hrl").
@@ -23,6 +24,15 @@ teardown(_) ->
meck:unload().
+setup_oom() ->
+ test_util:start_couch().
+
+
+teardown_oom(Ctx) ->
+ meck:unload(),
+ test_util:stop_couch(Ctx).
+
+
sum_overflow_test_() ->
{
"Test overflow detection in the _sum reduce function",
@@ -39,6 +49,19 @@ sum_overflow_test_() ->
}.
+filter_oom_test_() ->
+{
+ "Test recovery from oom in filters",
+ {
+ setup,
+ fun setup_oom/0,
+ fun teardown_oom/1,
+ [
+ fun should_split_large_batches/0
+ ]
+ }
+}.
+
should_return_error_on_overflow() ->
meck:reset([config, couch_log]),
meck:expect(
@@ -85,6 +108,38 @@ should_return_object_on_false() ->
?assertNot(meck:called(couch_log, error, '_')).
+should_split_large_batches() ->
+ Req = {json_req, {[]}},
+ Db = undefined,
+ DDoc = #doc{
+ id = <<"_design/foo">>,
+ revs = {0, [<<"bork bork bork">>]},
+ body = {[
+ {<<"filters">>, {[
+ {<<"bar">>, <<"function(req, doc) {return true;}">>}
+ ]}}
+ ]}
+ },
+ FName = <<"bar">>,
+ Docs = [
+ #doc{id = <<"a">>, body = {[]}},
+ #doc{id = <<"b">>, body = {[]}}
+ ],
+ meck:new(couch_os_process, [passthrough]),
+ meck:expect(couch_os_process, prompt, fun(Pid, Data) ->
+ case Data of
+ [<<"ddoc">>, _, [<<"filters">>, <<"bar">>], [[_, _], _]] ->
+ throw({os_process_error, {exit_status, 1}});
+ [<<"ddoc">>, _, [<<"filters">>, <<"bar">>], [[_], _]] ->
+ [true, [split_batch]];
+ _ ->
+ meck:passthrough([Pid, Data])
+ end
+ end),
+ {ok, Ret} = couch_query_servers:filter_docs(Req, Db, DDoc, FName, Docs),
+ ?assertEqual([split_batch, split_batch], Ret).
+
+
gen_sum_kvs() ->
lists:map(fun(I) ->
Props = lists:map(fun(_) ->
diff --git a/src/couch/test/eunit/couch_server_tests.erl b/src/couch/test/eunit/couch_server_tests.erl
deleted file mode 100644
index 7d50700d2..000000000
--- a/src/couch/test/eunit/couch_server_tests.erl
+++ /dev/null
@@ -1,294 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_server_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--include("../src/couch_db_int.hrl").
--include("../src/couch_server_int.hrl").
-
-start() ->
- Ctx = test_util:start_couch(),
- config:set("log", "include_sasl", "false", false),
- Ctx.
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, []),
- Db.
-
-setup(rename) ->
- config:set("couchdb", "enable_database_recovery", "true", false),
- setup();
-setup(_) ->
- setup().
-
-teardown(Db) ->
- FilePath = couch_db:get_filepath(Db),
- (catch couch_db:close(Db)),
- (catch file:delete(FilePath)).
-
-teardown(rename, Db) ->
- config:set("couchdb", "enable_database_recovery", "false", false),
- teardown(Db);
-teardown(_, Db) ->
- teardown(Db).
-
-
-delete_db_test_() ->
- {
- "Test for proper deletion of db file",
- {
- setup,
- fun start/0, fun test_util:stop/1,
- [
- make_test_case(rename, [fun should_rename_on_delete/2]),
- make_test_case(delete, [fun should_delete/2])
- ]
- }
- }.
-
-make_test_case(Mod, Funs) ->
- {
- lists:flatten(io_lib:format("~s", [Mod])),
- {foreachx, fun setup/1, fun teardown/2, [{Mod, Fun} || Fun <- Funs]}
- }.
-
-should_rename_on_delete(_, Db) ->
- DbName = couch_db:name(Db),
- Origin = couch_db:get_filepath(Db),
- ?_test(begin
- ?assert(filelib:is_regular(Origin)),
- ?assertMatch(ok, couch_server:delete(DbName, [])),
- ?assertNot(filelib:is_regular(Origin)),
- DeletedFiles = deleted_files(Origin),
- ?assertMatch([_], DeletedFiles),
- [Renamed] = DeletedFiles,
- ?assertEqual(
- filename:extension(Origin), filename:extension(Renamed)),
- ?assert(filelib:is_regular(Renamed))
- end).
-
-should_delete(_, Db) ->
- DbName = couch_db:name(Db),
- Origin = couch_db:get_filepath(Db),
- ?_test(begin
- ?assert(filelib:is_regular(Origin)),
- ?assertMatch(ok, couch_server:delete(DbName, [])),
- ?assertNot(filelib:is_regular(Origin)),
- ?assertMatch([], deleted_files(Origin))
- end).
-
-deleted_files(ViewFile) ->
- filelib:wildcard(filename:rootname(ViewFile) ++ "*.deleted.*").
-
-
-bad_engine_option_test_() ->
- {
- setup,
- fun start/0,
- fun test_util:stop/1,
- [
- fun t_bad_engine_option/0
- ]
- }.
-
-
-t_bad_engine_option() ->
- Resp = couch_server:create(?tempdb(), [{engine, <<"cowabunga!">>}]),
- ?assertEqual(Resp, {error, {invalid_engine_extension, <<"cowabunga!">>}}).
-
-
-get_engine_path_test_() ->
- {
- setup,
- fun start/0, fun test_util:stop/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_return_engine_path/1,
- fun should_return_invalid_engine_error/1
- ]
- }
- }.
-
-
-should_return_engine_path(Db) ->
- DbName = couch_db:name(Db),
- Engine = couch_db_engine:get_engine(Db),
- Resp = couch_server:get_engine_path(DbName, Engine),
- FilePath = couch_db:get_filepath(Db),
- ?_assertMatch({ok, FilePath}, Resp).
-
-
-should_return_invalid_engine_error(Db) ->
- DbName = couch_db:name(Db),
- Engine = fake_engine,
- Resp = couch_server:get_engine_path(DbName, Engine),
- ?_assertMatch({error, {invalid_engine, Engine}}, Resp).
-
-
-interleaved_requests_test_() ->
- {
- setup,
- fun start_interleaved/0,
- fun stop_interleaved/1,
- fun make_interleaved_requests/1
- }.
-
-
-start_interleaved() ->
- TestDbName = ?tempdb(),
- meck:new(couch_db, [passthrough]),
- meck:expect(couch_db, start_link, fun(Engine, DbName, Filename, Options) ->
- case DbName of
- TestDbName ->
- receive
- go -> ok
- end,
- Res = meck:passthrough([Engine, DbName, Filename, Options]),
- % We're unlinking and sending a delayed
- % EXIT signal so that we can mimic a specific
- % message order in couch_server. On a test machine
- % this is a significant race condition, which affects
- % the ability to induce the bug.
- case Res of
- {ok, Db} ->
- DbPid = couch_db:get_pid(Db),
- unlink(DbPid),
- Msg = {'EXIT', DbPid, killed},
- erlang:send_after(2000, whereis(couch_server), Msg);
- _ ->
- ok
- end,
- Res;
- _ ->
- meck:passthrough([Engine, DbName, Filename, Options])
- end
- end),
- {test_util:start_couch(), TestDbName}.
-
-
-stop_interleaved({Ctx, TestDbName}) ->
- couch_server:delete(TestDbName, [?ADMIN_CTX]),
- meck:unload(),
- test_util:stop_couch(Ctx).
-
-
-make_interleaved_requests({_, TestDbName}) ->
- [
- fun() -> t_interleaved_create_delete_open(TestDbName) end
- ].
-
-
-t_interleaved_create_delete_open(DbName) ->
- {CrtRef, OpenRef} = {make_ref(), make_ref()},
- CrtMsg = {'$gen_call', {self(), CrtRef}, {create, DbName, [?ADMIN_CTX]}},
- FakePid = spawn(fun() -> ok end),
- OpenResult = {open_result, DbName, {ok, #db{main_pid = FakePid}}},
- OpenResultMsg = {'$gen_call', {self(), OpenRef}, OpenResult},
-
- % Get the current couch_server pid so we're sure
- % to not end up messaging two different pids
- CouchServer = whereis(couch_server),
-
- % Start our first instance that will succeed in
- % an invalid state. Notice that the opener pid
- % spawned by couch_server:open_async/5 will halt
- % in our meck expect function waiting for a message.
- %
- % We're using raw message passing here so that we don't
- % have to coordinate multiple processes for this test.
- CouchServer ! CrtMsg,
- {ok, Opener} = get_opener_pid(DbName),
-
- % We have to suspend couch_server so that we can enqueue
- % our next requests and let the opener finish processing.
- erlang:suspend_process(CouchServer),
-
- % We queue a confused open_result message in front of
- % the correct response from the opener.
- CouchServer ! OpenResultMsg,
-
- % Release the opener pid so it can continue
- Opener ! go,
-
- % Wait for the '$gen_call' message from OpenerPid to arrive
- % in couch_server's mailbox
- ok = wait_for_open_async_result(CouchServer, Opener),
-
- % Now monitor and resume the couch_server and assert that
- % couch_server does not crash while processing OpenResultMsg
- CSRef = erlang:monitor(process, CouchServer),
- erlang:resume_process(CouchServer),
- check_monitor_not_triggered(CSRef),
-
- % Our open_result message was processed and ignored
- ?assertEqual({OpenRef, ok}, get_next_message()),
-
- % Our create request was processed normally after we
- % ignored the spurious open_result
- ?assertMatch({CrtRef, {ok, _}}, get_next_message()),
-
- % And finally assert that couch_server is still
- % alive.
- ?assert(is_process_alive(CouchServer)),
- check_monitor_not_triggered(CSRef).
-
-
-get_opener_pid(DbName) ->
- WaitFun = fun() ->
- case ets:lookup(couch_dbs, DbName) of
- [#entry{pid = Pid}] ->
- {ok, Pid};
- [] ->
- wait
- end
- end,
- test_util:wait(WaitFun).
-
-
-wait_for_open_async_result(CouchServer, Opener) ->
- WaitFun = fun() ->
- {_, Messages} = erlang:process_info(CouchServer, messages),
- Found = lists:foldl(fun(Msg, Acc) ->
- case Msg of
- {'$gen_call', {Opener, _}, {open_result, _, {ok, _}}} ->
- true;
- _ ->
- Acc
- end
- end, false, Messages),
- if Found -> ok; true -> wait end
- end,
- test_util:wait(WaitFun).
-
-
-check_monitor_not_triggered(Ref) ->
- receive
- {'DOWN', Ref, _, _, Reason0} ->
- erlang:error({monitor_triggered, Reason0})
- after 100 ->
- ok
- end.
-
-
-get_next_message() ->
- receive
- Msg ->
- Msg
- after 5000 ->
- erlang:error(timeout)
- end.
diff --git a/src/couch/test/eunit/couch_stream_tests.erl b/src/couch/test/eunit/couch_stream_tests.erl
deleted file mode 100644
index a7fedf0af..000000000
--- a/src/couch/test/eunit/couch_stream_tests.erl
+++ /dev/null
@@ -1,124 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_stream_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
--define(ENGINE(FdVar), {couch_bt_engine_stream, {FdVar, []}}).
-
-setup() ->
- {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
- {ok, Stream} = couch_stream:open(?ENGINE(Fd), []),
- {Fd, Stream}.
-
-teardown({Fd, _}) ->
- ok = couch_file:close(Fd).
-
-
-stream_test_() ->
- {
- "CouchDB stream tests",
- {
- setup,
- fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_write/1,
- fun should_write_consecutive/1,
- fun should_write_empty_binary/1,
- fun should_return_file_pointers_on_close/1,
- fun should_return_stream_size_on_close/1,
- fun should_return_valid_pointers/1,
- fun should_recall_last_pointer_position/1,
- fun should_stream_more_with_4K_chunk_size/1,
- fun should_stop_on_normal_exit_of_stream_opener/1
- ]
- }
- }
- }.
-
-
-should_write({_, Stream}) ->
- ?_assertEqual(ok, couch_stream:write(Stream, <<"food">>)).
-
-should_write_consecutive({_, Stream}) ->
- couch_stream:write(Stream, <<"food">>),
- ?_assertEqual(ok, couch_stream:write(Stream, <<"foob">>)).
-
-should_write_empty_binary({_, Stream}) ->
- ?_assertEqual(ok, couch_stream:write(Stream, <<>>)).
-
-should_return_file_pointers_on_close({_, Stream}) ->
- couch_stream:write(Stream, <<"foodfoob">>),
- {NewEngine, _, _, _, _} = couch_stream:close(Stream),
- {ok, Ptrs} = couch_stream:to_disk_term(NewEngine),
- ?_assertEqual([{0, 8}], Ptrs).
-
-should_return_stream_size_on_close({_, Stream}) ->
- couch_stream:write(Stream, <<"foodfoob">>),
- {_, Length, _, _, _} = couch_stream:close(Stream),
- ?_assertEqual(8, Length).
-
-should_return_valid_pointers({_Fd, Stream}) ->
- couch_stream:write(Stream, <<"foodfoob">>),
- {NewEngine, _, _, _, _} = couch_stream:close(Stream),
- ?_assertEqual(<<"foodfoob">>, read_all(NewEngine)).
-
-should_recall_last_pointer_position({Fd, Stream}) ->
- couch_stream:write(Stream, <<"foodfoob">>),
- {_, _, _, _, _} = couch_stream:close(Stream),
- {ok, ExpPtr} = couch_file:bytes(Fd),
- {ok, Stream2} = couch_stream:open(?ENGINE(Fd)),
- ZeroBits = <<0:(8 * 10)>>,
- OneBits = <<1:(8 * 10)>>,
- ok = couch_stream:write(Stream2, OneBits),
- ok = couch_stream:write(Stream2, ZeroBits),
- {NewEngine, 20, _, _, _} = couch_stream:close(Stream2),
- {ok, Ptrs} = couch_stream:to_disk_term(NewEngine),
- [{ExpPtr, 20}] = Ptrs,
- AllBits = iolist_to_binary([OneBits, ZeroBits]),
- ?_assertEqual(AllBits, read_all(NewEngine)).
-
-should_stream_more_with_4K_chunk_size({Fd, _}) ->
- {ok, Stream} = couch_stream:open(?ENGINE(Fd), [{buffer_size, 4096}]),
- lists:foldl(
- fun(_, Acc) ->
- Data = <<"a1b2c">>,
- couch_stream:write(Stream, Data),
- [Data | Acc]
- end, [], lists:seq(1, 1024)),
- {NewEngine, Length, _, _, _} = couch_stream:close(Stream),
- {ok, Ptrs} = couch_stream:to_disk_term(NewEngine),
- ?_assertMatch({[{0, 4100}, {4106, 1020}], 5120}, {Ptrs, Length}).
-
-should_stop_on_normal_exit_of_stream_opener({Fd, _}) ->
- RunnerPid = self(),
- OpenerPid = spawn(
- fun() ->
- {ok, StreamPid} = couch_stream:open(?ENGINE(Fd)),
- RunnerPid ! {pid, StreamPid}
- end),
- StreamPid = receive
- {pid, StreamPid0} -> StreamPid0
- end,
- % Confirm the validity of the test by verifying the stream opener has died
- ?assertNot(is_process_alive(OpenerPid)),
- % Verify the stream itself has also died
- ?_assertNot(is_process_alive(StreamPid)).
-
-
-read_all(Engine) ->
- Data = couch_stream:foldl(Engine, fun(Bin, Acc) -> [Bin, Acc] end, []),
- iolist_to_binary(Data).
diff --git a/src/couch/test/eunit/couch_task_status_tests.erl b/src/couch/test/eunit/couch_task_status_tests.erl
deleted file mode 100644
index 0ec03563b..000000000
--- a/src/couch/test/eunit/couch_task_status_tests.erl
+++ /dev/null
@@ -1,233 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_task_status_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-
-setup() ->
- Ctx = test_util:start(?MODULE, [couch_log], [{dont_mock, [config]}]),
- {ok, TaskStatusPid} = couch_task_status:start_link(),
- TaskUpdaterPid = spawn(fun() -> loop() end),
- {TaskStatusPid, TaskUpdaterPid, Ctx}.
-
-
-teardown({TaskStatusPid, _, Ctx})->
- test_util:stop_sync_throw(TaskStatusPid, fun() ->
- couch_task_status:stop()
- end, timeout_error, ?TIMEOUT),
- test_util:stop(Ctx).
-
-
-couch_task_status_test_() ->
- {
- "CouchDB task status updates",
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_register_task/1,
- fun should_set_task_startup_time/1,
- fun should_have_update_time_as_startup_before_any_progress/1,
- fun should_set_task_type/1,
- fun should_not_register_multiple_tasks_for_same_pid/1,
- fun should_set_task_progress/1,
- fun should_update_task_progress/1,
- fun should_update_time_changes_on_task_progress/1,
- %% fun should_control_update_frequency/1,
- fun should_reset_control_update_frequency/1,
- fun should_track_multiple_tasks/1,
- fun should_finish_task/1
-
- ]
- }
- }.
-
-
-should_register_task({_, Pid, _Ctx}) ->
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- ?_assertEqual(1, length(couch_task_status:all())).
-
-should_set_task_startup_time({_, Pid, _Ctx}) ->
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- ?_assert(is_integer(get_task_prop(Pid, started_on))).
-
-should_have_update_time_as_startup_before_any_progress({_, Pid, _Ctx}) ->
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- StartTime = get_task_prop(Pid, started_on),
- ?_assertEqual(StartTime, get_task_prop(Pid, updated_on)).
-
-should_set_task_type({_, Pid, _Ctx}) ->
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- ?_assertEqual(replication, get_task_prop(Pid, type)).
-
-should_not_register_multiple_tasks_for_same_pid({_, Pid, _Ctx}) ->
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- ?_assertEqual({add_task_error, already_registered},
- call(Pid, add, [{type, compaction}, {progress, 0}])).
-
-should_set_task_progress({_, Pid, _Ctx}) ->
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- ?_assertEqual(0, get_task_prop(Pid, progress)).
-
-should_update_task_progress({_, Pid, _Ctx}) ->
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- call(Pid, update, [{progress, 25}]),
- ?_assertEqual(25, get_task_prop(Pid, progress)).
-
-should_update_time_changes_on_task_progress({_, Pid, _Ctx}) ->
- ?_assert(
- begin
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- ok = timer:sleep(1000), % sleep a while so the update time differs from the start time
- call(Pid, update, [{progress, 25}]),
- get_task_prop(Pid, updated_on) > get_task_prop(Pid, started_on)
- end).
-
-%%should_control_update_frequency({_, Pid, _Ctx}) ->
-%% ?_assertEqual(66,
-%% begin
-%% ok = call(Pid, add, [{type, replication}, {progress, 0}]),
-%% call(Pid, update, [{progress, 50}]),
-%% call(Pid, update_frequency, 500),
-%% call(Pid, update, [{progress, 66}]),
-%% call(Pid, update, [{progress, 77}]),
-%% get_task_prop(Pid, progress)
-%% end).
-
-should_reset_control_update_frequency({_, Pid, _Ctx}) ->
- ?_assertEqual(87,
- begin
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- call(Pid, update, [{progress, 50}]),
- call(Pid, update_frequency, 500),
- call(Pid, update, [{progress, 66}]),
- call(Pid, update, [{progress, 77}]),
- call(Pid, update_frequency, 0),
- call(Pid, update, [{progress, 87}]),
- get_task_prop(Pid, progress)
- end).
-
-should_track_multiple_tasks(_) ->
- ?_assert(run_multiple_tasks()).
-
-should_finish_task({_, Pid, _Ctx}) ->
- ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- ?assertEqual(1, length(couch_task_status:all())),
- ok = call(Pid, done),
- ?_assertEqual(0, length(couch_task_status:all())).
-
-
-run_multiple_tasks() ->
- Pid1 = spawn(fun() -> loop() end),
- Pid2 = spawn(fun() -> loop() end),
- Pid3 = spawn(fun() -> loop() end),
- call(Pid1, add, [{type, replication}, {progress, 0}]),
- call(Pid2, add, [{type, compaction}, {progress, 0}]),
- call(Pid3, add, [{type, indexer}, {progress, 0}]),
-
- ?assertEqual(3, length(couch_task_status:all())),
- ?assertEqual(replication, get_task_prop(Pid1, type)),
- ?assertEqual(compaction, get_task_prop(Pid2, type)),
- ?assertEqual(indexer, get_task_prop(Pid3, type)),
-
- call(Pid2, update, [{progress, 33}]),
- call(Pid3, update, [{progress, 42}]),
- call(Pid1, update, [{progress, 11}]),
- ?assertEqual(42, get_task_prop(Pid3, progress)),
- call(Pid1, update, [{progress, 72}]),
- ?assertEqual(72, get_task_prop(Pid1, progress)),
- ?assertEqual(33, get_task_prop(Pid2, progress)),
-
- call(Pid1, done),
- ?assertEqual(2, length(couch_task_status:all())),
- call(Pid3, done),
- ?assertEqual(1, length(couch_task_status:all())),
- call(Pid2, done),
- ?assertEqual(0, length(couch_task_status:all())),
-
- true.
-
-
-loop() ->
- receive
- {add, Props, From} ->
- Resp = couch_task_status:add_task(Props),
- From ! {ok, self(), Resp},
- loop();
- {update, Props, From} ->
- Resp = couch_task_status:update(Props),
- From ! {ok, self(), Resp},
- loop();
- {update_frequency, Msecs, From} ->
- Resp = couch_task_status:set_update_frequency(Msecs),
- From ! {ok, self(), Resp},
- loop();
- {done, From} ->
- From ! {ok, self(), ok}
- end.
-
-call(Pid, done) ->
- Ref = erlang:monitor(process, Pid),
- Pid ! {done, self()},
- Res = wait(Pid),
- receive
- {'DOWN', Ref, _Type, Pid, _Info} ->
- Res
- after ?TIMEOUT ->
- throw(timeout_error)
- end;
-call(Pid, Command) ->
- Pid ! {Command, self()},
- wait(Pid).
-
-call(Pid, Command, Arg) ->
- Pid ! {Command, Arg, self()},
- wait(Pid).
-
-wait(Pid) ->
- receive
- {ok, Pid, Msg} ->
- Msg
- after ?TIMEOUT ->
- throw(timeout_error)
- end.
-
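-% Scan couch_task_status:all() for the task registered by Pid and return the
-% value of Prop, failing the test if the task or the property is missing.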
-get_task_prop(Pid, Prop) ->
- From = list_to_binary(pid_to_list(Pid)),
- Element = lists:foldl(
- fun(PropList, Acc) ->
- case couch_util:get_value(pid, PropList) of
- From ->
- [PropList | Acc];
- _ ->
- Acc
- end
- end,
- [], couch_task_status:all()
- ),
- case couch_util:get_value(Prop, hd(Element), nil) of
- nil ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, "Could not get property '"
- ++ couch_util:to_list(Prop)
- ++ "' for task "
- ++ pid_to_list(Pid)}]});
- Value ->
- Value
- end.
diff --git a/src/couch/test/eunit/couchdb_attachments_tests.erl b/src/couch/test/eunit/couchdb_attachments_tests.erl
deleted file mode 100644
index 04859dbc9..000000000
--- a/src/couch/test/eunit/couchdb_attachments_tests.erl
+++ /dev/null
@@ -1,765 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_attachments_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--define(COMPRESSION_LEVEL, 8).
--define(ATT_BIN_NAME, <<"logo.png">>).
--define(ATT_TXT_NAME, <<"file.erl">>).
--define(FIXTURE_PNG, filename:join([?FIXTURESDIR, "logo.png"])).
--define(FIXTURE_TXT, ?ABS_PATH(?FILE)).
--define(TIMEOUT, 5000).
--define(TIMEOUT_EUNIT, 100).
--define(TIMEWAIT, 1000).
--define(i2l(I), integer_to_list(I)).
-
-
-start() ->
- Ctx = test_util:start_couch(),
- % ensure default compression settings for the attachments_compression_tests
- config:set("attachments", "compression_level",
- ?i2l(?COMPRESSION_LEVEL), false),
- config:set("attachments", "compressible_types", "text/*", false),
- Ctx.
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, []),
- ok = couch_db:close(Db),
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(couch_httpd, port),
- Host = Addr ++ ":" ++ ?i2l(Port),
- {Host, ?b2l(DbName)}.
-
-setup({binary, standalone}) ->
- {Host, DbName} = setup(),
- setup_att(fun create_standalone_png_att/2, Host, DbName, ?FIXTURE_PNG);
-setup({text, standalone}) ->
- {Host, DbName} = setup(),
- setup_att(fun create_standalone_text_att/2, Host, DbName, ?FIXTURE_TXT);
-setup({binary, inline}) ->
- {Host, DbName} = setup(),
- setup_att(fun create_inline_png_att/2, Host, DbName, ?FIXTURE_PNG);
-setup({text, inline}) ->
- {Host, DbName} = setup(),
- setup_att(fun create_inline_text_att/2, Host, DbName, ?FIXTURE_TXT);
-setup(compressed) ->
- {Host, DbName} = setup(),
- setup_att(fun create_already_compressed_att/2, Host, DbName, ?FIXTURE_TXT).
-setup_att(Fun, Host, DbName, File) ->
- HttpHost = "http://" ++ Host,
- AttUrl = Fun(HttpHost, DbName),
- {ok, Data} = file:read_file(File),
- DocUrl = string:join([HttpHost, DbName, "doc"], "/"),
- Helpers = {DbName, DocUrl, AttUrl},
- {Data, Helpers}.
-
-teardown(_, {_, {DbName, _, _}}) ->
- teardown(DbName).
-
-teardown({_, DbName}) ->
- teardown(DbName);
-teardown(DbName) ->
- ok = couch_server:delete(?l2b(DbName), []),
- ok.
-
-
-attachments_test_() ->
- {
- "Attachments tests",
- {
- setup,
- fun start/0, fun test_util:stop_couch/1,
- [
- attachments_md5_tests(),
- attachments_compression_tests()
- ]
- }
- }.
-
-attachments_md5_tests() ->
- {
- "Attachments MD5 tests",
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_upload_attachment_without_md5/1,
- fun should_upload_attachment_by_chunks_without_md5/1,
- fun should_upload_attachment_with_valid_md5_header/1,
- fun should_upload_attachment_by_chunks_with_valid_md5_header/1,
- fun should_upload_attachment_by_chunks_with_valid_md5_trailer/1,
- fun should_reject_attachment_with_invalid_md5/1,
- fun should_reject_chunked_attachment_with_invalid_md5/1,
- fun should_reject_chunked_attachment_with_invalid_md5_trailer/1
- ]
- }
- }.
-
-attachments_compression_tests() ->
- Funs = [
- fun should_get_att_without_accept_gzip_encoding/2,
- fun should_get_att_with_accept_gzip_encoding/2,
- fun should_get_att_with_accept_deflate_encoding/2,
- fun should_return_406_response_on_unsupported_encoding/2,
- fun should_get_doc_with_att_data/2,
- fun should_get_doc_with_att_data_stub/2
- ],
- {
- "Attachments compression tests",
- [
- {
- "Created via Attachments API",
- created_attachments_compression_tests(standalone, Funs)
- },
- {
- "Created inline via Document API",
- created_attachments_compression_tests(inline, Funs)
- },
- {
- "Created already been compressed via Attachments API",
- {
- foreachx,
- fun setup/1, fun teardown/2,
- [{compressed, Fun} || Fun <- Funs]
- }
- },
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_not_create_compressed_att_with_deflate_encoding/1,
- fun should_not_create_compressed_att_with_compress_encoding/1,
- fun should_create_compressible_att_with_ctype_params/1
- ]
- }
- ]
- }.
-
-created_attachments_compression_tests(Mod, Funs) ->
- [
- {
- "Compressiable attachments",
- {
- foreachx,
- fun setup/1, fun teardown/2,
- [{{text, Mod}, Fun} || Fun <- Funs]
- }
- },
- {
- "Uncompressiable attachments",
- {
- foreachx,
- fun setup/1, fun teardown/2,
- [{{binary, Mod}, Fun} || Fun <- Funs]
- }
- }
- ].
-
-
-
-should_upload_attachment_without_md5({Host, DbName}) ->
- ?_test(begin
- AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
- Body = "We all live in a yellow submarine!",
- Headers = [
- {"Content-Length", "34"},
- {"Content-Type", "text/plain"},
- {"Host", Host}
- ],
- {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
- ?assertEqual(201, Code),
- ?assertEqual(true, get_json(Json, [<<"ok">>]))
- end).
-
-should_upload_attachment_by_chunks_without_md5({Host, DbName}) ->
- ?_test(begin
- AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
- AttData = <<"We all live in a yellow submarine!">>,
- <<Part1:21/binary, Part2:13/binary>> = AttData,
- Body = [chunked_body([Part1, Part2]), "\r\n"],
- Headers = [
- {"Content-Type", "text/plain"},
- {"Transfer-Encoding", "chunked"},
- {"Host", Host}
- ],
- {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
- ?assertEqual(201, Code),
- ?assertEqual(true, get_json(Json, [<<"ok">>]))
- end).
-
-should_upload_attachment_with_valid_md5_header({Host, DbName}) ->
- ?_test(begin
- AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
- Body = "We all live in a yellow submarine!",
- Headers = [
- {"Content-Length", "34"},
- {"Content-Type", "text/plain"},
- {"Content-MD5", ?b2l(base64:encode(couch_hash:md5_hash(Body)))},
- {"Host", Host}
- ],
- {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
- ?assertEqual(201, Code),
- ?assertEqual(true, get_json(Json, [<<"ok">>]))
- end).
-
-should_upload_attachment_by_chunks_with_valid_md5_header({Host, DbName}) ->
- ?_test(begin
- AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
- AttData = <<"We all live in a yellow submarine!">>,
- <<Part1:21/binary, Part2:13/binary>> = AttData,
- Body = [chunked_body([Part1, Part2]), "\r\n"],
- Headers = [
- {"Content-Type", "text/plain"},
- {"Content-MD5", ?b2l(base64:encode(couch_hash:md5_hash(AttData)))},
- {"Host", Host},
- {"Transfer-Encoding", "chunked"}
- ],
- {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
- ?assertEqual(201, Code),
- ?assertEqual(true, get_json(Json, [<<"ok">>]))
- end).
-
-should_upload_attachment_by_chunks_with_valid_md5_trailer({Host, DbName}) ->
- ?_test(begin
- AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
- AttData = <<"We all live in a yellow submarine!">>,
- <<Part1:21/binary, Part2:13/binary>> = AttData,
- Body = [chunked_body([Part1, Part2]),
- "Content-MD5: ", base64:encode(couch_hash:md5_hash(AttData)),
- "\r\n\r\n"],
- Headers = [
- {"Content-Type", "text/plain"},
- {"Host", Host},
- {"Trailer", "Content-MD5"},
- {"Transfer-Encoding", "chunked"}
- ],
- {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
- ?assertEqual(201, Code),
- ?assertEqual(true, get_json(Json, [<<"ok">>]))
- end).
-
-should_reject_attachment_with_invalid_md5({Host, DbName}) ->
- ?_test(begin
- AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
- Body = "We all live in a yellow submarine!",
- Headers = [
- {"Content-Length", "34"},
- {"Content-Type", "text/plain"},
- {"Content-MD5", ?b2l(base64:encode(<<"foobar!">>))},
- {"Host", Host}
- ],
- {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
- ?assertEqual(400, Code),
- ?assertEqual(<<"content_md5_mismatch">>,
- get_json(Json, [<<"error">>]))
- end).
-
-
-should_reject_chunked_attachment_with_invalid_md5({Host, DbName}) ->
- ?_test(begin
- AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
- AttData = <<"We all live in a yellow submarine!">>,
- <<Part1:21/binary, Part2:13/binary>> = AttData,
- Body = [chunked_body([Part1, Part2]), "\r\n"],
- Headers = [
- {"Content-Type", "text/plain"},
- {"Content-MD5", ?b2l(base64:encode(<<"foobar!">>))},
- {"Host", Host},
- {"Transfer-Encoding", "chunked"}
- ],
- {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
- ?assertEqual(400, Code),
- ?assertEqual(<<"content_md5_mismatch">>,
- get_json(Json, [<<"error">>]))
- end).
-
-should_reject_chunked_attachment_with_invalid_md5_trailer({Host, DbName}) ->
- ?_test(begin
- AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
- AttData = <<"We all live in a yellow submarine!">>,
- <<Part1:21/binary, Part2:13/binary>> = AttData,
- Body = [chunked_body([Part1, Part2]),
- "Content-MD5: ", base64:encode(<<"foobar!">>),
- "\r\n\r\n"],
- Headers = [
- {"Content-Type", "text/plain"},
- {"Host", Host},
- {"Trailer", "Content-MD5"},
- {"Transfer-Encoding", "chunked"}
- ],
- {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
- ?assertEqual(400, Code),
- ?assertEqual(<<"content_md5_mismatch">>, get_json(Json, [<<"error">>]))
- end).
-
-should_get_att_without_accept_gzip_encoding(_, {Data, {_, _, AttUrl}}) ->
- ?_test(begin
- {ok, Code, Headers, Body} = test_request:get(AttUrl),
- ?assertEqual(200, Code),
- ?assertNot(lists:member({"Content-Encoding", "gzip"}, Headers)),
- ?assertEqual(Data, iolist_to_binary(Body))
- end).
-
-should_get_att_with_accept_gzip_encoding(compressed, {Data, {_, _, AttUrl}}) ->
- ?_test(begin
- {ok, Code, Headers, Body} = test_request:get(
- AttUrl, [{"Accept-Encoding", "gzip"}]),
- ?assertEqual(200, Code),
- ?assert(lists:member({"Content-Encoding", "gzip"}, Headers)),
- ?assertEqual(Data, zlib:gunzip(iolist_to_binary(Body)))
- end);
-should_get_att_with_accept_gzip_encoding({text, _}, {Data, {_, _, AttUrl}}) ->
- ?_test(begin
- {ok, Code, Headers, Body} = test_request:get(
- AttUrl, [{"Accept-Encoding", "gzip"}]),
- ?assertEqual(200, Code),
- ?assert(lists:member({"Content-Encoding", "gzip"}, Headers)),
- ?assertEqual(Data, zlib:gunzip(iolist_to_binary(Body)))
- end);
-should_get_att_with_accept_gzip_encoding({binary, _}, {Data, {_, _, AttUrl}}) ->
- ?_test(begin
- {ok, Code, Headers, Body} = test_request:get(
- AttUrl, [{"Accept-Encoding", "gzip"}]),
- ?assertEqual(200, Code),
- ?assertEqual(undefined,
- couch_util:get_value("Content-Encoding", Headers)),
- ?assertEqual(Data, iolist_to_binary(Body))
- end).
-
-should_get_att_with_accept_deflate_encoding(_, {Data, {_, _, AttUrl}}) ->
- ?_test(begin
- {ok, Code, Headers, Body} = test_request:get(
- AttUrl, [{"Accept-Encoding", "deflate"}]),
- ?assertEqual(200, Code),
- ?assertEqual(undefined,
- couch_util:get_value("Content-Encoding", Headers)),
- ?assertEqual(Data, iolist_to_binary(Body))
- end).
-
-should_return_406_response_on_unsupported_encoding(_, {_, {_, _, AttUrl}}) ->
- ?_assertEqual(406,
- begin
- {ok, Code, _, _} = test_request:get(
- AttUrl, [{"Accept-Encoding", "deflate, *;q=0"}]),
- Code
- end).
-
-should_get_doc_with_att_data(compressed, {Data, {_, DocUrl, _}}) ->
- ?_test(begin
- Url = DocUrl ++ "?attachments=true",
- {ok, Code, _, Body} = test_request:get(
- Url, [{"Accept", "application/json"}]),
- ?assertEqual(200, Code),
- Json = jiffy:decode(Body),
- AttJson = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_TXT_NAME]),
- AttData = couch_util:get_nested_json_value(
- AttJson, [<<"data">>]),
- ?assertEqual(
- <<"text/plain">>,
- couch_util:get_nested_json_value(AttJson,[<<"content_type">>])),
- ?assertEqual(Data, base64:decode(AttData))
- end);
-should_get_doc_with_att_data({text, _}, {Data, {_, DocUrl, _}}) ->
- ?_test(begin
- Url = DocUrl ++ "?attachments=true",
- {ok, Code, _, Body} = test_request:get(
- Url, [{"Accept", "application/json"}]),
- ?assertEqual(200, Code),
- Json = jiffy:decode(Body),
- AttJson = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_TXT_NAME]),
- AttData = couch_util:get_nested_json_value(
- AttJson, [<<"data">>]),
- ?assertEqual(
- <<"text/plain">>,
- couch_util:get_nested_json_value(AttJson,[<<"content_type">>])),
- ?assertEqual(Data, base64:decode(AttData))
- end);
-should_get_doc_with_att_data({binary, _}, {Data, {_, DocUrl, _}}) ->
- ?_test(begin
- Url = DocUrl ++ "?attachments=true",
- {ok, Code, _, Body} = test_request:get(
- Url, [{"Accept", "application/json"}]),
- ?assertEqual(200, Code),
- Json = jiffy:decode(Body),
- AttJson = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_BIN_NAME]),
- AttData = couch_util:get_nested_json_value(
- AttJson, [<<"data">>]),
- ?assertEqual(
- <<"image/png">>,
- couch_util:get_nested_json_value(AttJson,[<<"content_type">>])),
- ?assertEqual(Data, base64:decode(AttData))
- end).
-
-should_get_doc_with_att_data_stub(compressed, {Data, {_, DocUrl, _}}) ->
- ?_test(begin
- Url = DocUrl ++ "?att_encoding_info=true",
- {ok, Code, _, Body} = test_request:get(
- Url, [{"Accept", "application/json"}]),
- ?assertEqual(200, Code),
- Json = jiffy:decode(Body),
- {AttJson} = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_TXT_NAME]),
- ?assertEqual(<<"gzip">>,
- couch_util:get_value(<<"encoding">>, AttJson)),
- AttLength = couch_util:get_value(<<"length">>, AttJson),
- EncLength = couch_util:get_value(<<"encoded_length">>, AttJson),
- ?assertEqual(AttLength, EncLength),
- ?assertEqual(iolist_size(zlib:gzip(Data)), AttLength)
- end);
-should_get_doc_with_att_data_stub({text, _}, {Data, {_, DocUrl, _}}) ->
- ?_test(begin
- Url = DocUrl ++ "?att_encoding_info=true",
- {ok, Code, _, Body} = test_request:get(
- Url, [{"Accept", "application/json"}]),
- ?assertEqual(200, Code),
- Json = jiffy:decode(Body),
- {AttJson} = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_TXT_NAME]),
- ?assertEqual(<<"gzip">>,
- couch_util:get_value(<<"encoding">>, AttJson)),
- AttEncLength = iolist_size(gzip(Data)),
- ?assertEqual(AttEncLength,
- couch_util:get_value(<<"encoded_length">>, AttJson)),
- ?assertEqual(byte_size(Data),
- couch_util:get_value(<<"length">>, AttJson))
- end);
-should_get_doc_with_att_data_stub({binary, _}, {Data, {_, DocUrl, _}}) ->
- ?_test(begin
- Url = DocUrl ++ "?att_encoding_info=true",
- {ok, Code, _, Body} = test_request:get(
- Url, [{"Accept", "application/json"}]),
- ?assertEqual(200, Code),
- Json = jiffy:decode(Body),
- {AttJson} = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_BIN_NAME]),
- ?assertEqual(undefined,
- couch_util:get_value(<<"encoding">>, AttJson)),
- ?assertEqual(undefined,
- couch_util:get_value(<<"encoded_length">>, AttJson)),
- ?assertEqual(byte_size(Data),
- couch_util:get_value(<<"length">>, AttJson))
- end).
-
-should_not_create_compressed_att_with_deflate_encoding({Host, DbName}) ->
- ?_assertEqual(415,
- begin
- HttpHost = "http://" ++ Host,
- AttUrl = string:join([HttpHost, DbName, ?docid(), "file.txt"], "/"),
- {ok, Data} = file:read_file(?FIXTURE_TXT),
- Body = zlib:compress(Data),
- Headers = [
- {"Content-Encoding", "deflate"},
- {"Content-Type", "text/plain"}
- ],
- {ok, Code, _, _} = test_request:put(AttUrl, Headers, Body),
- Code
- end).
-
-should_not_create_compressed_att_with_compress_encoding({Host, DbName}) ->
- % Note: As of OTP R13B04 there is no LZW compression library in OTP
- % (i.e. no implementation of the UNIX compress utility).
- % However, there is a simple working Erlang implementation at:
- % http://scienceblogs.com/goodmath/2008/01/simple_lempelziv_compression_i.php
- ?_assertEqual(415,
- begin
- HttpHost = "http://" ++ Host,
- AttUrl = string:join([HttpHost, DbName, ?docid(), "file.txt"], "/"),
- {ok, Data} = file:read_file(?FIXTURE_TXT),
- Headers = [
- {"Content-Encoding", "compress"},
- {"Content-Type", "text/plain"}
- ],
- {ok, Code, _, _} = test_request:put(AttUrl, Headers, Data),
- Code
- end).
-
-should_create_compressible_att_with_ctype_params({Host, DbName}) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(begin
- HttpHost = "http://" ++ Host,
- DocUrl = string:join([HttpHost, DbName, ?docid()], "/"),
- AttUrl = string:join([DocUrl, ?b2l(?ATT_TXT_NAME)], "/"),
- {ok, Data} = file:read_file(?FIXTURE_TXT),
- Headers = [{"Content-Type", "text/plain; charset=UTF-8"}],
- {ok, Code0, _, _} = test_request:put(AttUrl, Headers, Data),
- ?assertEqual(201, Code0),
-
- {ok, Code1, _, Body} = test_request:get(
- DocUrl ++ "?att_encoding_info=true"),
- ?assertEqual(200, Code1),
- Json = jiffy:decode(Body),
- {AttJson} = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_TXT_NAME]),
- ?assertEqual(<<"gzip">>,
- couch_util:get_value(<<"encoding">>, AttJson)),
- AttEncLength = iolist_size(gzip(Data)),
- ?assertEqual(AttEncLength,
- couch_util:get_value(<<"encoded_length">>, AttJson)),
- ?assertEqual(byte_size(Data),
- couch_util:get_value(<<"length">>, AttJson))
- end)}.
-
-
-compact_after_lowering_attachment_size_limit_test_() ->
- {
- "Compact after lowering attachment size limit",
- {
- foreach,
- fun() ->
- Ctx = test_util:start_couch(),
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- {Ctx, DbName}
- end,
- fun({Ctx, DbName}) ->
- config:delete("couchdb", "max_attachment_size"),
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- test_util:stop_couch(Ctx)
- end,
- [
- fun should_compact_after_lowering_attachment_size_limit/1
- ]
- }
- }.
-
-
-should_compact_after_lowering_attachment_size_limit({_Ctx, DbName}) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(begin
- {ok, Db1} = couch_db:open(DbName, [?ADMIN_CTX]),
- Doc1 = #doc{id = <<"doc1">>, atts = att(1000)},
- {ok, _} = couch_db:update_doc(Db1, Doc1, []),
- couch_db:close(Db1),
- config:set("couchdb", "max_attachment_size", "1", _Persist = false),
- compact_db(DbName),
- {ok, Db2} = couch_db:open_int(DbName, []),
- {ok, Doc2} = couch_db:open_doc(Db2, <<"doc1">>),
- couch_db:close(Db2),
- [Att] = Doc2#doc.atts,
- ?assertEqual(1000, couch_att:fetch(att_len, Att))
- end)}.
-
-
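-% Build a one-element attachment list: an attachment of Size bytes of $x with
-% an explicit att_len, supplied through a data fun.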
-att(Size) when is_integer(Size), Size >= 1 ->
- [couch_att:new([
- {name, <<"att">>},
- {type, <<"app/binary">>},
- {att_len, Size},
- {data, fun(_Bytes) ->
- << <<"x">> || _ <- lists:seq(1, Size) >>
- end}
- ])].
-
-
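-% Trigger a database compaction and block until the db no longer reports
-% compact_running in its info.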
-compact_db(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, _CompactPid} = couch_db:start_compact(Db),
- wait_compaction(DbName, "database", ?LINE),
- ok = couch_db:close(Db).
-
-
-wait_compaction(DbName, Kind, Line) ->
- WaitFun = fun() ->
- case is_compaction_running(DbName) of
- true -> wait;
- false -> ok
- end
- end,
- case test_util:wait(WaitFun, ?TIMEOUT) of
- timeout ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, Line},
- {reason, "Timeout waiting for "
- ++ Kind
- ++ " database compaction"}]});
- _ ->
- ok
- end.
-
-
-is_compaction_running(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, DbInfo} = couch_db:get_db_info(Db),
- couch_db:close(Db),
- couch_util:get_value(compact_running, DbInfo) =:= true.
-
-
-internal_replication_after_lowering_attachment_size_limit_test_() ->
- {
- "Internal replication after lowering max attachment size",
- {
- foreach,
- fun() ->
- Ctx = test_util:start_couch([mem3]),
- SrcName = ?tempdb(),
- {ok, SrcDb} = couch_db:create(SrcName, [?ADMIN_CTX]),
- ok = couch_db:close(SrcDb),
- TgtName = ?tempdb(),
- {ok, TgtDb} = couch_db:create(TgtName, [?ADMIN_CTX]),
- ok = couch_db:close(TgtDb),
- {Ctx, SrcName, TgtName}
- end,
- fun({Ctx, SrcName, TgtName}) ->
- config:delete("couchdb", "max_attachment_size"),
- ok = couch_server:delete(SrcName, [?ADMIN_CTX]),
- ok = couch_server:delete(TgtName, [?ADMIN_CTX]),
- test_util:stop_couch(Ctx)
- end,
- [
- fun should_replicate_after_lowering_attachment_size/1
- ]
- }
- }.
-
-should_replicate_after_lowering_attachment_size({_Ctx, SrcName, TgtName}) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(begin
- {ok, SrcDb} = couch_db:open(SrcName, [?ADMIN_CTX]),
- SrcDoc = #doc{id = <<"doc">>, atts = att(1000)},
- {ok, _} = couch_db:update_doc(SrcDb, SrcDoc, []),
- couch_db:close(SrcDb),
- config:set("couchdb", "max_attachment_size", "1", _Persist = false),
- % Create a pair of "fake" shards
- SrcShard = #shard{name = SrcName, node = node()},
- TgtShard = #shard{name = TgtName, node = node()},
- mem3_rep:go(SrcShard, TgtShard, []),
- {ok, TgtDb} = couch_db:open_int(TgtName, []),
- {ok, TgtDoc} = couch_db:open_doc(TgtDb, <<"doc">>),
- couch_db:close(TgtDb),
- [Att] = TgtDoc#doc.atts,
- ?assertEqual(1000, couch_att:fetch(att_len, Att))
- end)}.
-
-
-get_json(Json, Path) ->
- couch_util:get_nested_json_value(Json, Path).
-
-to_hex(Val) ->
- to_hex(Val, []).
-
-to_hex(0, Acc) ->
- Acc;
-to_hex(Val, Acc) ->
- to_hex(Val div 16, [hex_char(Val rem 16) | Acc]).
-
-hex_char(V) when V < 10 -> $0 + V;
-hex_char(V) -> $A + V - 10.
-
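-% Encode Chunks as an HTTP chunked transfer-encoding body: each chunk is
-% preceded by its size in hex plus CRLF, and the body ends with a zero-size chunk.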
-chunked_body(Chunks) ->
- chunked_body(Chunks, []).
-
-chunked_body([], Acc) ->
- iolist_to_binary(lists:reverse(Acc, "0\r\n"));
-chunked_body([Chunk | Rest], Acc) ->
- Size = to_hex(size(Chunk)),
- chunked_body(Rest, ["\r\n", Chunk, "\r\n", Size | Acc]).
-
-get_socket() ->
- Options = [binary, {packet, 0}, {active, false}],
- Port = mochiweb_socket_server:get(couch_httpd, port),
- {ok, Sock} = gen_tcp:connect(bind_address(), Port, Options),
- Sock.
-
-bind_address() ->
- case config:get("httpd", "bind_address") of
- undefined -> any;
- Address -> Address
- end.
-
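-% Send a raw HTTP request over a plain socket (so chunked bodies and MD5
-% trailers can be exercised) and return the status code and decoded JSON body.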
-request(Method, Url, Headers, Body) ->
- RequestHead = [Method, " ", Url, " HTTP/1.1"],
- RequestHeaders = [[string:join([Key, Value], ": "), "\r\n"]
- || {Key, Value} <- Headers],
- Request = [RequestHead, "\r\n", RequestHeaders, "\r\n", Body],
- Sock = get_socket(),
- gen_tcp:send(Sock, list_to_binary(lists:flatten(Request))),
- timer:sleep(?TIMEWAIT), % wait so the complete response has arrived
- {ok, R} = gen_tcp:recv(Sock, 0),
- gen_tcp:close(Sock),
- [Header, Body1] = re:split(R, "\r\n\r\n", [{return, binary}]),
- {ok, {http_response, _, Code, _}, _} =
- erlang:decode_packet(http, Header, []),
- Json = jiffy:decode(Body1),
- {ok, Code, Json}.
-
-create_standalone_text_att(Host, DbName) ->
- {ok, Data} = file:read_file(?FIXTURE_TXT),
- Url = string:join([Host, DbName, "doc", ?b2l(?ATT_TXT_NAME)], "/"),
- {ok, Code, _Headers, _Body} = test_request:put(
- Url, [{"Content-Type", "text/plain"}], Data),
- ?assertEqual(201, Code),
- Url.
-
-create_standalone_png_att(Host, DbName) ->
- {ok, Data} = file:read_file(?FIXTURE_PNG),
- Url = string:join([Host, DbName, "doc", ?b2l(?ATT_BIN_NAME)], "/"),
- {ok, Code, _Headers, _Body} = test_request:put(
- Url, [{"Content-Type", "image/png"}], Data),
- ?assertEqual(201, Code),
- Url.
-
-create_inline_text_att(Host, DbName) ->
- {ok, Data} = file:read_file(?FIXTURE_TXT),
- Url = string:join([Host, DbName, "doc"], "/"),
- Doc = {[
- {<<"_attachments">>, {[
- {?ATT_TXT_NAME, {[
- {<<"content_type">>, <<"text/plain">>},
- {<<"data">>, base64:encode(Data)}
- ]}
- }]}}
- ]},
- {ok, Code, _Headers, _Body} = test_request:put(
- Url, [{"Content-Type", "application/json"}], jiffy:encode(Doc)),
- ?assertEqual(201, Code),
- string:join([Url, ?b2l(?ATT_TXT_NAME)], "/").
-
-create_inline_png_att(Host, DbName) ->
- {ok, Data} = file:read_file(?FIXTURE_PNG),
- Url = string:join([Host, DbName, "doc"], "/"),
- Doc = {[
- {<<"_attachments">>, {[
- {?ATT_BIN_NAME, {[
- {<<"content_type">>, <<"image/png">>},
- {<<"data">>, base64:encode(Data)}
- ]}
- }]}}
- ]},
- {ok, Code, _Headers, _Body} = test_request:put(
- Url, [{"Content-Type", "application/json"}], jiffy:encode(Doc)),
- ?assertEqual(201, Code),
- string:join([Url, ?b2l(?ATT_BIN_NAME)], "/").
-
-create_already_compressed_att(Host, DbName) ->
- {ok, Data} = file:read_file(?FIXTURE_TXT),
- Url = string:join([Host, DbName, "doc", ?b2l(?ATT_TXT_NAME)], "/"),
- {ok, Code, _Headers, _Body} = test_request:put(
- Url, [{"Content-Type", "text/plain"}, {"Content-Encoding", "gzip"}],
- zlib:gzip(Data)),
- ?assertEqual(201, Code),
- Url.
-
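-% gzip-compress Data at ?COMPRESSION_LEVEL; window bits 16 + 15 make zlib emit
-% a gzip-wrapped stream, so its size can be compared with the server-reported
-% encoded_length.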
-gzip(Data) ->
- Z = zlib:open(),
- ok = zlib:deflateInit(Z, ?COMPRESSION_LEVEL, deflated, 16 + 15, 8, default),
- Chunk = zlib:deflate(Z, Data),
- Last = zlib:deflate(Z, [], finish),
- ok = zlib:deflateEnd(Z),
- ok = zlib:close(Z),
- [Chunk, Last].
diff --git a/src/couch/test/eunit/couchdb_auth_tests.erl b/src/couch/test/eunit/couchdb_auth_tests.erl
index 19d32d0c5..92f8a0a1c 100644
--- a/src/couch/test/eunit/couchdb_auth_tests.erl
+++ b/src/couch/test/eunit/couchdb_auth_tests.erl
@@ -49,7 +49,6 @@ auth_test_() ->
fun() -> test_util:start_couch([chttpd]) end, fun test_util:stop_couch/1,
[
make_test_cases(clustered, Tests),
- make_test_cases(backdoor, Tests),
make_require_valid_user_test_cases(clustered, RequireValidUserTests)
]
}
@@ -86,12 +85,6 @@ should_not_return_authenticated_field(_PortType, Url) ->
<<"info">>, <<"authenticated">>])
end).
-should_return_list_of_handlers(backdoor, Url) ->
- ?_assertEqual([<<"cookie">>,<<"default">>],
- begin
- couch_util:get_nested_json_value(session(Url), [
- <<"info">>, <<"authentication_handlers">>])
- end);
should_return_list_of_handlers(clustered, Url) ->
?_assertEqual([<<"cookie">>,<<"default">>],
begin
@@ -110,6 +103,4 @@ session(Url) ->
jiffy:decode(Body).
port(clustered) ->
- integer_to_list(mochiweb_socket_server:get(chttpd, port));
-port(backdoor) ->
- integer_to_list(mochiweb_socket_server:get(couch_httpd, port)).
+ integer_to_list(mochiweb_socket_server:get(chttpd, port)).
diff --git a/src/couch/test/eunit/couchdb_cors_tests.erl b/src/couch/test/eunit/couchdb_cors_tests.erl
index 82630bba7..0e0926c1f 100644
--- a/src/couch/test/eunit/couchdb_cors_tests.erl
+++ b/src/couch/test/eunit/couchdb_cors_tests.erl
@@ -26,21 +26,20 @@
?assertEqual(lists:usort(A), lists:usort(B))).
start() ->
- Ctx = test_util:start_couch([ioq]),
+ Ctx = test_util:start_couch([chttpd]),
ok = config:set("httpd", "enable_cors", "true", false),
ok = config:set("vhosts", "example.com", "/", false),
Ctx.
setup() ->
DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- couch_db:close(Db),
+ {ok, _} = fabric2_db:create(DbName, [?ADMIN_CTX]),
config:set("cors", "credentials", "false", false),
config:set("cors", "origins", "http://example.com", false),
Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+ Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)),
Host = "http://" ++ Addr ++ ":" ++ Port,
{Host, ?b2l(DbName)}.
@@ -57,7 +56,7 @@ setup({Mod, VHost}) ->
{Host, DbName, Url, DefaultHeaders}.
teardown(DbName) when is_list(DbName) ->
- ok = couch_server:delete(?l2b(DbName), [?ADMIN_CTX]),
+ ok = fabric2_db:delete(?l2b(DbName), [?ADMIN_CTX]),
ok;
teardown({_, DbName}) ->
teardown(DbName).
diff --git a/src/couch/test/eunit/couchdb_db_tests.erl b/src/couch/test/eunit/couchdb_db_tests.erl
deleted file mode 100644
index 734bafb9f..000000000
--- a/src/couch/test/eunit/couchdb_db_tests.erl
+++ /dev/null
@@ -1,91 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_db_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-setup() ->
- DbName = ?b2l(?tempdb()),
- fabric:create_db(DbName),
- DbName.
-
-
-teardown(DbName) ->
- (catch fabric:delete_db(DbName)),
- ok.
-
-
-clustered_db_test_() ->
- {
- "Checking clustered db API",
- {
- setup,
- fun() -> test_util:start_couch([ddoc_cache, mem3]) end,
- fun test_util:stop/1,
- [
- {
- "DB deletion",
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_close_deleted_db/1,
- fun should_kill_caller_from_load_validation_funs_for_deleted_db/1
- ]
- }
- }
- ]
- }
- }.
-
-
-should_close_deleted_db(DbName) ->
- ?_test(begin
- [#shard{name = ShardName} | _] = mem3:shards(DbName),
- {ok, Db} = couch_db:open(ShardName, []),
-
- MonitorRef = couch_db:monitor(Db),
- fabric:delete_db(DbName),
- receive
- {'DOWN', MonitorRef, _Type, _Pid, _Info} ->
- ok
- after 2000 ->
- throw(timeout_error)
- end,
- test_util:wait(fun() ->
- case ets:lookup(couch_dbs, DbName) of
- [] -> ok;
- _ -> wait
- end
- end),
- ?assertEqual([], ets:lookup(couch_dbs, DbName))
- end).
-
-
-should_kill_caller_from_load_validation_funs_for_deleted_db(DbName) ->
- ?_test(begin
- [#shard{name = ShardName} | _] = mem3:shards(DbName),
- {ok, Db} = couch_db:open(ShardName, []),
-
- MonitorRef = couch_db:monitor(Db),
- fabric:delete_db(DbName),
- receive
- {'DOWN', MonitorRef, _Type, _Pid, _Info} ->
- ok
- after 2000 ->
- throw(timeout_error)
- end,
- ?assertError(database_does_not_exist, couch_db:load_validation_funs(Db))
- end).
diff --git a/src/couch/test/eunit/couchdb_design_doc_tests.erl b/src/couch/test/eunit/couchdb_design_doc_tests.erl
deleted file mode 100644
index 653a6cb17..000000000
--- a/src/couch/test/eunit/couchdb_design_doc_tests.erl
+++ /dev/null
@@ -1,87 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_design_doc_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- create_design_doc(DbName, <<"_design/foo">>),
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- BaseUrl = "http://" ++ Addr ++ ":" ++ Port,
- {?b2l(DbName), BaseUrl}.
-
-
-teardown({DbName, _}) ->
- couch_server:delete(?l2b(DbName), [?ADMIN_CTX]),
- ok.
-
-
-design_list_test_() ->
- {
- "Check _list functionality",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_return_empty_when_plain_return/1,
- fun should_return_empty_when_no_docs/1
- ]
- }
- }
- }.
-
-should_return_empty_when_plain_return({DbName, BaseUrl}) ->
- ?_test(begin
- ?assertEqual(<<>>,
- query_text(BaseUrl, DbName, "foo", "_list/plain_return/simple_view"))
- end).
-
-should_return_empty_when_no_docs({DbName, BaseUrl}) ->
- ?_test(begin
- ?assertEqual(<<>>,
- query_text(BaseUrl, DbName, "foo", "_list/simple_render/simple_view"))
- end).
-
-create_design_doc(DbName, DDName) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDName},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {<<"simple_view">>, {[
- {<<"map">>, <<"function(doc) {emit(doc._id, doc)}">> },
- {<<"reduce">>, <<"function (key, values, rereduce) {return sum(values);}">> }
- ]}}
- ]}},
- {<<"lists">>, {[
- {<<"plain_return">>, <<"function(head, req) {return;}">>},
- {<<"simple_render">>, <<"function(head, req) {var row; while(row=getRow()) {send(JSON.stringify(row)); }}">>}
- ]}}
- ]}),
- {ok, Rev} = couch_db:update_doc(Db, DDoc, []),
- couch_db:close(Db),
- Rev.
-
-query_text(BaseUrl, DbName, DDoc, Path) ->
- {ok, Code, _Headers, Body} = test_request:get(
- BaseUrl ++ "/" ++ DbName ++ "/_design/" ++ DDoc ++ "/" ++ Path),
- ?assertEqual(200, Code),
- Body.
diff --git a/src/couch/test/eunit/couchdb_file_compression_tests.erl b/src/couch/test/eunit/couchdb_file_compression_tests.erl
deleted file mode 100644
index 77250337c..000000000
--- a/src/couch/test/eunit/couchdb_file_compression_tests.erl
+++ /dev/null
@@ -1,250 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_file_compression_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(DDOC_ID, <<"_design/test">>).
--define(DOCS_COUNT, 1000).
--define(TIMEOUT, 60).
-
-setup_all() ->
- Ctx = test_util:start_couch(),
- config:set("couchdb", "file_compression", "none", false),
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = populate_db(Db, ?DOCS_COUNT),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, ?DDOC_ID},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {<<"by_id">>, {[
- {<<"map">>, <<"function(doc){emit(doc._id, doc.string);}">>}
- ]}}
- ]}
- }
- ]}),
- {ok, _} = couch_db:update_doc(Db, DDoc, []),
- ok = couch_db:close(Db),
- {Ctx, DbName}.
-
-
-teardown_all({Ctx, DbName}) ->
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- test_util:stop_couch(Ctx).
-
-
-couch_file_compression_test_() ->
- {
- "CouchDB file compression tests",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {with, [
- fun should_use_none/1,
- fun should_use_deflate_1/1,
- fun should_use_deflate_9/1,
- fun should_use_snappy/1,
- fun should_compare_compression_methods/1
- ]}
- }
- }.
-
-
-should_use_none({_, DbName}) -> run_test(DbName, "none").
-should_use_deflate_1({_, DbName}) -> run_test(DbName, "deflate_1").
-should_use_deflate_9({_, DbName}) -> run_test(DbName, "deflate_9").
-should_use_snappy({_, DbName}) -> run_test(DbName, "snappy").
-
-
-should_compare_compression_methods({_, DbName}) ->
- TestDb = setup_db(DbName),
- Name = "none > snappy > deflate_1 > deflate_9",
- try
- {Name, {timeout, ?TIMEOUT, ?_test(compare_methods(TestDb))}}
- after
- couch_server:delete(TestDb, [?ADMIN_CTX])
- end.
-
-
-run_test(DbName, Comp) ->
- config:set("couchdb", "file_compression", Comp, false),
- Timeout = 5 + ?TIMEOUT,
- TestDb = setup_db(DbName),
- Tests = [
- {"compact database", {timeout, Timeout, ?_test(compact_db(DbName))}},
- {"compact view", {timeout, Timeout, ?_test(compact_view(DbName))}}
- ],
- try
- {"Use compression: " ++ Comp, Tests}
- after
- ok = couch_server:delete(TestDb, [?ADMIN_CTX])
- end.
-
-
-compare_methods(DbName) ->
- config:set("couchdb", "file_compression", "none", false),
- ExternalSizePreCompact = db_external_size(DbName),
- compact_db(DbName),
- compact_view(DbName),
- DbSizeNone = db_disk_size(DbName),
- ViewSizeNone = view_disk_size(DbName),
- ExternalSizeNone = db_external_size(DbName),
- ViewExternalSizeNone = view_external_size(DbName),
-
- config:set("couchdb", "file_compression", "snappy", false),
- compact_db(DbName),
- compact_view(DbName),
- DbSizeSnappy = db_disk_size(DbName),
- ViewSizeSnappy = view_disk_size(DbName),
- ExternalSizeSnappy = db_external_size(DbName),
- ViewExternalSizeSnappy = view_external_size(DbName),
-
- ?assert(DbSizeNone > DbSizeSnappy),
- ?assert(ViewSizeNone > ViewSizeSnappy),
-
- config:set("couchdb", "file_compression", "deflate_1", false),
- compact_db(DbName),
- compact_view(DbName),
- DbSizeDeflate1 = db_disk_size(DbName),
- ViewSizeDeflate1 = view_disk_size(DbName),
-
- ?assert(DbSizeSnappy > DbSizeDeflate1),
- ?assert(ViewSizeSnappy > ViewSizeDeflate1),
-
- config:set("couchdb", "file_compression", "deflate_9", false),
- compact_db(DbName),
- compact_view(DbName),
- DbSizeDeflate9 = db_disk_size(DbName),
- ViewSizeDeflate9 = view_disk_size(DbName),
- ExternalSizeDeflate9 = db_external_size(DbName),
- ViewExternalSizeDeflate9 = view_external_size(DbName),
-
- ?assert(DbSizeDeflate1 > DbSizeDeflate9),
- ?assert(ViewSizeDeflate1 > ViewSizeDeflate9),
- ?assert(ExternalSizePreCompact >= ExternalSizeNone),
- ?assert(ExternalSizeNone =:= ExternalSizeSnappy),
- ?assert(ExternalSizeNone =:= ExternalSizeDeflate9),
- ?assert(ViewExternalSizeNone =:= ViewExternalSizeSnappy),
- ?assert(ViewExternalSizeNone =:= ViewExternalSizeDeflate9).
-
-
-populate_db(_Db, NumDocs) when NumDocs =< 0 ->
- ok;
-populate_db(Db, NumDocs) ->
- Docs = lists:map(
- fun(_) ->
- couch_doc:from_json_obj({[
- {<<"_id">>, couch_uuids:random()},
- {<<"string">>, ?l2b(lists:duplicate(1000, $X))}
- ]})
- end,
- lists:seq(1, 500)),
- {ok, _} = couch_db:update_docs(Db, Docs, []),
- populate_db(Db, NumDocs - 500).
-
-
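-% Clone the populated source database by copying its .couch file to a new
-% temp db name, then build the view index so both db and view files exist.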
-setup_db(SrcDbName) ->
- TgtDbName = ?tempdb(),
- TgtDbFileName = binary_to_list(TgtDbName) ++ ".couch",
- couch_util:with_db(SrcDbName, fun(Db) ->
- OldPath = couch_db:get_filepath(Db),
- NewPath = filename:join(filename:dirname(OldPath), TgtDbFileName),
- {ok, _} = file:copy(OldPath, NewPath)
- end),
- refresh_index(TgtDbName),
- TgtDbName.
-
-
-refresh_index(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, DDoc} = couch_db:open_doc(Db, ?DDOC_ID, [ejson_body]),
- couch_mrview:query_view(Db, DDoc, <<"by_id">>, [{update, true}]),
- ok = couch_db:close(Db).
-
-compact_db(DbName) ->
- DiskSizeBefore = db_disk_size(DbName),
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, _CompactPid} = couch_db:start_compact(Db),
- wait_compaction(DbName, "database", ?LINE),
- ok = couch_db:close(Db),
- DiskSizeAfter = db_disk_size(DbName),
- ?assert(DiskSizeBefore > DiskSizeAfter).
-
-compact_view(DbName) ->
- DiskSizeBefore = view_disk_size(DbName),
- {ok, _MonRef} = couch_mrview:compact(DbName, ?DDOC_ID, [monitor]),
- wait_compaction(DbName, "view group", ?LINE),
- DiskSizeAfter = view_disk_size(DbName),
- ?assert(DiskSizeBefore > DiskSizeAfter).
-
-db_disk_size(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, Info} = couch_db:get_db_info(Db),
- ok = couch_db:close(Db),
- active_size(Info).
-
-db_external_size(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, Info} = couch_db:get_db_info(Db),
- ok = couch_db:close(Db),
- external_size(Info).
-
-view_disk_size(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, DDoc} = couch_db:open_doc(Db, ?DDOC_ID, [ejson_body]),
- {ok, Info} = couch_mrview:get_info(Db, DDoc),
- ok = couch_db:close(Db),
- active_size(Info).
-
-view_external_size(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, DDoc} = couch_db:open_doc(Db, ?DDOC_ID, [ejson_body]),
- {ok, Info} = couch_mrview:get_info(Db, DDoc),
- ok = couch_db:close(Db),
- external_size(Info).
-
-active_size(Info) ->
- couch_util:get_nested_json_value({Info}, [sizes, active]).
-
-external_size(Info) ->
- couch_util:get_nested_json_value({Info}, [sizes, external]).
-
-wait_compaction(DbName, Kind, Line) ->
- WaitFun = fun() ->
- case is_compaction_running(DbName) of
- true -> wait;
- false -> ok
- end
- end,
- case test_util:wait(WaitFun, ?TIMEOUT) of
- timeout ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, Line},
- {reason, "Timeout waiting for "
- ++ Kind
- ++ " database compaction"}]});
- _ ->
- ok
- end.
-
-is_compaction_running(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, DbInfo} = couch_db:get_db_info(Db),
- {ok, ViewInfo} = couch_mrview:get_info(Db, ?DDOC_ID),
- couch_db:close(Db),
- (couch_util:get_value(compact_running, ViewInfo) =:= true)
- orelse (couch_util:get_value(compact_running, DbInfo) =:= true).
diff --git a/src/couch/test/eunit/couchdb_location_header_tests.erl b/src/couch/test/eunit/couchdb_location_header_tests.erl
deleted file mode 100644
index c6c039eb0..000000000
--- a/src/couch/test/eunit/couchdb_location_header_tests.erl
+++ /dev/null
@@ -1,78 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_location_header_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- couch_db:close(Db),
-
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- Host = "http://" ++ Addr ++ ":" ++ Port,
- {Host, ?b2l(DbName)}.
-
-teardown({_, DbName}) ->
- ok = couch_server:delete(?l2b(DbName), [?ADMIN_CTX]),
- ok.
-
-
-header_test_() ->
- {
- "CouchDB Location Header Tests",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_work_with_newlines_in_docs/1,
- fun should_work_with_newlines_in_attachments/1
- ]
- }
- }
- }.
-
-should_work_with_newlines_in_docs({Host, DbName}) ->
- Url = Host ++ "/" ++ DbName ++ "/docid%0A",
- {"COUCHDB-708",
- ?_assertEqual(
- Url,
- begin
- {ok, _, Headers, _} = test_request:put(Url,
- [{"Content-Type", "application/json"}], "{}"),
- proplists:get_value("Location", Headers)
- end)}.
-
-should_work_with_newlines_in_attachments({Host, DbName}) ->
- Url = Host ++ "/" ++ DbName,
- AttUrl = Url ++ "/docid%0A/readme.txt",
- {"COUCHDB-708",
- ?_assertEqual(
- AttUrl,
- begin
- Body = "We all live in a yellow submarine!",
- Headers0 = [
- {"Content-Length", "34"},
- {"Content-Type", "text/plain"}
- ],
- {ok, _, Headers, _} = test_request:put(AttUrl, Headers0, Body),
- proplists:get_value("Location", Headers)
- end)}.
diff --git a/src/couch/test/eunit/couchdb_mrview_cors_tests.erl b/src/couch/test/eunit/couchdb_mrview_cors_tests.erl
index 0f69048a0..a9215f5d7 100644
--- a/src/couch/test/eunit/couchdb_mrview_cors_tests.erl
+++ b/src/couch/test/eunit/couchdb_mrview_cors_tests.erl
@@ -70,8 +70,7 @@ show_tests() ->
{
"Check CORS for show",
[
- make_test_case(clustered, [fun should_make_shows_request/2]),
- make_test_case(backdoor, [fun should_make_shows_request/2])
+ make_test_case(clustered, [fun should_make_shows_request/2])
]
}.
@@ -86,22 +85,14 @@ should_make_shows_request(_, {Host, DbName}) ->
ReqUrl = Host ++ "/" ++ DbName ++ "/_design/foo/_show/bar",
Headers = [{"Origin", "http://example.com"},
{"Access-Control-Request-Method", "GET"}, ?AUTH],
- {ok, _, Resp, Body} = test_request:get(ReqUrl, Headers),
- Origin = proplists:get_value("Access-Control-Allow-Origin", Resp),
- ?assertEqual("http://example.com", Origin),
- ?assertEqual(<<"<h1>wosh</h1>">>, Body)
+ ?assertMatch({ok, 410, _, _}, test_request:get(ReqUrl, Headers))
end).
-create_db(backdoor, DbName) ->
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- couch_db:close(Db);
create_db(clustered, DbName) ->
{ok, Status, _, _} = test_request:put(db_url(DbName), [?AUTH], ""),
assert_success(create_db, Status),
ok.
-delete_db(backdoor, DbName) ->
- couch_server:delete(DbName, [?ADMIN_CTX]);
delete_db(clustered, DbName) ->
{ok, Status, _, _} = test_request:delete(db_url(DbName), [?AUTH]),
assert_success(delete_db, Status),
@@ -119,7 +110,6 @@ host_url(PortType) ->
bind_address(PortType) ->
config:get(section(PortType), "bind_address", "127.0.0.1").
-section(backdoor) -> "http";
section(clustered) -> "chttpd".
db_url(DbName) when is_binary(DbName) ->
@@ -128,9 +118,7 @@ db_url(DbName) when is_list(DbName) ->
host_url(clustered) ++ "/" ++ DbName.
port(clustered) ->
- integer_to_list(mochiweb_socket_server:get(chttpd, port));
-port(backdoor) ->
- integer_to_list(mochiweb_socket_server:get(couch_httpd, port)).
+ integer_to_list(mochiweb_socket_server:get(chttpd, port)).
upload_ddoc(Host, DbName) ->
diff --git a/src/couch/test/eunit/couchdb_mrview_tests.erl b/src/couch/test/eunit/couchdb_mrview_tests.erl
deleted file mode 100644
index ec77b190d..000000000
--- a/src/couch/test/eunit/couchdb_mrview_tests.erl
+++ /dev/null
@@ -1,261 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_mrview_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
-
--define(DDOC, {[
- {<<"_id">>, <<"_design/foo">>},
- {<<"shows">>, {[
- {<<"bar">>, <<"function(doc, req) {return '<h1>wosh</h1>';}">>}
- ]}},
- {<<"updates">>, {[
- {<<"report">>, <<"function(doc, req) {"
- "var data = JSON.parse(req.body); "
- "return ['test', data];"
- "}">>}
- ]}},
- {<<"views">>, {[
- {<<"view1">>, {[
- {<<"map">>, <<"function(doc){emit(doc._id, doc._rev)}">>}
- ]}}
- ]}}
-]}).
-
--define(USER, "admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
-
-
-setup_all() ->
- Ctx = test_util:start_couch([chttpd]),
- ok = meck:new(mochiweb_socket, [passthrough]),
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
- Ctx.
-
-teardown_all(Ctx) ->
- meck:unload(),
- ok = config:delete("admins", ?USER, _Persist=false),
- test_util:stop_couch(Ctx).
-
-setup(PortType) ->
- meck:reset([mochiweb_socket]),
- ok = meck:expect(mochiweb_socket, recv, fun mochiweb_socket_recv/3),
-
- DbName = ?tempdb(),
- ok = create_db(PortType, DbName),
-
- Host = host_url(PortType),
- upload_ddoc(Host, ?b2l(DbName)),
- {Host, ?b2l(DbName)}.
-
-teardown(PortType, {_Host, DbName}) ->
- delete_db(PortType, ?l2b(DbName)),
- ok.
-
-mrview_show_test_() ->
- {
- "Check show functionality",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- [
- make_test_case(clustered, [fun should_return_invalid_request_body/2]),
- make_test_case(backdoor, [fun should_return_invalid_request_body/2])
- ]
- }
- }.
-
-mrview_query_test_() ->
- {
- "Check view query functionality",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- [
- make_test_case(clustered, [fun should_return_400_for_wrong_order_of_keys/2]),
- make_test_case(backdoor, [fun should_return_400_for_wrong_order_of_keys/2])
- ]
- }
- }.
-
-mrview_cleanup_index_files_test_() ->
- {
- "Check index files cleanup",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- [
- make_test_case(clustered, [fun should_cleanup_index_files/2])
- ]
- }
- }.
-
-
-make_test_case(Mod, Funs) ->
- {
- lists:flatten(io_lib:format("~s", [Mod])),
- {
- foreachx,
- fun setup/1,
- fun teardown/2,
- [{Mod, Fun} || Fun <- Funs]
- }
- }.
-
-should_return_invalid_request_body(PortType, {Host, DbName}) ->
- ?_test(begin
- ok = create_doc(PortType, ?l2b(DbName), <<"doc_id">>, {[]}),
- ReqUrl = Host ++ "/" ++ DbName ++ "/_design/foo/_update/report/doc_id",
- {ok, Status, _Headers, Body} =
- test_request:post(ReqUrl, [?AUTH], <<"{truncated}">>),
- {Props} = jiffy:decode(Body),
- ?assertEqual(
- <<"bad_request">>, couch_util:get_value(<<"error">>, Props)),
- ?assertEqual(
- <<"Invalid request body">>, couch_util:get_value(<<"reason">>, Props)),
- ?assertEqual(400, Status),
- ok
- end).
-
-should_return_400_for_wrong_order_of_keys(_PortType, {Host, DbName}) ->
- Args = [{start_key, "\"bbb\""}, {end_key, "\"aaa\""}],
- ?_test(begin
- ReqUrl = Host ++ "/" ++ DbName
- ++ "/_design/foo/_view/view1?" ++ mochiweb_util:urlencode(Args),
- {ok, Status, _Headers, Body} = test_request:get(ReqUrl, [?AUTH]),
- {Props} = jiffy:decode(Body),
- ?assertEqual(
- <<"query_parse_error">>, couch_util:get_value(<<"error">>, Props)),
- ?assertEqual(
- <<"No rows can match your key range, reverse your start_key and end_key or set descending=true">>,
- couch_util:get_value(<<"reason">>, Props)),
- ?assertEqual(400, Status),
- ok
- end).
-
-should_cleanup_index_files(_PortType, {Host, DbName}) ->
- ?_test(begin
- IndexWildCard = [
- config:get("couchdb", "view_index_dir"),
- "/.shards/*/",
- DbName,
- ".[0-9]*_design/mrview/*"
- ],
- ReqUrl = Host ++ "/" ++ DbName ++ "/_design/foo/_view/view1",
- {ok, _Status0, _Headers0, _Body0} = test_request:get(ReqUrl, [?AUTH]),
- FileList0 = filelib:wildcard(IndexWildCard),
- ?assertNotEqual([], FileList0),
-
- % It is hard to simulate an inactive view, since couch_mrview:cleanup
- % is only called when a view definition changes. That's why we just
- % create extra files in place.
- ToDelete = lists:map(fun(FilePath) ->
- ViewFile = filename:join([
- filename:dirname(FilePath),
- "11111111111111111111111111111111.view"]),
- file:write_file(ViewFile, <<>>),
- ViewFile
- end, FileList0),
- FileList1 = filelib:wildcard(IndexWildCard),
- ?assertEqual([], lists:usort(FileList1 -- (FileList0 ++ ToDelete))),
-
- CleanupUrl = Host ++ "/" ++ DbName ++ "/_view_cleanup",
- {ok, _Status1, _Headers1, _Body1} = test_request:post(
- CleanupUrl, [], <<>>, [?AUTH]),
- test_util:wait(fun() ->
- IndexFiles = filelib:wildcard(IndexWildCard),
- case lists:usort(FileList0) == lists:usort(IndexFiles) of
- false -> wait;
- true -> ok
- end
- end),
- ok
- end).
-
-
-create_doc(backdoor, DbName, Id, Body) ->
- JsonDoc = couch_util:json_apply_field({<<"_id">>, Id}, Body),
- Doc = couch_doc:from_json_obj(JsonDoc),
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- {ok, _} = couch_db:update_docs(Db, [Doc]),
- couch_db:close(Db);
-create_doc(clustered, DbName, Id, Body) ->
- JsonDoc = couch_util:json_apply_field({<<"_id">>, Id}, Body),
- Doc = couch_doc:from_json_obj(JsonDoc),
- {ok, _} = fabric:update_docs(DbName, [Doc], [?ADMIN_CTX]),
- ok.
-
-create_db(backdoor, DbName) ->
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- couch_db:close(Db);
-create_db(clustered, DbName) ->
- {ok, Status, _, _} = test_request:put(db_url(DbName), [?AUTH], ""),
- assert_success(create_db, Status),
- ok.
-
-delete_db(backdoor, DbName) ->
- couch_server:delete(DbName, [?ADMIN_CTX]);
-delete_db(clustered, DbName) ->
- {ok, Status, _, _} = test_request:delete(db_url(DbName), [?AUTH]),
- assert_success(delete_db, Status),
- ok.
-
-assert_success(create_db, Status) ->
- ?assert(lists:member(Status, [201, 202]));
-assert_success(delete_db, Status) ->
- ?assert(lists:member(Status, [200, 202])).
-
-
-host_url(PortType) ->
- "http://" ++ bind_address(PortType) ++ ":" ++ port(PortType).
-
-bind_address(PortType) ->
- config:get(section(PortType), "bind_address", "127.0.0.1").
-
-section(backdoor) -> "http";
-section(clustered) -> "chttpd".
-
-db_url(DbName) when is_binary(DbName) ->
- db_url(binary_to_list(DbName));
-db_url(DbName) when is_list(DbName) ->
- host_url(clustered) ++ "/" ++ DbName.
-
-port(clustered) ->
- integer_to_list(mochiweb_socket_server:get(chttpd, port));
-port(backdoor) ->
- integer_to_list(mochiweb_socket_server:get(couch_httpd, port)).
-
-
-upload_ddoc(Host, DbName) ->
- Url = Host ++ "/" ++ DbName ++ "/_design/foo",
- Body = couch_util:json_encode(?DDOC),
- {ok, 201, _Resp, _Body} = test_request:put(Url, [?AUTH], Body),
- ok.
-
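-% meck replacement for mochiweb_socket:recv/3: when the request body is the
-% literal <<"{truncated}">>, report the connection as closed to simulate a
-% truncated request.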
-mochiweb_socket_recv(Sock, Len, Timeout) ->
- case meck:passthrough([Sock, Len, Timeout]) of
- {ok, <<"{truncated}">>} ->
- {error, closed};
- {ok, Data} ->
- {ok, Data};
- Else ->
- Else
- end.
diff --git a/src/couch/test/eunit/couchdb_update_conflicts_tests.erl b/src/couch/test/eunit/couchdb_update_conflicts_tests.erl
deleted file mode 100644
index 1329aba27..000000000
--- a/src/couch/test/eunit/couchdb_update_conflicts_tests.erl
+++ /dev/null
@@ -1,280 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_update_conflicts_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(i2l(I), integer_to_list(I)).
--define(DOC_ID, <<"foobar">>).
--define(LOCAL_DOC_ID, <<"_local/foobar">>).
--define(NUM_CLIENTS, [100, 500, 1000, 2000, 5000, 10000]).
--define(TIMEOUT, 20000).
-
-start() ->
- test_util:start_couch().
-
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX, overwrite]),
- Doc = couch_doc:from_json_obj({[{<<"_id">>, ?DOC_ID},
- {<<"value">>, 0}]}),
- {ok, Rev} = couch_db:update_doc(Db, Doc, []),
- ok = couch_db:close(Db),
- RevStr = couch_doc:rev_to_str(Rev),
- {DbName, RevStr}.
-setup(_) ->
- setup().
-
-teardown({DbName, _}) ->
- ok = couch_server:delete(DbName, []),
- ok.
-teardown(_, {DbName, _RevStr}) ->
- teardown({DbName, _RevStr}).
-
-
-view_indexes_cleanup_test_() ->
- {
- "Update conflicts",
- {
- setup,
- fun start/0, fun test_util:stop_couch/1,
- [
- concurrent_updates(),
- bulk_docs_updates()
- ]
- }
- }.
-
-concurrent_updates()->
- {
- "Concurrent updates",
- {
- foreachx,
- fun setup/1, fun teardown/2,
- [{NumClients, fun should_concurrently_update_doc/2}
- || NumClients <- ?NUM_CLIENTS]
- }
- }.
-
-bulk_docs_updates()->
- {
- "Bulk docs updates",
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_bulk_create_delete_doc/1,
- fun should_bulk_create_local_doc/1,
- fun should_ignore_invalid_local_doc/1
- ]
- }
- }.
-
-
-should_concurrently_update_doc(NumClients, {DbName, InitRev})->
- {?i2l(NumClients) ++ " clients",
- {inorder,
- [{"update doc",
- {timeout, ?TIMEOUT div 1000,
- ?_test(concurrent_doc_update(NumClients, DbName, InitRev))}},
- {"ensure in single leaf",
- ?_test(ensure_in_single_revision_leaf(DbName))}]}}.
-
-should_bulk_create_delete_doc({DbName, InitRev})->
- ?_test(bulk_delete_create(DbName, InitRev)).
-
-should_bulk_create_local_doc({DbName, _})->
- ?_test(bulk_create_local_doc(DbName)).
-
-should_ignore_invalid_local_doc({DbName, _})->
- ?_test(ignore_invalid_local_doc(DbName)).
-
-
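-% Spawn NumClients writers that all update ?DOC_ID from the same InitRev;
-% exactly one update should win and the remaining NumClients - 1 should report
-% conflicts, leaving a single leaf revision behind.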
-concurrent_doc_update(NumClients, DbName, InitRev) ->
- Clients = lists:map(
- fun(Value) ->
- ClientDoc = couch_doc:from_json_obj({[
- {<<"_id">>, ?DOC_ID},
- {<<"_rev">>, InitRev},
- {<<"value">>, Value}
- ]}),
- Pid = spawn_client(DbName, ClientDoc),
- {Value, Pid, erlang:monitor(process, Pid)}
- end,
- lists:seq(1, NumClients)),
-
- lists:foreach(fun({_, Pid, _}) -> Pid ! go end, Clients),
-
- {NumConflicts, SavedValue} = lists:foldl(
- fun({Value, Pid, MonRef}, {AccConflicts, AccValue}) ->
- receive
- {'DOWN', MonRef, process, Pid, {ok, _NewRev}} ->
- {AccConflicts, Value};
- {'DOWN', MonRef, process, Pid, conflict} ->
- {AccConflicts + 1, AccValue};
- {'DOWN', MonRef, process, Pid, Error} ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, "Client " ++ ?i2l(Value)
- ++ " got update error: "
- ++ couch_util:to_list(Error)}]})
- after ?TIMEOUT div 2 ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, "Timeout waiting for client "
- ++ ?i2l(Value) ++ " to die"}]})
- end
- end, {0, nil}, Clients),
- ?assertEqual(NumClients - 1, NumConflicts),
-
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, Leaves} = couch_db:open_doc_revs(Db, ?DOC_ID, all, []),
- ok = couch_db:close(Db),
- ?assertEqual(1, length(Leaves)),
-
- [{ok, Doc2}] = Leaves,
- {JsonDoc} = couch_doc:to_json_obj(Doc2, []),
- ?assertEqual(SavedValue, couch_util:get_value(<<"value">>, JsonDoc)).
-
-ensure_in_single_revision_leaf(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, Leaves} = couch_db:open_doc_revs(Db, ?DOC_ID, all, []),
- ok = couch_db:close(Db),
- [{ok, Doc}] = Leaves,
-
- %% FIXME: server restart won't work from test side
- %% stop(ok),
- %% start(),
-
- {ok, Db2} = couch_db:open_int(DbName, []),
- {ok, Leaves2} = couch_db:open_doc_revs(Db2, ?DOC_ID, all, []),
- ok = couch_db:close(Db2),
- ?assertEqual(1, length(Leaves2)),
-
-    [{ok, Doc2}] = Leaves2,
- ?assertEqual(Doc, Doc2).
-
-bulk_delete_create(DbName, InitRev) ->
- {ok, Db} = couch_db:open_int(DbName, []),
-
- DeletedDoc = couch_doc:from_json_obj({[
- {<<"_id">>, ?DOC_ID},
- {<<"_rev">>, InitRev},
- {<<"_deleted">>, true}
- ]}),
- NewDoc = couch_doc:from_json_obj({[
- {<<"_id">>, ?DOC_ID},
- {<<"value">>, 666}
- ]}),
-
- {ok, Results} = couch_db:update_docs(Db, [DeletedDoc, NewDoc], []),
- ok = couch_db:close(Db),
-
- ?assertEqual(2, length([ok || {ok, _} <- Results])),
- [{ok, Rev1}, {ok, Rev2}] = Results,
-
- {ok, Db2} = couch_db:open_int(DbName, []),
- {ok, [{ok, Doc1}]} = couch_db:open_doc_revs(
- Db2, ?DOC_ID, [Rev1], [conflicts, deleted_conflicts]),
- {ok, [{ok, Doc2}]} = couch_db:open_doc_revs(
- Db2, ?DOC_ID, [Rev2], [conflicts, deleted_conflicts]),
- ok = couch_db:close(Db2),
-
- {Doc1Props} = couch_doc:to_json_obj(Doc1, []),
- {Doc2Props} = couch_doc:to_json_obj(Doc2, []),
-
- %% Document was deleted
- ?assert(couch_util:get_value(<<"_deleted">>, Doc1Props)),
- %% New document not flagged as deleted
- ?assertEqual(undefined, couch_util:get_value(<<"_deleted">>,
- Doc2Props)),
- %% New leaf revision has the right value
- ?assertEqual(666, couch_util:get_value(<<"value">>,
- Doc2Props)),
- %% Deleted document has no conflicts
- ?assertEqual(undefined, couch_util:get_value(<<"_conflicts">>,
- Doc1Props)),
- %% Deleted document has no deleted conflicts
- ?assertEqual(undefined, couch_util:get_value(<<"_deleted_conflicts">>,
- Doc1Props)),
-    %% New leaf revision doesn't have conflicts
-    ?assertEqual(undefined, couch_util:get_value(<<"_conflicts">>,
-        Doc2Props)),
-    %% New leaf revision doesn't have deleted conflicts
-    ?assertEqual(undefined, couch_util:get_value(<<"_deleted_conflicts">>,
-        Doc2Props)),
-
- %% Deleted revision has position 2
- ?assertEqual(2, element(1, Rev1)),
- %% New leaf revision has position 3
- ?assertEqual(3, element(1, Rev2)).
-
-
-bulk_create_local_doc(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
-
- LocalDoc = couch_doc:from_json_obj({[
- {<<"_id">>, ?LOCAL_DOC_ID},
- {<<"_rev">>, <<"0-1">>}
- ]}),
-
- {ok, Results} = couch_db:update_docs(Db, [LocalDoc],
- [], replicated_changes),
- ok = couch_db:close(Db),
- ?assertEqual([], Results),
-
- {ok, Db2} = couch_db:open_int(DbName, []),
- {ok, LocalDoc1} = couch_db:open_doc_int(Db2, ?LOCAL_DOC_ID, []),
- ok = couch_db:close(Db2),
- ?assertEqual(?LOCAL_DOC_ID, LocalDoc1#doc.id),
- ?assertEqual({0, [<<"2">>]}, LocalDoc1#doc.revs).
-
-
-ignore_invalid_local_doc(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
-
- LocalDoc = couch_doc:from_json_obj({[
- {<<"_id">>, ?LOCAL_DOC_ID},
- {<<"_rev">>, <<"0-abcdef">>}
- ]}),
-
- {ok, Results} = couch_db:update_docs(Db, [LocalDoc],
- [], replicated_changes),
- ok = couch_db:close(Db),
- ?assertEqual([], Results),
-
- {ok, Db2} = couch_db:open_int(DbName, []),
- Result2 = couch_db:open_doc_int(Db2, ?LOCAL_DOC_ID, []),
- ok = couch_db:close(Db2),
- ?assertEqual({not_found, missing}, Result2).
-
-
-spawn_client(DbName, Doc) ->
- spawn(fun() ->
- {ok, Db} = couch_db:open_int(DbName, []),
- receive
- go -> ok
- end,
- erlang:yield(),
- Result = try
- couch_db:update_doc(Db, Doc, [])
- catch _:Error ->
- Error
- end,
- ok = couch_db:close(Db),
- exit(Result)
- end).
diff --git a/src/couch/test/eunit/couchdb_vhosts_tests.erl b/src/couch/test/eunit/couchdb_vhosts_tests.erl
deleted file mode 100644
index fbe5579cd..000000000
--- a/src/couch/test/eunit/couchdb_vhosts_tests.erl
+++ /dev/null
@@ -1,271 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_vhosts_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
--define(iofmt(S, A), lists:flatten(io_lib:format(S, A))).
-
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc1">>},
- {<<"value">>, 666}
- ]}),
-
- Doc1 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/doc1">>},
- {<<"shows">>, {[
- {<<"test">>, <<"function(doc, req) {
- return { json: {
- requested_path: '/' + req.requested_path.join('/'),
- path: '/' + req.path.join('/')}};}">>}
- ]}},
- {<<"rewrites">>, [
- {[
- {<<"from">>, <<"/">>},
- {<<"to">>, <<"_show/test">>}
- ]}
- ]}
- ]}),
- {ok, _} = couch_db:update_docs(Db, [Doc, Doc1]),
- couch_db:close(Db),
-
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- Url = "http://" ++ Addr ++ ":" ++ Port,
- {Url, ?b2l(DbName)}.
-
-teardown({_, DbName}) ->
- ok = couch_server:delete(?l2b(DbName), []),
- ok.
-
-
-vhosts_test_() ->
- {
- "Virtual Hosts rewrite tests",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_return_database_info/1,
- fun should_return_revs_info/1,
- fun should_return_virtual_request_path_field_in_request/1,
- fun should_return_real_request_path_field_in_request/1,
- fun should_match_wildcard_vhost/1,
- fun should_return_db_info_for_wildcard_vhost_for_custom_db/1,
- fun should_replace_rewrite_variables_for_db_and_doc/1,
- fun should_return_db_info_for_vhost_with_resource/1,
- fun should_return_revs_info_for_vhost_with_resource/1,
- fun should_return_db_info_for_vhost_with_wildcard_resource/1,
- fun should_return_path_for_vhost_with_wildcard_host/1
- ]
- }
- }
- }.
-
-should_return_database_info({Url, DbName}) ->
- ?_test(begin
- ok = config:set("vhosts", "example.com", "/" ++ DbName, false),
- case test_request:get(Url, [], [{host_header, "example.com"}]) of
- {ok, _, _, Body} ->
- {JsonBody} = jiffy:decode(Body),
- ?assert(proplists:is_defined(<<"db_name">>, JsonBody));
- Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
- end
- end).
-
-should_return_revs_info({Url, DbName}) ->
- ?_test(begin
- ok = config:set("vhosts", "example.com", "/" ++ DbName, false),
- case test_request:get(Url ++ "/doc1?revs_info=true", [],
- [{host_header, "example.com"}]) of
- {ok, _, _, Body} ->
- {JsonBody} = jiffy:decode(Body),
- ?assert(proplists:is_defined(<<"_revs_info">>, JsonBody));
- Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
- end
- end).
-
-should_return_virtual_request_path_field_in_request({Url, DbName}) ->
- ?_test(begin
- ok = config:set("vhosts", "example1.com",
- "/" ++ DbName ++ "/_design/doc1/_rewrite/",
- false),
- case test_request:get(Url, [], [{host_header, "example1.com"}]) of
- {ok, _, _, Body} ->
- {Json} = jiffy:decode(Body),
- ?assertEqual(<<"/">>,
- proplists:get_value(<<"requested_path">>, Json));
- Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
- end
- end).
-
-should_return_real_request_path_field_in_request({Url, DbName}) ->
- ?_test(begin
- ok = config:set("vhosts", "example1.com",
- "/" ++ DbName ++ "/_design/doc1/_rewrite/",
- false),
- case test_request:get(Url, [], [{host_header, "example1.com"}]) of
- {ok, _, _, Body} ->
- {Json} = jiffy:decode(Body),
- Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
- ?assertEqual(Path, proplists:get_value(<<"path">>, Json));
- Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
- end
- end).
-
-should_match_wildcard_vhost({Url, DbName}) ->
- ?_test(begin
- ok = config:set("vhosts", "*.example.com",
- "/" ++ DbName ++ "/_design/doc1/_rewrite", false),
- case test_request:get(Url, [], [{host_header, "test.example.com"}]) of
- {ok, _, _, Body} ->
- {Json} = jiffy:decode(Body),
- Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
- ?assertEqual(Path, proplists:get_value(<<"path">>, Json));
- Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
- end
- end).
-
-should_return_db_info_for_wildcard_vhost_for_custom_db({Url, DbName}) ->
- ?_test(begin
- ok = config:set("vhosts", ":dbname.example1.com",
- "/:dbname", false),
- Host = DbName ++ ".example1.com",
- case test_request:get(Url, [], [{host_header, Host}]) of
- {ok, _, _, Body} ->
- {JsonBody} = jiffy:decode(Body),
- ?assert(proplists:is_defined(<<"db_name">>, JsonBody));
- Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
- end
- end).
-
-should_replace_rewrite_variables_for_db_and_doc({Url, DbName}) ->
- ?_test(begin
- ok = config:set("vhosts",":appname.:dbname.example1.com",
- "/:dbname/_design/:appname/_rewrite/", false),
- Host = "doc1." ++ DbName ++ ".example1.com",
- case test_request:get(Url, [], [{host_header, Host}]) of
- {ok, _, _, Body} ->
- {Json} = jiffy:decode(Body),
- Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
- ?assertEqual(Path, proplists:get_value(<<"path">>, Json));
- Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
- end
- end).
-
-should_return_db_info_for_vhost_with_resource({Url, DbName}) ->
- ?_test(begin
- ok = config:set("vhosts",
- "example.com/test", "/" ++ DbName, false),
- ReqUrl = Url ++ "/test",
- case test_request:get(ReqUrl, [], [{host_header, "example.com"}]) of
- {ok, _, _, Body} ->
- {JsonBody} = jiffy:decode(Body),
- ?assert(proplists:is_defined(<<"db_name">>, JsonBody));
- Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
- end
- end).
-
-
-should_return_revs_info_for_vhost_with_resource({Url, DbName}) ->
- ?_test(begin
- ok = config:set("vhosts",
- "example.com/test", "/" ++ DbName, false),
- ReqUrl = Url ++ "/test/doc1?revs_info=true",
- case test_request:get(ReqUrl, [], [{host_header, "example.com"}]) of
- {ok, _, _, Body} ->
- {JsonBody} = jiffy:decode(Body),
- ?assert(proplists:is_defined(<<"_revs_info">>, JsonBody));
- Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
- end
- end).
-
-should_return_db_info_for_vhost_with_wildcard_resource({Url, DbName}) ->
- ?_test(begin
- ok = config:set("vhosts", "*.example2.com/test", "/*", false),
- ReqUrl = Url ++ "/test",
- Host = DbName ++ ".example2.com",
- case test_request:get(ReqUrl, [], [{host_header, Host}]) of
- {ok, _, _, Body} ->
- {JsonBody} = jiffy:decode(Body),
- ?assert(proplists:is_defined(<<"db_name">>, JsonBody));
- Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
- end
- end).
-
-should_return_path_for_vhost_with_wildcard_host({Url, DbName}) ->
- ?_test(begin
- ok = config:set("vhosts", "*/test1",
- "/" ++ DbName ++ "/_design/doc1/_show/test",
- false),
- case test_request:get(Url ++ "/test1") of
- {ok, _, _, Body} ->
- {Json} = jiffy:decode(Body),
- Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
- ?assertEqual(Path, proplists:get_value(<<"path">>, Json));
- Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
- end
- end).
diff --git a/src/couch/test/eunit/couchdb_views_tests.erl b/src/couch/test/eunit/couchdb_views_tests.erl
deleted file mode 100644
index 06e2f03eb..000000000
--- a/src/couch/test/eunit/couchdb_views_tests.erl
+++ /dev/null
@@ -1,668 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_views_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--define(DELAY, 100).
--define(TIMEOUT, 1000).
--define(WAIT_DELAY_COUNT, 40).
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- FooRev = create_design_doc(DbName, <<"_design/foo">>, <<"bar">>),
- query_view(DbName, "foo", "bar"),
- BooRev = create_design_doc(DbName, <<"_design/boo">>, <<"baz">>),
- query_view(DbName, "boo", "baz"),
- {DbName, {FooRev, BooRev}}.
-
-setup_with_docs() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- create_docs(DbName),
- create_design_doc(DbName, <<"_design/foo">>, <<"bar">>),
- DbName.
-
-setup_legacy() ->
- DbName = <<"test">>,
- DbFileName = "test.couch",
- OldDbFilePath = filename:join([?FIXTURESDIR, DbFileName]),
- OldViewName = "6cf2c2f766f87b618edf6630b00f8736.view",
- FixtureViewFilePath = filename:join([?FIXTURESDIR, OldViewName]),
- NewViewName = "a1c5929f912aca32f13446122cc6ce50.view",
-
- DbDir = config:get("couchdb", "database_dir"),
- ViewDir = config:get("couchdb", "view_index_dir"),
- OldViewFilePath = filename:join([ViewDir, ".test_design", "mrview",
- OldViewName]),
- NewViewFilePath = filename:join([ViewDir, ".test_design", "mrview",
- NewViewName]),
-
- NewDbFilePath = filename:join([DbDir, DbFileName]),
-
- Files = [NewDbFilePath, OldViewFilePath, NewViewFilePath],
-
-    %% make sure there are no leftovers
- lists:foreach(fun(File) -> file:delete(File) end, Files),
-
- % copy old db file into db dir
- {ok, _} = file:copy(OldDbFilePath, NewDbFilePath),
-
- % copy old view file into view dir
- ok = filelib:ensure_dir(OldViewFilePath),
-
- {ok, _} = file:copy(FixtureViewFilePath, OldViewFilePath),
-
- {DbName, Files}.
-
-teardown({DbName, _}) ->
- teardown(DbName);
-teardown(DbName) when is_binary(DbName) ->
- couch_server:delete(DbName, [?ADMIN_CTX]),
- ok.
-
-teardown_legacy({_DbName, Files}) ->
- lists:foreach(fun(File) -> file:delete(File) end, Files).
-
-view_indexes_cleanup_test_() ->
- {
- "View indexes cleanup",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_have_two_indexes_alive_before_deletion/1,
- fun should_cleanup_index_file_after_ddoc_deletion/1,
- fun should_cleanup_all_index_files/1
- ]
- }
- }
- }.
-
-view_group_db_leaks_test_() ->
- {
- "View group db leaks",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup_with_docs/0, fun teardown/1,
- [
- fun couchdb_1138/1,
- fun couchdb_1309/1
- ]
- }
- }
- }.
-
-view_group_shutdown_test_() ->
- {
- "View group shutdown",
- {
- setup,
- fun() ->
- meck:new(couch_mrview_index, [passthrough]),
- test_util:start_couch()
- end,
- fun(Ctx) ->
- test_util:stop_couch(Ctx),
- meck:unload()
- end,
- [couchdb_1283()]
- }
- }.
-
-backup_restore_test_() ->
- {
- "Upgrade and bugs related tests",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup_with_docs/0, fun teardown/1,
- [
- fun should_not_remember_docs_in_index_after_backup_restore/1
- ]
- }
- }
- }.
-
-
-upgrade_test_() ->
- {
- "Upgrade tests",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup_legacy/0, fun teardown_legacy/1,
- [
- fun should_upgrade_legacy_view_files/1
- ]
- }
- }
- }.
-
-should_not_remember_docs_in_index_after_backup_restore(DbName) ->
- ?_test(begin
- %% COUCHDB-640
-
- ok = backup_db_file(DbName),
- create_doc(DbName, "doc666"),
-
- Rows0 = query_view(DbName, "foo", "bar"),
- ?assert(has_doc("doc1", Rows0)),
- ?assert(has_doc("doc2", Rows0)),
- ?assert(has_doc("doc3", Rows0)),
- ?assert(has_doc("doc666", Rows0)),
-
- ?assertEqual(ok, restore_backup_db_file(DbName)),
-
- Rows1 = query_view(DbName, "foo", "bar"),
- ?assert(has_doc("doc1", Rows1)),
- ?assert(has_doc("doc2", Rows1)),
- ?assert(has_doc("doc3", Rows1)),
- ?assertNot(has_doc("doc666", Rows1))
- end).
-
-should_upgrade_legacy_view_files({DbName, Files}) ->
- ?_test(begin
- [_NewDbFilePath, OldViewFilePath, NewViewFilePath] = Files,
- ok = config:set("query_server_config", "commit_freq", "0", false),
-
- % ensure old header
- OldHeader = read_header(OldViewFilePath),
- ?assertEqual(6, tuple_size(OldHeader)),
- ?assertMatch(mrheader, element(1, OldHeader)),
-
- % query view for expected results
- Rows0 = query_view(DbName, "test", "test"),
- ?assertEqual(3, length(Rows0)),
-
- % ensure old file gone
- ?assertNot(filelib:is_regular(OldViewFilePath)),
-
- % add doc to trigger update
- DocUrl = db_url(DbName) ++ "/bar",
- {ok, _, _, _} = test_request:put(
- DocUrl, [{"Content-Type", "application/json"}], <<"{\"a\":4}">>),
-
- % query view for expected results
- Rows1 = query_view(DbName, "test", "test"),
- ?assertEqual(4, length(Rows1)),
-
- % ensure new header
-        timer:sleep(2000), % have to wait a while for the index to upgrade
- NewHeader = read_header(NewViewFilePath),
- ?assertMatch(#mrheader{}, NewHeader),
- NewViewStatus = hd(NewHeader#mrheader.view_states),
- ?assertEqual(3, tuple_size(NewViewStatus))
- end).
-
-
-should_have_two_indexes_alive_before_deletion({DbName, _}) ->
- view_cleanup(DbName),
- ?_assertEqual(2, count_index_files(DbName)).
-
-should_cleanup_index_file_after_ddoc_deletion({DbName, {FooRev, _}}) ->
- delete_design_doc(DbName, <<"_design/foo">>, FooRev),
- view_cleanup(DbName),
- ?_assertEqual(1, count_index_files(DbName)).
-
-should_cleanup_all_index_files({DbName, {FooRev, BooRev}})->
- delete_design_doc(DbName, <<"_design/foo">>, FooRev),
- delete_design_doc(DbName, <<"_design/boo">>, BooRev),
- view_cleanup(DbName),
- ?_assertEqual(0, count_index_files(DbName)).
-
-couchdb_1138(DbName) ->
- ?_test(begin
- {ok, IndexerPid} = couch_index_server:get_index(
- couch_mrview_index, DbName, <<"_design/foo">>),
- ?assert(is_pid(IndexerPid)),
- ?assert(is_process_alive(IndexerPid)),
- ?assertEqual(2, count_users(DbName)),
-
- wait_indexer(IndexerPid),
-
- Rows0 = query_view(DbName, "foo", "bar"),
- ?assertEqual(3, length(Rows0)),
- ?assertEqual(2, count_users(DbName)),
- ?assert(is_process_alive(IndexerPid)),
-
- create_doc(DbName, "doc1000"),
- Rows1 = query_view(DbName, "foo", "bar"),
- ?assertEqual(4, length(Rows1)),
- ?assertEqual(2, count_users(DbName)),
-
- ?assert(is_process_alive(IndexerPid)),
-
- compact_db(DbName),
- ?assert(is_process_alive(IndexerPid)),
-
- compact_view_group(DbName, "foo"),
- ?assertEqual(2, count_users(DbName)),
-
- ?assert(is_process_alive(IndexerPid)),
-
- create_doc(DbName, "doc1001"),
- Rows2 = query_view(DbName, "foo", "bar"),
- ?assertEqual(5, length(Rows2)),
- ?assertEqual(2, count_users(DbName)),
-
- ?assert(is_process_alive(IndexerPid))
- end).
-
-couchdb_1309(DbName) ->
- ?_test(begin
- {ok, IndexerPid} = couch_index_server:get_index(
- couch_mrview_index, DbName, <<"_design/foo">>),
- ?assert(is_pid(IndexerPid)),
- ?assert(is_process_alive(IndexerPid)),
- ?assertEqual(2, count_users(DbName)),
-
- wait_indexer(IndexerPid),
-
- create_doc(DbName, "doc1001"),
- Rows0 = query_view(DbName, "foo", "bar"),
- check_rows_value(Rows0, null),
- ?assertEqual(4, length(Rows0)),
- ?assertEqual(2, count_users(DbName)),
-
- ?assert(is_process_alive(IndexerPid)),
-
- update_design_doc(DbName, <<"_design/foo">>, <<"bar">>),
- {ok, NewIndexerPid} = couch_index_server:get_index(
- couch_mrview_index, DbName, <<"_design/foo">>),
- ?assert(is_pid(NewIndexerPid)),
- ?assert(is_process_alive(NewIndexerPid)),
- ?assertNotEqual(IndexerPid, NewIndexerPid),
- UserCnt = case count_users(DbName) of
- N when N > 2 ->
- timer:sleep(1000),
- count_users(DbName);
- N -> N
- end,
- ?assertEqual(2, UserCnt),
-
- Rows1 = query_view(DbName, "foo", "bar", ok),
- ?assertEqual(0, length(Rows1)),
- Rows2 = query_view(DbName, "foo", "bar"),
- check_rows_value(Rows2, 1),
- ?assertEqual(4, length(Rows2)),
-
- ok = stop_indexer( %% FIXME we need to grab monitor earlier
- fun() -> ok end,
- IndexerPid, ?LINE,
- "old view group is not dead after ddoc update"),
-
- ok = stop_indexer(
- fun() -> couch_server:delete(DbName, [?ADMIN_USER]) end,
- NewIndexerPid, ?LINE,
- "new view group did not die after DB deletion")
- end).
-
-couchdb_1283() ->
- ?_test(begin
- ok = config:set("couchdb", "max_dbs_open", "3", false),
-
- {ok, MDb1} = couch_db:create(?tempdb(), [?ADMIN_CTX]),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/foo">>},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {<<"foo">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
- ]}},
- {<<"foo2">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
- ]}},
- {<<"foo3">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
- ]}},
- {<<"foo4">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
- ]}},
- {<<"foo5">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
- ]}}
- ]}}
- ]}),
- {ok, _} = couch_db:update_doc(MDb1, DDoc, []),
- ok = populate_db(MDb1, 100, 100),
- query_view(couch_db:name(MDb1), "foo", "foo"),
- ok = couch_db:close(MDb1),
-
- {ok, Pid} = couch_index_server:get_index(
- couch_mrview_index, couch_db:name(MDb1), <<"_design/foo">>),
-
-        % Start and pause compaction
- WaitRef = erlang:make_ref(),
- meck:expect(couch_mrview_index, compact, fun(Db, State, Opts) ->
- receive {WaitRef, From, init} -> ok end,
- From ! {WaitRef, inited},
- receive {WaitRef, go} -> ok end,
- meck:passthrough([Db, State, Opts])
- end),
-
- {ok, CPid} = gen_server:call(Pid, compact),
- CRef = erlang:monitor(process, CPid),
- ?assert(is_process_alive(CPid)),
-
- % Make sure that our compactor is waiting for us
- % before we continue our assertions
- CPid ! {WaitRef, self(), init},
- receive {WaitRef, inited} -> ok end,
-
- % Make sure that a compaction process takes a monitor
- % on the database's main_pid
- ?assertEqual(true, lists:member(CPid, couch_db:monitored_by(MDb1))),
-
-        % Finish compaction and make sure the monitor
- % disappears
- CPid ! {WaitRef, go},
- wait_for_process_shutdown(CRef, normal,
- {reason, "Failure compacting view group"}),
-
- % Make sure that the monitor was removed
- ?assertEqual(false, lists:member(CPid, couch_db:monitored_by(MDb1)))
- end).
-
-wait_for_process_shutdown(Pid, ExpectedReason, Error) ->
- receive
- {'DOWN', Pid, process, _, Reason} ->
- ?assertEqual(ExpectedReason, Reason)
- after ?TIMEOUT ->
- erlang:error(
- {assertion_failed,
- [{module, ?MODULE}, {line, ?LINE}, Error]})
- end.
-
-
-create_doc(DbName, DocId) when is_list(DocId) ->
- create_doc(DbName, ?l2b(DocId));
-create_doc(DbName, DocId) when is_binary(DocId) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- Doc666 = couch_doc:from_json_obj({[
- {<<"_id">>, DocId},
- {<<"value">>, 999}
- ]}),
- {ok, _} = couch_db:update_docs(Db, [Doc666]),
- couch_db:close(Db).
-
-create_docs(DbName) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- Doc1 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc1">>},
- {<<"value">>, 1}
-
- ]}),
- Doc2 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc2">>},
- {<<"value">>, 2}
-
- ]}),
- Doc3 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc3">>},
- {<<"value">>, 3}
-
- ]}),
- {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3]),
- couch_db:close(Db).
-
-populate_db(Db, BatchSize, N) when N > 0 ->
- Docs = lists:map(
- fun(_) ->
- couch_doc:from_json_obj({[
- {<<"_id">>, couch_uuids:new()},
- {<<"value">>, base64:encode(crypto:strong_rand_bytes(1000))}
- ]})
- end,
- lists:seq(1, BatchSize)),
- {ok, _} = couch_db:update_docs(Db, Docs, []),
- populate_db(Db, BatchSize, N - length(Docs));
-populate_db(_Db, _, _) ->
- ok.
-
-create_design_doc(DbName, DDName, ViewName) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDName},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {ViewName, {[
- {<<"map">>, <<"function(doc) { emit(doc.value, null); }">>}
- ]}}
- ]}}
- ]}),
- {ok, Rev} = couch_db:update_doc(Db, DDoc, []),
- couch_db:close(Db),
- Rev.
-
-update_design_doc(DbName, DDName, ViewName) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- {ok, Doc} = couch_db:open_doc(Db, DDName, [?ADMIN_CTX]),
- {Props} = couch_doc:to_json_obj(Doc, []),
- Rev = couch_util:get_value(<<"_rev">>, Props),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDName},
- {<<"_rev">>, Rev},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {ViewName, {[
- {<<"map">>, <<"function(doc) { emit(doc.value, 1); }">>}
- ]}}
- ]}}
- ]}),
- {ok, NewRev} = couch_db:update_doc(Db, DDoc, [?ADMIN_CTX]),
- couch_db:close(Db),
- NewRev.
-
-delete_design_doc(DbName, DDName, Rev) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDName},
- {<<"_rev">>, couch_doc:rev_to_str(Rev)},
- {<<"_deleted">>, true}
- ]}),
- {ok, _} = couch_db:update_doc(Db, DDoc, [Rev]),
- couch_db:close(Db).
-
-db_url(DbName) ->
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- "http://" ++ Addr ++ ":" ++ Port ++ "/" ++ ?b2l(DbName).
-
-query_view(DbName, DDoc, View) ->
- query_view(DbName, DDoc, View, false).
-
-query_view(DbName, DDoc, View, Stale) ->
- {ok, Code, _Headers, Body} = test_request:get(
- db_url(DbName) ++ "/_design/" ++ DDoc ++ "/_view/" ++ View
- ++ case Stale of
- false -> [];
- _ -> "?stale=" ++ atom_to_list(Stale)
- end),
- ?assertEqual(200, Code),
- {Props} = jiffy:decode(Body),
- couch_util:get_value(<<"rows">>, Props, []).
-
-check_rows_value(Rows, Value) ->
- lists:foreach(
- fun({Row}) ->
- ?assertEqual(Value, couch_util:get_value(<<"value">>, Row))
- end, Rows).
-
-view_cleanup(DbName) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- couch_mrview:cleanup(Db),
- couch_db:close(Db).
-
-count_users(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- DbPid = couch_db:get_pid(Db),
- {monitored_by, Monitors0} = process_info(DbPid, monitored_by),
- Monitors = lists:filter(fun is_pid/1, Monitors0),
- CouchFiles = [P || P <- Monitors, couch_file:process_info(P) =/= undefined],
- ok = couch_db:close(Db),
- length(lists:usort(Monitors) -- [self() | CouchFiles]).
-
-count_index_files(DbName) ->
- % call server to fetch the index files
- RootDir = config:get("couchdb", "view_index_dir"),
- length(filelib:wildcard(RootDir ++ "/." ++
- binary_to_list(DbName) ++ "_design"++"/mrview/*")).
-
-has_doc(DocId1, Rows) ->
- DocId = iolist_to_binary(DocId1),
- lists:any(fun({R}) -> lists:member({<<"id">>, DocId}, R) end, Rows).
-
-backup_db_file(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- try
- SrcPath = couch_db:get_filepath(Db),
- Src = if
- is_list(SrcPath) -> SrcPath;
- true -> binary_to_list(SrcPath)
- end,
- ok = copy_tree(Src, Src ++ ".backup")
- after
- couch_db:close(Db)
- end.
-
-restore_backup_db_file(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- Src = couch_db:get_filepath(Db),
- ok = couch_db:close(Db),
- DbPid = couch_db:get_pid(Db),
- exit(DbPid, shutdown),
- ok = copy_tree(Src ++ ".backup", Src),
-
- test_util:wait(fun() ->
- case couch_server:open(DbName, [{timeout, ?TIMEOUT}]) of
- {ok, WaitDb} ->
- case couch_db:get_pid(WaitDb) == DbPid of
- true -> wait;
- false -> ok
- end;
- Else ->
- Else
- end
- end, ?TIMEOUT, ?DELAY).
-
-compact_db(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, _} = couch_db:start_compact(Db),
- ok = couch_db:close(Db),
- wait_db_compact_done(DbName, ?WAIT_DELAY_COUNT).
-
-wait_db_compact_done(_DbName, 0) ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, "DB compaction failed to finish"}]});
-wait_db_compact_done(DbName, N) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- ok = couch_db:close(Db),
- CompactorPid = couch_db:get_compactor_pid(Db),
- case is_pid(CompactorPid) of
- false ->
- ok;
- true ->
- ok = timer:sleep(?DELAY),
- wait_db_compact_done(DbName, N - 1)
- end.
-
-compact_view_group(DbName, DDocId) when is_list(DDocId) ->
- compact_view_group(DbName, ?l2b("_design/" ++ DDocId));
-compact_view_group(DbName, DDocId) when is_binary(DDocId) ->
- ok = couch_mrview:compact(DbName, DDocId),
- wait_view_compact_done(DbName, DDocId, 10).
-
-wait_view_compact_done(_DbName, _DDocId, 0) ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
-         {reason, "View compaction failed to finish"}]});
-wait_view_compact_done(DbName, DDocId, N) ->
- {ok, Code, _Headers, Body} = test_request:get(
- db_url(DbName) ++ "/" ++ ?b2l(DDocId) ++ "/_info"),
- ?assertEqual(200, Code),
- {Info} = jiffy:decode(Body),
- {IndexInfo} = couch_util:get_value(<<"view_index">>, Info),
- CompactRunning = couch_util:get_value(<<"compact_running">>, IndexInfo),
- case CompactRunning of
- false ->
- ok;
- true ->
- ok = timer:sleep(?DELAY),
- wait_view_compact_done(DbName, DDocId, N - 1)
- end.
-
-read_header(File) ->
- {ok, Fd} = couch_file:open(File),
- {ok, {_Sig, Header}} = couch_file:read_header(Fd),
- couch_file:close(Fd),
- Header.
-
-stop_indexer(StopFun, Pid, Line, Reason) ->
- case test_util:stop_sync(Pid, StopFun) of
- timeout ->
- erlang:error(
- {assertion_failed,
- [{module, ?MODULE}, {line, Line},
- {reason, Reason}]});
- ok ->
- ok
- end.
-
-wait_indexer(IndexerPid) ->
- test_util:wait(fun() ->
- {ok, Info} = couch_index:get_info(IndexerPid),
- case couch_util:get_value(compact_running, Info) of
- true ->
- wait;
- false ->
- ok
- end
- end).
-
-copy_tree(Src, Dst) ->
- case filelib:is_dir(Src) of
- true ->
- {ok, Files} = file:list_dir(Src),
- copy_tree(Files, Src, Dst);
- false ->
- ok = filelib:ensure_dir(Dst),
- {ok, _} = file:copy(Src, Dst),
- ok
- end.
-
-copy_tree([], _Src, _Dst) ->
- ok;
-copy_tree([File | Rest], Src, Dst) ->
- FullSrc = filename:join(Src, File),
- FullDst = filename:join(Dst, File),
- ok = copy_tree(FullSrc, FullDst),
- copy_tree(Rest, Src, Dst).
diff --git a/src/couch/test/eunit/fixtures/os_daemon_configer.escript b/src/couch/test/eunit/fixtures/os_daemon_configer.escript
index f146b8314..16ec489e9 100755
--- a/src/couch/test/eunit/fixtures/os_daemon_configer.escript
+++ b/src/couch/test/eunit/fixtures/os_daemon_configer.escript
@@ -89,8 +89,7 @@ init_code_path() ->
"couchdb",
"jiffy",
"ibrowse",
- "mochiweb",
- "snappy"
+ "mochiweb"
],
lists:foreach(fun(Name) ->
code:add_patha(filename:join([?BUILDDIR(), "src", Name, "ebin"]))
diff --git a/src/couch/test/eunit/global_changes_tests.erl b/src/couch/test/eunit/global_changes_tests.erl
deleted file mode 100644
index 4392aafac..000000000
--- a/src/couch/test/eunit/global_changes_tests.erl
+++ /dev/null
@@ -1,159 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(global_changes_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(USER, "admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
-
-setup() ->
- Host = get_host(),
- ok = add_admin(?USER, ?PASS),
- DbName = "foo/" ++ ?b2l(?tempdb()),
- ok = http_create_db(DbName),
- {Host, DbName}.
-
-teardown({_, DbName}) ->
- ok = http_delete_db(DbName),
- delete_admin(?USER),
- ok.
-
-http_create_db(Name) ->
- {ok, Status, _, _} = test_request:put(db_url(Name), [?AUTH], ""),
- true = lists:member(Status, [201, 202]),
- ok.
-
-http_delete_db(Name) ->
- {ok, Status, _, _} = test_request:delete(db_url(Name), [?AUTH]),
- true = lists:member(Status, [200, 202]),
- ok.
-
-db_url(Name) ->
- get_host() ++ "/" ++ escape(Name).
-
-start_couch() ->
- Ctx = test_util:start_couch([chttpd, global_changes]),
- ok = ensure_db_exists("_global_changes"),
- Ctx.
-
-ensure_db_exists(Name) ->
- case fabric:create_db(Name) of
- ok ->
- ok;
- {error, file_exists} ->
- ok
- end.
-
-global_changes_test_() ->
- {
- "Checking global_changes endpoint",
- {
- setup,
- fun start_couch/0,
- fun test_util:stop/1,
- [
- check_response()
- ]
- }
- }.
-
-check_response() ->
- {
- "Check response",
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_return_correct_response_on_create/1,
- fun should_return_correct_response_on_update/1
- ]
- }
- }.
-
-should_return_correct_response_on_create({Host, DbName}) ->
- ?_test(begin
- Headers = [?AUTH],
- create_doc(Host, DbName, "bar/baz"),
- {Status, Events} = request_updates(Host, DbName, Headers),
- ?assertEqual(200, Status),
- ?assertEqual([<<"created">>, <<"updated">>], Events)
- end).
-
-should_return_correct_response_on_update({Host, DbName}) ->
- ?_test(begin
- Headers = [?AUTH],
- create_doc(Host, DbName, "bar/baz"),
- update_doc(Host, DbName, "bar/baz", "new_value"),
- {Status, Events} = request_updates(Host, DbName, Headers),
- ?assertEqual(200, Status),
- ?assertEqual([<<"created">>, <<"updated">>], Events)
- end).
-
-create_doc(Host, DbName, Id) ->
- Headers = [?AUTH],
- Url = Host ++ "/" ++ escape(DbName) ++ "/" ++ escape(Id),
- Body = jiffy:encode({[
- {key, "value"}
- ]}),
- {ok, Status, _Headers, _Body} = test_request:put(Url, Headers, Body),
- ?assert(Status =:= 201 orelse Status =:= 202),
- timer:sleep(1000),
- ok.
-
-update_doc(Host, DbName, Id, Value) ->
- Headers = [?AUTH],
- Url = Host ++ "/" ++ escape(DbName) ++ "/" ++ escape(Id),
- {ok, 200, _Headers0, BinBody} = test_request:get(Url, Headers),
- [Rev] = decode_response(BinBody, [<<"_rev">>]),
- Body = jiffy:encode({[
- {key, Value},
- {'_rev', Rev}
- ]}),
- {ok, Status, _Headers1, _Body} = test_request:put(Url, Headers, Body),
- ?assert(Status =:= 201 orelse Status =:= 202),
- timer:sleep(1000),
- ok.
-
-request_updates(Host, DbName, Headers) ->
- Url = Host ++ "/_db_updates",
- {ok, Status, _Headers, BinBody} = test_request:get(Url, Headers),
- [Results] = decode_response(BinBody, [<<"results">>]),
- ToDecode = [<<"db_name">>, <<"type">>],
- Values = [decode_result(Result, ToDecode) || Result <- Results],
- Result = [Type || [DB, Type] <- Values, DB == ?l2b(DbName)],
- {Status, lists:sort(Result)}.
-
-decode_result({Props}, ToDecode) ->
- [couch_util:get_value(Key, Props) || Key <- ToDecode].
-
-decode_response(BinBody, ToDecode) ->
- {Body} = jiffy:decode(BinBody),
- [couch_util:get_value(Key, Body) || Key <- ToDecode].
-
-add_admin(User, Pass) ->
- Hashed = couch_passwords:hash_admin_password(Pass),
- config:set("admins", User, ?b2l(Hashed), _Persist=false).
-
-delete_admin(User) ->
- config:delete("admins", User, false).
-
-get_host() ->
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)),
- "http://" ++ Addr ++ ":" ++ Port.
-
-escape(Path) ->
- re:replace(Path, "/", "%2f", [global, {return, list}]).
diff --git a/src/couch/test/exunit/couch_compress_tests.exs b/src/couch/test/exunit/couch_compress_tests.exs
deleted file mode 100644
index 5550e0661..000000000
--- a/src/couch/test/exunit/couch_compress_tests.exs
+++ /dev/null
@@ -1,113 +0,0 @@
-defmodule Couch.Test.CouchCompress do
- use Couch.Test.ExUnit.Case
- alias Couch.Test.Utils
-
- import Utils
-
- @term {[{:a, 1}, {:b, 2}, {:c, 3}, {:d, 4}, {:e, 5}]}
-
- @none <<131, 104, 1, 108, 0, 0, 0, 5, 104, 2, 100, 0, 1, 97, 97, 1, 104, 2, 100, 0, 1,
- 98, 97, 2, 104, 2, 100, 0, 1, 99, 97, 3, 104, 2, 100, 0, 1, 100, 97, 4, 104, 2,
- 100, 0, 1, 101, 97, 5, 106>>
-
- @deflate <<131, 80, 0, 0, 0, 48, 120, 218, 203, 96, 204, 97, 96, 96, 96, 205, 96, 74,
- 97, 96, 76, 76, 100, 4, 211, 73, 137, 76, 96, 58, 57, 145, 25, 76, 167, 36,
- 178, 128, 233, 212, 68, 214, 44, 0, 212, 169, 9, 51>>
-
- @snappy <<1, 49, 64, 131, 104, 1, 108, 0, 0, 0, 5, 104, 2, 100, 0, 1, 97, 97, 1, 104, 1,
- 8, 8, 98, 97, 2, 5, 8, 8, 99, 97, 3, 5, 8, 44, 100, 97, 4, 104, 2, 100, 0, 1,
- 101, 97, 5, 106>>
-
- @snappy_bigendian <<1, 49, 60, 131, 104, 1, 108, 0, 0, 0, 5, 104, 2, 100, 0, 1, 97, 97,
- 1, 5, 8, 8, 98, 97, 2, 5, 8, 8, 99, 97, 3, 5, 8, 44, 100, 97, 4,
- 104, 2, 100, 0, 1, 101, 97, 5, 106>>
-
- @corrupt <<2, 12, 85, 06>>
-
- describe "couch_compress" do
- test "compress" do
- assert @none === :couch_compress.compress(@term, :none)
- assert @none !== :couch_compress.compress(@term, {:deflate, 9})
- assert @none !== :couch_compress.compress(@term, :snappy)
-
- # assert that compressed output is smaller than uncompressed input
- assert bit_size(:couch_compress.compress(@term, {:deflate, 9})) < bit_size(@none)
- assert bit_size(:couch_compress.compress(@term, :snappy)) < bit_size(@none)
- end
-
- test "decompress" do
- assert @term === :couch_compress.decompress(@none)
- assert @term === :couch_compress.decompress(@deflate)
- assert @term === :couch_compress.decompress(@snappy)
- assert @term === :couch_compress.decompress(@snappy_bigendian)
- assert catch_error(:couch_compress.decompress(@corrupt)) == :invalid_compression
- end
-
- test "recompress" do
- res = @none
-
- # none -> deflate
- res = :couch_compress.compress(res, {:deflate, 9})
- assert :couch_compress.is_compressed(res, {:deflate, 9})
-
- # deflate -> snappy
- res = :couch_compress.compress(res, :snappy)
- assert :couch_compress.is_compressed(res, :snappy)
-
- # snappy -> none
- res = :couch_compress.compress(res, :none)
- assert :couch_compress.is_compressed(res, :none)
-
- # none -> snappy
- res = :couch_compress.compress(res, :snappy)
- assert :couch_compress.is_compressed(res, :snappy)
-
- # snappy -> deflate
- res = :couch_compress.compress(res, {:deflate, 9})
- assert :couch_compress.is_compressed(res, {:deflate, 9})
-
- # deflate -> none
- res = :couch_compress.compress(res, :none)
- assert :couch_compress.is_compressed(res, :none)
- end
-
- test "is_compressed" do
- assert :couch_compress.is_compressed(@none, :none)
- assert :couch_compress.is_compressed(@deflate, {:deflate, 9})
- assert :couch_compress.is_compressed(@snappy, :snappy)
- assert :couch_compress.is_compressed(@snappy_bigendian, :snappy)
- refute :couch_compress.is_compressed(@none, {:deflate, 0})
- refute :couch_compress.is_compressed(@none, {:deflate, 9})
- refute :couch_compress.is_compressed(@none, :snappy)
- refute :couch_compress.is_compressed(@deflate, :none)
- refute :couch_compress.is_compressed(@deflate, :snappy)
- refute :couch_compress.is_compressed(@snappy, :none)
- refute :couch_compress.is_compressed(@snappy, {:deflate, 9})
- refute :couch_compress.is_compressed(@snappy_bigendian, :none)
- refute :couch_compress.is_compressed(@snappy_bigendian, {:deflate, 9})
-
- assert catch_error(:couch_compress.is_compressed(@corrupt, :none)) ==
- :invalid_compression
-
- assert catch_error(:couch_compress.is_compressed(@corrupt, {:deflate, 9})) ==
- :invalid_compression
-
- assert catch_error(:couch_compress.is_compressed(@corrupt, :snappy)) ==
- :invalid_compression
- end
-
- test "uncompressed_size" do
- assert :couch_compress.uncompressed_size(@none) === 49
- assert :couch_compress.uncompressed_size(@deflate) === 49
- assert :couch_compress.uncompressed_size(@snappy) === 49
- assert :couch_compress.uncompressed_size(@snappy_bigendian) === 49
-
- assert :couch_compress.uncompressed_size(
- :couch_compress.compress(:x, {:deflate, 9})
- ) === 5
-
- assert catch_error(:couch_compress.uncompressed_size(@corrupt)) ==
- :invalid_compression
- end
- end
-end
diff --git a/src/couch/test/exunit/fabric_test.exs b/src/couch/test/exunit/fabric_test.exs
deleted file mode 100644
index bdb84e9a2..000000000
--- a/src/couch/test/exunit/fabric_test.exs
+++ /dev/null
@@ -1,101 +0,0 @@
-defmodule Couch.Test.Fabric do
- use Couch.Test.ExUnit.Case
- alias Couch.Test.Utils
-
- alias Couch.Test.Setup
-
- alias Couch.Test.Setup.Step
-
- import Couch.DBTest
-
- import Utils
-
- @admin {:user_ctx, user_ctx(roles: ["_admin"])}
-
- def with_db(context, setup) do
- setup =
- setup
- |> Setup.Common.with_db()
- |> Setup.run()
-
- context =
- Map.merge(context, %{
- db_name: setup |> Setup.get(:db) |> Step.Create.DB.name()
- })
-
- {context, setup}
- end
-
- describe "Fabric miscellaneous API" do
- @describetag setup: &__MODULE__.with_db/2
- test "Get inactive_index_files", ctx do
- {:ok, _rev} = update_doc(ctx.db_name, %{"_id" => "doc1"})
-
- design_doc = %{
- "_id" => "_design/test",
- "language" => "javascript",
- "views" => %{
- "view" => %{
- "map" => "function(doc){emit(doc._id, doc._rev)}"
- }
- }
- }
-
- {:ok, rev1} = update_doc(ctx.db_name, design_doc)
- wait_sig_update(ctx.db_name, "test", "")
- prev_active = get_active_sig(ctx.db_name, "test")
-
- updated_design_doc =
- put_in(design_doc, ["views", "view", "map"], "function(doc){emit(doc._id, null)}")
-
- {:ok, rev2} =
- update_doc(
- ctx.db_name,
- Map.put(updated_design_doc, "_rev", rev1)
- )
-
- assert rev1 != rev2
- wait_sig_update(ctx.db_name, "test", prev_active)
-
- {:ok, info} = :fabric.get_view_group_info(ctx.db_name, "_design/test")
- active = info[:signature]
-
- files = Enum.map(:fabric.inactive_index_files(ctx.db_name), &List.to_string/1)
-
- assert [] != files, "We should have some inactive"
-
- assert not Enum.any?(files, fn
- file_path -> String.contains?(file_path, active)
- end),
-             "We are not supposed to return active views"
-
- assert Enum.all?(files, fn
- file_path -> String.contains?(file_path, prev_active)
- end),
- "We expect all files to contain previous active signature"
- end
- end
-
- defp update_doc(db_name, body) do
- json_body = :jiffy.decode(:jiffy.encode(body))
-
- case :fabric.update_doc(db_name, json_body, [@admin]) do
- {:ok, rev} ->
- {:ok, :couch_doc.rev_to_str(rev)}
-
- error ->
- error
- end
- end
-
- defp get_active_sig(db_name, ddoc_id) do
- {:ok, info} = :fabric.get_view_group_info(db_name, "_design/#{ddoc_id}")
- info[:signature]
- end
-
- defp wait_sig_update(db_name, ddoc_id, prev_active) do
- retry_until(fn ->
- get_active_sig(db_name, ddoc_id) != prev_active
- end)
- end
-end
diff --git a/src/couch_eval/src/couch_eval.erl b/src/couch_eval/src/couch_eval.erl
index 3541a5b94..f87ba97b9 100644
--- a/src/couch_eval/src/couch_eval.erl
+++ b/src/couch_eval/src/couch_eval.erl
@@ -17,7 +17,9 @@
-export([
acquire_map_context/6,
release_map_context/1,
- map_docs/2
+ map_docs/2,
+ with_context/2,
+ try_compile/4
]).
@@ -35,6 +37,10 @@
-type result() :: {doc_id(), [[{any(), any()}]]}.
-type api_mod() :: atom().
-type context() :: {api_mod(), any()}.
+-type function_type() :: binary() | atom().
+-type function_name() :: binary().
+-type function_src() :: binary().
+-type error(_Error) :: no_return().
-type context_opts() :: #{
db_name := db_name(),
@@ -46,10 +52,17 @@
api_mod => api_mod()
}.
+-type with_context_opts() :: #{
+ language := language()
+}.
+
-callback acquire_map_context(context_opts()) -> {ok, any()} | {error, any()}.
-callback release_map_context(context()) -> ok | {error, any()}.
-callback map_docs(context(), [doc()]) -> {ok, [result()]} | {error, any()}.
+-callback acquire_context() -> {ok, any()} | {error, any()}.
+-callback release_context(context()) -> ok | {error, any()}.
+-callback try_compile(context(), function_type(), function_name(), function_src()) -> ok.
-spec acquire_map_context(
@@ -59,7 +72,10 @@
sig(),
lib(),
map_funs()
- ) -> {ok, context()} | {error, any()}.
+ ) ->
+ {ok, context()}
+ | error({invalid_eval_api_mod, Language :: binary()})
+ | error({unknown_eval_api_language, Language :: binary()}).
acquire_map_context(DbName, DDocId, Language, Sig, Lib, MapFuns) ->
ApiMod = get_api_mod(Language),
CtxOpts = #{
@@ -87,6 +103,41 @@ map_docs({ApiMod, Ctx}, Docs) ->
ApiMod:map_docs(Ctx, Docs).
+-spec with_context(with_context_opts(), function()) ->
+ any()
+ | error({invalid_eval_api_mod, Language :: binary()})
+ | error({unknown_eval_api_language, Language :: binary()}).
+with_context(#{language := Language}, Fun) ->
+ {ok, Ctx} = acquire_context(Language),
+ try
+ Fun(Ctx)
+ after
+ release_context(Ctx)
+ end.
+
+
+-spec try_compile(context(), function_type(), function_name(), function_src()) -> ok.
+try_compile({_ApiMod, _Ctx}, reduce, <<_/binary>>, disabled) ->
+ % Reduce functions may be disabled. Accept that as a valid configuration.
+ ok;
+
+try_compile({ApiMod, Ctx}, FuncType, FuncName, FuncSrc) ->
+ ApiMod:try_compile(Ctx, FuncType, FuncName, FuncSrc).
+
+
+acquire_context(Language) ->
+ ApiMod = get_api_mod(Language),
+ {ok, Ctx} = ApiMod:acquire_context(),
+ {ok, {ApiMod, Ctx}}.
+
+
+release_context(nil) ->
+ ok;
+
+release_context({ApiMod, Ctx}) ->
+ ApiMod:release_context(Ctx).
+
+
get_api_mod(Language) when is_binary(Language) ->
try
LangStr = binary_to_list(Language),
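The diff above adds two public entry points, with_context/2 and try_compile/4. As a rough illustration only (not part of this commit), a caller could drive them as sketched below; the function name validate_ddoc_functions and the <<"javascript">> language value are assumptions, and a backend module must be registered for that language for the calls to succeed.

%% Hypothetical caller, not part of this diff: exercises the new
%% couch_eval:with_context/2 and couch_eval:try_compile/4 entry points.
validate_ddoc_functions(MapSrc, RedSrc) ->
    couch_eval:with_context(#{language => <<"javascript">>}, fun(Ctx) ->
        %% compile the map function source in the acquired context
        ok = couch_eval:try_compile(Ctx, map, <<"validate_map">>, MapSrc),
        %% reduce functions may be disabled; try_compile/4 accepts that too
        ok = couch_eval:try_compile(Ctx, reduce, <<"validate_red">>, RedSrc)
    end).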
diff --git a/src/couch_event/.gitignore b/src/couch_event/.gitignore
deleted file mode 100644
index 1204ed70e..000000000
--- a/src/couch_event/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-deps/
-ebin/
diff --git a/src/couch_event/LICENSE b/src/couch_event/LICENSE
deleted file mode 100644
index f6cd2bc80..000000000
--- a/src/couch_event/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/src/couch_event/README.md b/src/couch_event/README.md
deleted file mode 100644
index ab2e56877..000000000
--- a/src/couch_event/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Couch Event Notifications
-
-The replacement for couch\_db\_update and related code.
diff --git a/src/couch_event/rebar.config b/src/couch_event/rebar.config
deleted file mode 100644
index f68b4b5ed..000000000
--- a/src/couch_event/rebar.config
+++ /dev/null
@@ -1 +0,0 @@
-{erl_first_files, ["src/couch_event_listener.erl"]}.
diff --git a/src/couch_event/src/couch_event.app.src b/src/couch_event/src/couch_event.app.src
deleted file mode 100644
index b2ac917b9..000000000
--- a/src/couch_event/src/couch_event.app.src
+++ /dev/null
@@ -1,22 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, couch_event, [
- {description, "Event notification system for Apache CouchDB"},
- {vsn, git},
- {registered, [
- couch_event_sup,
- couch_event_server
- ]},
- {applications, [kernel, stdlib, khash, couch_log, config]},
- {mod, {couch_event_app, []}}
-]}.
diff --git a/src/couch_event/src/couch_event.erl b/src/couch_event/src/couch_event.erl
deleted file mode 100644
index 9f8e501df..000000000
--- a/src/couch_event/src/couch_event.erl
+++ /dev/null
@@ -1,65 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_event).
-
--export([
- notify/2
-]).
-
--export([
- listen/4,
- link_listener/4,
- stop_listener/1
-]).
-
--export([
- register/2,
- register_many/2,
- register_all/1,
- unregister/1
-]).
-
-
--define(SERVER, couch_event_server).
-
-
-notify(DbName, Event) ->
- gen_server:cast(?SERVER, {notify, DbName, Event}).
-
-
-listen(Module, Function, State, Options) ->
- couch_event_listener_mfa:enter_loop(Module, Function, State, Options).
-
-
-link_listener(Module, Function, State, Options) ->
- couch_event_listener_mfa:start_link(Module, Function, State, Options).
-
-
-stop_listener(Pid) ->
- couch_event_listener_mfa:stop(Pid).
-
-
-register(Pid, DbName) ->
- gen_server:call(?SERVER, {register, Pid, [DbName]}).
-
-
-register_many(Pid, DbNames) when is_list(DbNames) ->
- gen_server:call(?SERVER, {register, Pid, DbNames}).
-
-
-register_all(Pid) ->
- gen_server:call(?SERVER, {register, Pid, [all_dbs]}).
-
-
-unregister(Pid) ->
- gen_server:call(?SERVER, {unregister, Pid}).
diff --git a/src/couch_event/src/couch_event_app.erl b/src/couch_event/src/couch_event_app.erl
deleted file mode 100644
index 3a8341b9e..000000000
--- a/src/couch_event/src/couch_event_app.erl
+++ /dev/null
@@ -1,27 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_event_app).
--behavior(application).
-
--export([
- start/2,
- stop/1
-]).
-
-
-start(_StartType, _StartArgs) ->
- couch_event_sup2:start_link().
-
-
-stop(_State) ->
- ok.
diff --git a/src/couch_event/src/couch_event_int.hrl b/src/couch_event/src/couch_event_int.hrl
deleted file mode 100644
index f837e1dec..000000000
--- a/src/couch_event/src/couch_event_int.hrl
+++ /dev/null
@@ -1,19 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--define(REGISTRY_TABLE, couch_event_registry).
--define(MONITOR_TABLE, couch_event_registry_monitors).
-
--record(client, {
- dbname,
- pid
-}).
diff --git a/src/couch_event/src/couch_event_listener.erl b/src/couch_event/src/couch_event_listener.erl
deleted file mode 100644
index a9ed33199..000000000
--- a/src/couch_event/src/couch_event_listener.erl
+++ /dev/null
@@ -1,238 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_event_listener).
-
-
--export([
- start/3,
- start/4,
- start_link/3,
- start_link/4,
- enter_loop/3,
- cast/2
-]).
-
--export([
- do_init/3,
- loop/2
-]).
-
-
--record(st, {
- module,
- state
-}).
-
-
--callback init(Arg :: term()) ->
- term().
-
--callback terminate(Reason :: term(), State :: term()) ->
- term().
-
--callback handle_cast(Message :: term(), State :: term()) ->
- term().
-
--callback handle_event(DbName :: term(), Event :: term(), State :: term()) ->
- term().
-
--callback handle_info(Message :: term(), State :: term()) ->
- term().
-
-
-start(Mod, Arg, Options) ->
- Pid = erlang:spawn(?MODULE, do_init, [Mod, Arg, Options]),
- {ok, Pid}.
-
-
-start(Name, Mod, Arg, Options) ->
- case where(Name) of
- undefined ->
- start(Mod, Arg, [{name, Name} | Options]);
- Pid ->
- {error, {already_started, Pid}}
- end.
-
-
-start_link(Mod, Arg, Options) ->
- Pid = erlang:spawn_link(?MODULE, do_init, [Mod, Arg, Options]),
- {ok, Pid}.
-
-
-start_link(Name, Mod, Arg, Options) ->
- case where(Name) of
- undefined ->
- start_link(Mod, Arg, [{name, Name} | Options]);
- Pid ->
- {error, {already_started, Pid}}
- end.
-
-
-enter_loop(Module, State, Options) ->
- ok = register_listeners(Options),
- ?MODULE:loop(#st{module=Module, state=State}, infinity).
-
-
-cast(Pid, Message) ->
- Pid ! {'$couch_event_cast', Message},
- ok.
-
-
-do_init(Module, Arg, Options) ->
- ok = maybe_name_process(Options),
- ok = register_listeners(Options),
- case (catch Module:init(Arg)) of
- {ok, State} ->
- ?MODULE:loop(#st{module=Module, state=State}, infinity);
- {ok, State, Timeout} when is_integer(Timeout), Timeout >= 0 ->
- ?MODULE:loop(#st{module=Module, state=State}, Timeout);
- Else ->
- erlang:exit(Else)
- end.
-
-
-loop(St, Timeout) ->
- receive
- {'$couch_event', DbName, Event} ->
- do_event(St, DbName, Event);
- {'$couch_event_cast', Message} ->
- do_cast(St, Message);
- Else ->
- do_info(St, Else)
- after Timeout ->
- do_info(St, timeout)
- end.
-
-
-maybe_name_process(Options) ->
- case proplists:lookup(name, Options) of
- {name, Name} ->
- case name_register(Name) of
- true ->
- ok;
- {false, Pid} ->
- erlang:error({already_started, Pid})
- end;
- none ->
- ok
- end.
-
-
-register_listeners(Options) ->
- case get_all_dbnames(Options) of
- all_dbs ->
- couch_event:register_all(self());
- DbNames ->
- couch_event:register_many(self(), DbNames)
- end,
- ok.
-
-
-do_event(#st{module=Module, state=State}=St, DbName, Event) ->
- case (catch Module:handle_event(DbName, Event, State)) of
- {ok, NewState} ->
- ?MODULE:loop(St#st{state=NewState}, infinity);
- {ok, NewState, Timeout} when is_integer(Timeout), Timeout >= 0 ->
- ?MODULE:loop(St#st{state=NewState}, Timeout);
- {stop, Reason, NewState} ->
- do_terminate(Reason, St#st{state=NewState});
- Else ->
- erlang:error(Else)
- end.
-
-
-do_cast(#st{module=Module, state=State}=St, Message) ->
- case (catch Module:handle_cast(Message, State)) of
- {ok, NewState} ->
- ?MODULE:loop(St#st{state=NewState}, infinity);
- {ok, NewState, Timeout} when is_integer(Timeout), Timeout >= 0 ->
- ?MODULE:loop(St#st{state=NewState}, Timeout);
- {stop, Reason, NewState} ->
- do_terminate(Reason, St#st{state=NewState});
- Else ->
- erlang:error(Else)
- end.
-
-
-do_info(#st{module=Module, state=State}=St, Message) ->
- case (catch Module:handle_info(Message, State)) of
- {ok, NewState} ->
- ?MODULE:loop(St#st{state=NewState}, infinity);
- {ok, NewState, Timeout} when is_integer(Timeout), Timeout >= 0 ->
- ?MODULE:loop(St#st{state=NewState}, Timeout);
- {stop, Reason, NewState} ->
- do_terminate(Reason, St#st{state=NewState});
- Else ->
- erlang:error(Else)
- end.
-
-
-do_terminate(Reason, #st{module=Module, state=State}) ->
- % Order matters. We want to make sure Module:terminate/2
- % is called even if couch_event:unregister/1 hangs
- % indefinitely.
- catch Module:terminate(Reason, State),
- catch couch_event:unregister(self()),
- Status = case Reason of
- normal -> normal;
- shutdown -> normal;
- ignore -> normal;
- Else -> Else
- end,
- erlang:exit(Status).
-
-
-where({global, Name}) -> global:whereis_name(Name);
-where({local, Name}) -> whereis(Name).
-
-
-name_register({global, Name}=GN) ->
- case global:register_name(Name, self()) of
- yes -> true;
- no -> {false, where(GN)}
- end;
-name_register({local, Name}=LN) ->
- try register(Name, self()) of
- true -> true
- catch error:_ ->
- {false, where(LN)}
- end.
-
-
-get_all_dbnames(Options) ->
- case proplists:get_value(all_dbs, Options) of
- true -> all_dbs;
- _ -> get_all_dbnames(Options, [])
- end.
-
-
-get_all_dbnames([], []) ->
- erlang:error(no_dbnames_provided);
-get_all_dbnames([], Acc) ->
- lists:usort(convert_dbname_list(Acc));
-get_all_dbnames([{dbname, DbName} | Rest], Acc) ->
- get_all_dbnames(Rest, [DbName | Acc]);
-get_all_dbnames([{dbnames, DbNames} | Rest], Acc) when is_list(DbNames) ->
- get_all_dbnames(Rest, DbNames ++ Acc);
-get_all_dbnames([_Ignored | Rest], Acc) ->
- get_all_dbnames(Rest, Acc).
-
-
-convert_dbname_list([]) ->
- [];
-convert_dbname_list([DbName | Rest]) when is_binary(DbName) ->
- [DbName | convert_dbname_list(Rest)];
-convert_dbname_list([DbName | Rest]) when is_list(DbName) ->
- [list_to_binary(DbName) | convert_dbname_list(Rest)];
-convert_dbname_list([DbName | _]) ->
- erlang:error({invalid_dbname, DbName}).
diff --git a/src/couch_event/src/couch_event_listener_mfa.erl b/src/couch_event/src/couch_event_listener_mfa.erl
deleted file mode 100644
index 9be58880a..000000000
--- a/src/couch_event/src/couch_event_listener_mfa.erl
+++ /dev/null
@@ -1,107 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_event_listener_mfa).
--behavior(couch_event_listener).
-
-
--export([
- start_link/4,
- enter_loop/4,
- stop/1
-]).
-
--export([
- init/1,
- terminate/2,
- handle_event/3,
- handle_cast/2,
- handle_info/2
-]).
-
-
--record(st, {
- mod,
- func,
- state,
- parent
-}).
-
-
-start_link(Mod, Func, State, Options) ->
- Parent = case proplists:get_value(parent, Options) of
- P when is_pid(P) -> P;
- _ -> self()
- end,
- Arg = {Parent, Mod, Func, State},
- couch_event_listener:start_link(?MODULE, Arg, Options).
-
-
-enter_loop(Mod, Func, State, Options) ->
- Parent = case proplists:get_value(parent, Options) of
- P when is_pid(P) ->
- erlang:monitor(process, P),
- P;
- _ ->
- undefined
- end,
- St = #st{
- mod = Mod,
- func = Func,
- state = State,
- parent = Parent
- },
- couch_event_listener:enter_loop(?MODULE, St, Options).
-
-
-stop(Pid) ->
- couch_event_listener:cast(Pid, shutdown).
-
-
-init({Parent, Mod, Func, State}) ->
- erlang:monitor(process, Parent),
- {ok, #st{
- mod = Mod,
- func = Func,
- state = State,
- parent = Parent
- }}.
-
-
-terminate(_Reason, _MFA) ->
- ok.
-
-
-handle_event(DbName, Event, #st{mod=Mod, func=Func, state=State}=St) ->
- case (catch Mod:Func(DbName, Event, State)) of
- {ok, NewState} ->
- {ok, St#st{state=NewState}};
- stop ->
- {stop, normal, St};
- Else ->
- erlang:error(Else)
- end.
-
-
-handle_cast(shutdown, St) ->
- {stop, normal, St};
-
-handle_cast(_Msg, St) ->
- {ok, St}.
-
-
-handle_info({'DOWN', _Ref, process, Parent, _Reason}, #st{parent=Parent}=St) ->
- {stop, normal, St};
-
-handle_info(_Msg, St) ->
- {ok, St}.
-
diff --git a/src/couch_event/src/couch_event_os_listener.erl b/src/couch_event/src/couch_event_os_listener.erl
deleted file mode 100644
index 4de0a4416..000000000
--- a/src/couch_event/src/couch_event_os_listener.erl
+++ /dev/null
@@ -1,76 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_event_os_listener).
--behaviour(gen_server).
--vsn(1).
-
-
--export([
- start_link/1
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3
-]).
-
-
-start_link(Exe) when is_list(Exe) ->
- gen_server:start_link(?MODULE, Exe, []).
-
-
-init(Exe) ->
- process_flag(trap_exit, true),
- ok = couch_event:register_all(self()),
- couch_os_process:start_link(Exe, []).
-
-
-terminate(_Reason, Pid) when is_pid(Pid) ->
- couch_os_process:stop(Pid);
-terminate(_Reason, _Pid) ->
- ok.
-
-
-handle_call(Msg, From, Pid) ->
- couch_log:notice("~s ignoring call ~w from ~w", [?MODULE, Msg, From]),
- {reply, ignored, Pid, 0}.
-
-
-handle_cast(Msg, Pid) ->
- couch_log:notice("~s ignoring cast ~w", [?MODULE, Msg]),
- {noreply, Pid, 0}.
-
-
-handle_info({'$couch_event', DbName, Event}, Pid) ->
- Obj = {[
- {db, DbName},
- {type, list_to_binary(atom_to_list(Event))}
- ]},
- ok = couch_os_process:send(Pid, Obj),
- {noreply, Pid};
-
-handle_info({'EXIT', Pid, Reason}, Pid) ->
- couch_log:error("Update notificatio process ~w died: ~w", [Pid, Reason]),
- {stop, normal, nil};
-
-handle_info(Msg, Pid) ->
- couch_log:notice("~s ignoring info ~w", [?MODULE, Msg]),
- {noreply, Pid, 0}.
-
-
-code_change(_OldVsn, St, _Extra) ->
- {ok, St}.
diff --git a/src/couch_event/src/couch_event_server.erl b/src/couch_event/src/couch_event_server.erl
deleted file mode 100644
index 321e8fafd..000000000
--- a/src/couch_event/src/couch_event_server.erl
+++ /dev/null
@@ -1,156 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_event_server).
--behaviour(gen_server).
--vsn(1).
-
-
--export([
- start_link/0
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3
-]).
-
-
--include("couch_event_int.hrl").
-
-
--record(st, {
- by_pid,
- by_dbname
-}).
-
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, nil, []).
-
-
-init(_) ->
- {ok, ByPid} = khash:new(),
- {ok, ByDbName} = khash:new(),
- {ok, #st{
- by_pid = ByPid,
- by_dbname = ByDbName
- }}.
-
-
-terminate(_Reason, _St) ->
- ok.
-
-
-handle_call({register, Pid, NewDbNames}, _From, St) ->
- case khash:get(St#st.by_pid, Pid) of
- undefined ->
- NewRef = erlang:monitor(process, Pid),
- register(St, NewRef, Pid, NewDbNames);
- {ReuseRef, OldDbNames} ->
- unregister(St, Pid, OldDbNames),
- register(St, ReuseRef, Pid, NewDbNames)
- end,
- {reply, ok, St};
-
-handle_call({unregister, Pid}, _From, St) ->
- Reply = case khash:get(St#st.by_pid, Pid) of
- undefined ->
- not_registered;
- {Ref, OldDbNames} ->
- unregister(St, Pid, OldDbNames),
- erlang:demonitor(Ref, [flush]),
- ok
- end,
- {reply, Reply, St};
-
-handle_call(Msg, From, St) ->
- couch_log:notice("~s ignoring call ~w from ~w", [?MODULE, Msg, From]),
- {reply, ignored, St}.
-
-
-handle_cast({notify, DbName, Event}, St) ->
- notify_listeners(St#st.by_dbname, DbName, Event),
- {noreply, St};
-
-handle_cast(Msg, St) ->
- couch_log:notice("~s ignoring cast ~w", [?MODULE, Msg]),
- {noreply, St}.
-
-
-handle_info({'DOWN', Ref, process, Pid, _Reason}, St) ->
- case khash:get(St#st.by_pid, Pid) of
- {Ref, OldDbNames} ->
- unregister(St, Pid, OldDbNames);
- undefined ->
- ok
- end,
- {noreply, St};
-
-
-handle_info(Msg, St) ->
- couch_log:notice("~s ignoring info ~w", [?MODULE, Msg]),
- {noreply, St}.
-
-
-code_change(_OldVsn, St, _Extra) ->
- {ok, St}.
-
-
-notify_listeners(ByDbName, DbName, Event) ->
- Msg = {'$couch_event', DbName, Event},
- notify_listeners(khash:get(ByDbName, all_dbs), Msg),
- notify_listeners(khash:get(ByDbName, DbName), Msg).
-
-
-notify_listeners(undefined, _) ->
- ok;
-notify_listeners(Listeners, Msg) ->
- khash:fold(Listeners, fun(Pid, _, _) -> Pid ! Msg, nil end, nil).
-
-
-register(St, Ref, Pid, DbNames) ->
- khash:put(St#st.by_pid, Pid, {Ref, DbNames}),
- lists:foreach(fun(DbName) ->
- add_listener(St#st.by_dbname, DbName, Pid)
- end, DbNames).
-
-
-add_listener(ByDbName, DbName, Pid) ->
- case khash:lookup(ByDbName, DbName) of
- {value, Listeners} ->
- khash:put(Listeners, Pid, nil);
- not_found ->
- {ok, NewListeners} = khash:new(),
- khash:put(NewListeners, Pid, nil),
- khash:put(ByDbName, DbName, NewListeners)
- end.
-
-
-unregister(St, Pid, OldDbNames) ->
- ok = khash:del(St#st.by_pid, Pid),
- lists:foreach(fun(DbName) ->
- rem_listener(St#st.by_dbname, DbName, Pid)
- end, OldDbNames).
-
-
-rem_listener(ByDbName, DbName, Pid) ->
- {value, Listeners} = khash:lookup(ByDbName, DbName),
- khash:del(Listeners, Pid),
- Size = khash:size(Listeners),
- if Size > 0 -> ok; true ->
- khash:del(ByDbName, DbName)
- end.
diff --git a/src/couch_event/src/couch_event_sup2.erl b/src/couch_event/src/couch_event_sup2.erl
deleted file mode 100644
index 2d88b93d4..000000000
--- a/src/couch_event/src/couch_event_sup2.erl
+++ /dev/null
@@ -1,44 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% This is named couch_event_sup2 to avoid
-% naming collisions with the couch_event_sup
-% module contained in the couch app. When
-% that supervisor is removed we'll be free
-% to rename this one.
-
--module(couch_event_sup2).
--behavior(supervisor).
-
-
--export([
- start_link/0,
- init/1
-]).
-
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, nil).
-
-
-init(_) ->
- Children = [
- {couch_event_server,
- {couch_event_server, start_link, []},
- permanent,
- 5000,
- worker,
- [couch_event_server]
- }
- ],
- {ok, {{one_for_one, 5, 10}, Children}}.
-
diff --git a/src/couch_expiring_cache/.suppressed b/src/couch_expiring_cache/.suppressed
new file mode 100644
index 000000000..3baad65f2
--- /dev/null
+++ b/src/couch_expiring_cache/.suppressed
@@ -0,0 +1 @@
+no_return
diff --git a/src/couch_expiring_cache/src/couch_expiring_cache.erl b/src/couch_expiring_cache/src/couch_expiring_cache.erl
index b26556e98..f1ce20276 100644
--- a/src/couch_expiring_cache/src/couch_expiring_cache.erl
+++ b/src/couch_expiring_cache/src/couch_expiring_cache.erl
@@ -31,8 +31,9 @@ insert(Name, Key, Value, StaleTS, ExpiresTS)
insert(undefined, Name, Key, Value, StaleTS, ExpiresTS).
--spec insert(Tx :: jtx(), Name :: binary(), Key :: binary(), Value :: binary(),
- StaleTS :: ?TIME_UNIT(), ExpiresTS :: ?TIME_UNIT()) -> ok.
+-spec insert(Tx :: jtx() | undefined, Name :: binary(), Key :: binary(),
+ Value :: binary(), StaleTS :: ?TIME_UNIT(), ExpiresTS :: ?TIME_UNIT()) -> ok.
+-dialyzer({no_return, insert/6}).
insert(Tx, Name, Key, Value, StaleTS, ExpiresTS)
when is_binary(Name), is_binary(Key), is_binary(Value),
is_integer(StaleTS), is_integer(ExpiresTS) ->
diff --git a/src/couch_expiring_cache/src/couch_expiring_cache_fdb.erl b/src/couch_expiring_cache/src/couch_expiring_cache_fdb.erl
index 7c4ad8f6f..ebc97b926 100644
--- a/src/couch_expiring_cache/src/couch_expiring_cache_fdb.erl
+++ b/src/couch_expiring_cache/src/couch_expiring_cache_fdb.erl
@@ -27,6 +27,7 @@
-include_lib("fabric/include/fabric2.hrl").
-include_lib("couch_expiring_cache/include/couch_expiring_cache.hrl").
+-include_lib("kernel/include/logger.hrl").
% Data model
@@ -37,11 +38,12 @@
-spec insert(JTx :: jtx(), Name :: binary(), Key :: binary(), Value :: binary(),
- StaleTS :: ?TIME_UNIT, ExpiresTS :: ?TIME_UNIT) -> ok.
+ StaleTS :: millisecond(), ExpiresTS :: millisecond()) -> ok.
insert(#{jtx := true} = JTx, Name, Key, Val, StaleTS, ExpiresTS) ->
#{tx := Tx, layer_prefix := LayerPrefix} = couch_jobs_fdb:get_jtx(JTx),
PK = primary_key(Name, Key, LayerPrefix),
- case get_val(Tx, PK) of
+ % Use snapshot here to minimize conflicts on parallel inserts
+ case get_val(erlfdb:snapshot(Tx), PK) of
not_found ->
ok;
{_OldVal, _OldStaleTS, OldExpiresTS} ->
@@ -86,7 +88,7 @@ clear_all(Name) ->
end).
--spec clear_range_to(Name :: binary(), EndTS :: ?TIME_UNIT,
+-spec clear_range_to(Name :: binary(), EndTS :: millisecond(),
Limit :: non_neg_integer()) ->
OldestTS :: ?TIME_UNIT.
clear_range_to(Name, EndTS, Limit) when Limit > 0 ->
@@ -98,7 +100,7 @@ clear_range_to(Name, EndTS, Limit) when Limit > 0 ->
end, 0).
--spec get_range_to(Name :: binary(), EndTS :: ?TIME_UNIT,
+-spec get_range_to(Name :: binary(), EndTS :: millisecond(),
Limit :: non_neg_integer()) ->
[{Key :: binary(), Val :: binary()}].
get_range_to(Name, EndTS, Limit) when Limit > 0 ->
@@ -106,6 +108,7 @@ get_range_to(Name, EndTS, Limit) when Limit > 0 ->
fun(Tx, PK, _XK, Key, _ExpiresTS, Acc) ->
case get_val(Tx, PK) of
not_found ->
+ ?LOG_ERROR(#{what => missing_key, key => Key}),
couch_log:error("~p:entry missing Key: ~p", [?MODULE, Key]),
Acc;
Val ->
diff --git a/src/couch_expiring_cache/src/couch_expiring_cache_server.erl b/src/couch_expiring_cache/src/couch_expiring_cache_server.erl
index 74c432e25..9c0c89972 100644
--- a/src/couch_expiring_cache/src/couch_expiring_cache_server.erl
+++ b/src/couch_expiring_cache/src/couch_expiring_cache_server.erl
@@ -37,6 +37,7 @@
-include_lib("couch_expiring_cache/include/couch_expiring_cache.hrl").
+-include_lib("kernel/include/logger.hrl").
start_link(Name, Opts) when is_atom(Name) ->
@@ -96,6 +97,10 @@ handle_info(remove_expired, St) ->
handle_info({Ref, ready}, St) when is_reference(Ref) ->
% Prevent crashing server and application
+ ?LOG_ERROR(#{
+ what => spurious_future_ready,
+ ref => Ref
+ }),
LogMsg = "~p : spurious erlfdb future ready message ~p",
couch_log:error(LogMsg, [?MODULE, Ref]),
{noreply, St};
diff --git a/src/couch_index/.gitignore b/src/couch_index/.gitignore
deleted file mode 100644
index e24db8ab4..000000000
--- a/src/couch_index/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-/ebin
-.eunit
-.rebar
diff --git a/src/couch_index/LICENSE b/src/couch_index/LICENSE
deleted file mode 100644
index f6cd2bc80..000000000
--- a/src/couch_index/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/src/couch_index/rebar.config b/src/couch_index/rebar.config
deleted file mode 100644
index e0d18443b..000000000
--- a/src/couch_index/rebar.config
+++ /dev/null
@@ -1,2 +0,0 @@
-{cover_enabled, true}.
-{cover_print_enabled, true}.
diff --git a/src/couch_index/src/couch_index.app.src b/src/couch_index/src/couch_index.app.src
deleted file mode 100644
index 3aa92ba5d..000000000
--- a/src/couch_index/src/couch_index.app.src
+++ /dev/null
@@ -1,19 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, couch_index, [
- {description, "CouchDB Secondary Index Manager"},
- {vsn, git},
- {registered, [couch_index_server]},
- {applications, [kernel, stdlib, couch_epi]},
- {mod, {couch_index_app, []}}
-]}.
diff --git a/src/couch_index/src/couch_index.erl b/src/couch_index/src/couch_index.erl
deleted file mode 100644
index 09bd48c61..000000000
--- a/src/couch_index/src/couch_index.erl
+++ /dev/null
@@ -1,639 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index).
--behaviour(gen_server).
-
--compile(tuple_calls).
-
--vsn(3).
-
-%% API
--export([start_link/1, stop/1, get_state/2, get_info/1]).
--export([trigger_update/2]).
--export([compact/1, compact/2, get_compactor_pid/1]).
-
-%% gen_server callbacks
--export([init/1, terminate/2, code_change/3, format_status/2]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
-
--include_lib("couch/include/couch_db.hrl").
-
-
--define(CHECK_INTERVAL, 600000). % 10 minutes
-
--record(st, {
- mod,
- idx_state,
- updater,
- compactor,
- waiters=[],
- committed=true,
- shutdown=false
-}).
-
-
-start_link({Module0, IdxState0}) ->
- [Module, IdxState] = couch_index_plugin:before_open(Module0, IdxState0),
- proc_lib:start_link(?MODULE, init, [{Module, IdxState}]).
-
-
-stop(Pid) ->
- gen_server:cast(Pid, stop).
-
-
-get_state(Pid, RequestSeq) ->
- gen_server:call(Pid, {get_state, RequestSeq}, infinity).
-
-
-get_info(Pid) ->
- gen_server:call(Pid, get_info, group_info_timeout_msec()).
-
-
-trigger_update(Pid, UpdateSeq) ->
- gen_server:cast(Pid, {trigger_update, UpdateSeq}).
-
-
-compact(Pid) ->
- compact(Pid, []).
-
-
-compact(Pid, Options) ->
- {ok, CPid} = gen_server:call(Pid, compact),
- case lists:member(monitor, Options) of
- true -> {ok, erlang:monitor(process, CPid)};
- false -> ok
- end.
-
-
-get_compactor_pid(Pid) ->
- gen_server:call(Pid, get_compactor_pid).
-
-init({Mod, IdxState}) ->
- DbName = Mod:get(db_name, IdxState),
- erlang:send_after(?CHECK_INTERVAL, self(), maybe_close),
- Resp = couch_util:with_db(DbName, fun(Db) ->
- case Mod:open(Db, IdxState) of
- {ok, IdxSt} ->
- couch_db:monitor(Db),
- {ok, IdxSt};
- Error ->
- Error
- end
- end),
- case Resp of
- {ok, NewIdxState} ->
- {ok, UPid} = couch_index_updater:start_link(self(), Mod),
- {ok, CPid} = couch_index_compactor:start_link(self(), Mod),
- State = #st{
- mod=Mod,
- idx_state=NewIdxState,
- updater=UPid,
- compactor=CPid
- },
- Args = [
- Mod:get(db_name, IdxState),
- Mod:get(idx_name, IdxState),
- couch_index_util:hexsig(Mod:get(signature, IdxState))
- ],
- couch_log:debug("Opening index for db: ~s idx: ~s sig: ~p", Args),
- proc_lib:init_ack({ok, self()}),
- gen_server:enter_loop(?MODULE, [], State);
- Other ->
- proc_lib:init_ack(Other)
- end.
-
-
-terminate(Reason0, State) ->
- #st{mod=Mod, idx_state=IdxState}=State,
- case Reason0 of
- {shutdown, ddoc_updated} ->
- Mod:shutdown(IdxState),
- Reason = ddoc_updated;
- _ ->
- Mod:close(IdxState),
- Reason = Reason0
- end,
- send_all(State#st.waiters, Reason),
- couch_util:shutdown_sync(State#st.updater),
- couch_util:shutdown_sync(State#st.compactor),
- Args = [
- Mod:get(db_name, IdxState),
- Mod:get(idx_name, IdxState),
- couch_index_util:hexsig(Mod:get(signature, IdxState)),
- Reason
- ],
- couch_log:debug("Closing index for db: ~s idx: ~s sig: ~p because ~r", Args),
- ok.
-
-
-handle_call({get_state, ReqSeq}, From, State) ->
- #st{
- mod=Mod,
- idx_state=IdxState,
- waiters=Waiters
- } = State,
- IdxSeq = Mod:get(update_seq, IdxState),
- case ReqSeq =< IdxSeq of
- true ->
- {reply, {ok, IdxState}, State};
- _ -> % View update required
- couch_index_updater:run(State#st.updater, IdxState),
- Waiters2 = [{From, ReqSeq} | Waiters],
- {noreply, State#st{waiters=Waiters2}, infinity}
- end;
-handle_call(get_info, _From, State) ->
- #st{mod=Mod} = State,
- IdxState = State#st.idx_state,
- {ok, Info0} = Mod:get(info, IdxState),
- IsUpdating = couch_index_updater:is_running(State#st.updater),
- IsCompacting = couch_index_compactor:is_running(State#st.compactor),
- IdxSeq = Mod:get(update_seq, IdxState),
- GetCommSeq = fun(Db) -> couch_db:get_committed_update_seq(Db) end,
- DbName = Mod:get(db_name, IdxState),
- CommittedSeq = couch_util:with_db(DbName, GetCommSeq),
- Info = Info0 ++ [
- {updater_running, IsUpdating},
- {compact_running, IsCompacting},
- {waiting_commit, State#st.committed == false},
- {waiting_clients, length(State#st.waiters)},
- {pending_updates, max(CommittedSeq - IdxSeq, 0)}
- ],
- {reply, {ok, Info}, State};
-handle_call(reset, _From, State) ->
- #st{
- mod=Mod,
- idx_state=IdxState
- } = State,
- {ok, NewIdxState} = Mod:reset(IdxState),
- {reply, {ok, NewIdxState}, State#st{idx_state=NewIdxState}};
-handle_call(compact, _From, State) ->
- Resp = couch_index_compactor:run(State#st.compactor, State#st.idx_state),
- {reply, Resp, State};
-handle_call(get_compactor_pid, _From, State) ->
- {reply, {ok, State#st.compactor}, State};
-handle_call({compacted, NewIdxState}, _From, State) ->
- #st{
- mod=Mod,
- idx_state=OldIdxState
- } = State,
- assert_signature_match(Mod, OldIdxState, NewIdxState),
- NewSeq = Mod:get(update_seq, NewIdxState),
- OldSeq = Mod:get(update_seq, OldIdxState),
- % For indices that require swapping files, we have to make sure we're
- % up to date with the current index. Otherwise indexes could roll back
- % (perhaps considerably) to previous points in history.
- case is_recompaction_enabled(NewIdxState, State) of
- true ->
- case NewSeq >= OldSeq of
- true -> {reply, ok, commit_compacted(NewIdxState, State)};
- false -> {reply, recompact, State}
- end;
- false ->
- {reply, ok, commit_compacted(NewIdxState, State)}
- end;
-handle_call({compaction_failed, Reason}, _From, State) ->
- #st{
- mod = Mod,
- idx_state = OldIdxState,
- waiters = Waiters
- } = State,
- send_all(Waiters, Reason),
- {ok, NewIdxState} = Mod:remove_compacted(OldIdxState),
- NewState = State#st{idx_state = NewIdxState, waiters = []},
- {reply, {ok, NewIdxState}, NewState}.
-
-handle_cast({trigger_update, UpdateSeq}, State) ->
- #st{
- mod=Mod,
- idx_state=IdxState
- } = State,
- case UpdateSeq =< Mod:get(update_seq, IdxState) of
- true ->
- {noreply, State};
- false ->
- couch_index_updater:run(State#st.updater, IdxState),
- {noreply, State}
- end;
-handle_cast({updated, NewIdxState}, State) ->
- {noreply, NewState} = handle_cast({new_state, NewIdxState}, State),
- case NewState#st.shutdown andalso (NewState#st.waiters =:= []) of
- true ->
- {stop, normal, NewState};
- false ->
- maybe_restart_updater(NewState),
- {noreply, NewState}
- end;
-handle_cast({new_state, NewIdxState}, State) ->
- #st{
- mod=Mod,
- idx_state=OldIdxState
- } = State,
- OldFd = Mod:get(fd, OldIdxState),
- NewFd = Mod:get(fd, NewIdxState),
- case NewFd == OldFd of
- true ->
- assert_signature_match(Mod, OldIdxState, NewIdxState),
- CurrSeq = Mod:get(update_seq, NewIdxState),
- Args = [
- Mod:get(db_name, NewIdxState),
- Mod:get(idx_name, NewIdxState),
- CurrSeq
- ],
- couch_log:debug("Updated index for db: ~s idx: ~s seq: ~B", Args),
- Rest = send_replies(State#st.waiters, CurrSeq, NewIdxState),
- case State#st.committed of
- true -> erlang:send_after(commit_delay(), self(), commit);
- false -> ok
- end,
- {noreply, State#st{
- idx_state=NewIdxState,
- waiters=Rest,
- committed=false
- }};
- false ->
- Fmt = "Ignoring update from old indexer for db: ~s idx: ~s",
- Args = [
- Mod:get(db_name, NewIdxState),
- Mod:get(idx_name, NewIdxState)
- ],
- couch_log:warning(Fmt, Args),
- {noreply, State}
- end;
-handle_cast({update_error, Error}, State) ->
- send_all(State#st.waiters, Error),
- {noreply, State#st{waiters=[]}};
-handle_cast(stop, State) ->
- {stop, normal, State};
-handle_cast(delete, State) ->
- #st{mod=Mod, idx_state=IdxState} = State,
- ok = Mod:delete(IdxState),
- {stop, normal, State};
-handle_cast({ddoc_updated, DDocResult}, State) ->
- #st{mod = Mod, idx_state = IdxState} = State,
- Shutdown = case DDocResult of
- {not_found, deleted} ->
- true;
- {ok, DDoc} ->
- DbName = Mod:get(db_name, IdxState),
- couch_util:with_db(DbName, fun(Db) ->
- {ok, NewIdxState} = Mod:init(Db, DDoc),
- Mod:get(signature, NewIdxState) =/= Mod:get(signature, IdxState)
- end)
- end,
- case Shutdown of
- true ->
- {stop, {shutdown, ddoc_updated}, State#st{shutdown = true}};
- false ->
- {noreply, State#st{shutdown = false}}
- end;
-handle_cast(ddoc_updated, State) ->
- #st{mod = Mod, idx_state = IdxState} = State,
- DbName = Mod:get(db_name, IdxState),
- DDocId = Mod:get(idx_name, IdxState),
- Shutdown = couch_util:with_db(DbName, fun(Db) ->
- case couch_db:open_doc(Db, DDocId, [ejson_body, ?ADMIN_CTX]) of
- {not_found, deleted} ->
- true;
- {ok, DDoc} ->
- {ok, NewIdxState} = Mod:init(Db, DDoc),
- Mod:get(signature, NewIdxState) =/= Mod:get(signature, IdxState)
- end
- end),
- case Shutdown of
- true ->
- {stop, {shutdown, ddoc_updated}, State#st{shutdown = true}};
- false ->
- {noreply, State#st{shutdown = false}}
- end;
-handle_cast(_Mesg, State) ->
- {stop, unhandled_cast, State}.
-
-handle_info(commit, #st{committed=true}=State) ->
- {noreply, State};
-handle_info(commit, State) ->
- #st{mod=Mod, idx_state=IdxState} = State,
- DbName = Mod:get(db_name, IdxState),
- IdxName = Mod:get(idx_name, IdxState),
- GetCommSeq = fun(Db) -> couch_db:get_committed_update_seq(Db) end,
- CommittedSeq = couch_util:with_db(DbName, GetCommSeq),
- case CommittedSeq >= Mod:get(update_seq, IdxState) of
- true ->
- % Commit the updates
- ok = Mod:commit(IdxState),
- couch_event:notify(DbName, {index_commit, IdxName}),
- {noreply, State#st{committed=true}};
- _ ->
- % We can't commit the header because the database seq that's
- % fully committed to disk is still behind us. If we committed
- % now and the database lost those changes our view could be
- % forever out of sync with the database. But a crash before we
- % commit these changes, no big deal, we only lose incremental
- % changes since last committal.
- erlang:send_after(commit_delay(), self(), commit),
- {noreply, State}
- end;
-handle_info(maybe_close, State) ->
- % We need to periodically check if our index file still
- % exists on disk because index cleanups don't notify
- % the couch_index process when a file has been deleted. If
- % we don't check for this condition then the index can
- % remain open indefinitely wasting disk space.
- %
- % We make sure that we're idle before closing by looking
- % to see if we have any clients waiting for an update.
- Mod = State#st.mod,
- case State#st.waiters of
- [] ->
- case Mod:index_file_exists(State#st.idx_state) of
- true ->
- erlang:send_after(?CHECK_INTERVAL, self(), maybe_close),
- {noreply, State};
- false ->
- {stop, normal, State}
- end;
- _ ->
- erlang:send_after(?CHECK_INTERVAL, self(), maybe_close),
- {noreply, State}
- end;
-handle_info({'DOWN', _, _, _Pid, _}, #st{mod=Mod, idx_state=IdxState}=State) ->
- Args = [Mod:get(db_name, IdxState), Mod:get(idx_name, IdxState)],
- couch_log:debug("Index shutdown by monitor notice for db: ~s idx: ~s", Args),
- catch send_all(State#st.waiters, shutdown),
- {stop, normal, State#st{waiters=[]}}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-format_status(Opt, [PDict, State]) ->
- #st{
- mod = Mod,
- waiters = Waiters,
- idx_state = IdxState
- } = State,
- Scrubbed = State#st{waiters = {length, length(Waiters)}},
- IdxSafeState = case erlang:function_exported(Mod, format_status, 2) of
- true ->
- Mod:format_status(Opt, [PDict, IdxState]);
- false ->
- []
- end,
- [{data, [{"State",
- ?record_to_keyval(st, Scrubbed) ++ IdxSafeState
- }]}].
-
-maybe_restart_updater(#st{waiters=[]}) ->
- ok;
-maybe_restart_updater(#st{idx_state=IdxState}=State) ->
- couch_index_updater:run(State#st.updater, IdxState).
-
-
-send_all(Waiters, Reply) ->
- [gen_server:reply(From, Reply) || {From, _} <- Waiters].
-
-
-send_replies(Waiters, UpdateSeq, IdxState) ->
- Pred = fun({_, S}) -> S =< UpdateSeq end,
- {ToSend, Remaining} = lists:partition(Pred, Waiters),
- [gen_server:reply(From, {ok, IdxState}) || {From, _} <- ToSend],
- Remaining.
-
-assert_signature_match(Mod, OldIdxState, NewIdxState) ->
- case {Mod:get(signature, OldIdxState), Mod:get(signature, NewIdxState)} of
- {Sig, Sig} -> ok;
- _ -> erlang:error(signature_mismatch)
- end.
-
-commit_compacted(NewIdxState, State) ->
- #st{
- mod=Mod,
- idx_state=OldIdxState,
- updater=Updater
- } = State,
- {ok, NewIdxState1} = Mod:swap_compacted(OldIdxState, NewIdxState),
- % Restart the indexer if it's running.
- case couch_index_updater:is_running(Updater) of
- true -> ok = couch_index_updater:restart(Updater, NewIdxState1);
- false -> ok
- end,
- case State#st.committed of
- true -> erlang:send_after(commit_delay(), self(), commit);
- false -> ok
- end,
- State#st{
- idx_state=NewIdxState1,
- committed=false
- }.
-
-is_recompaction_enabled(IdxState, #st{mod = Mod}) ->
- DbName = binary_to_list(Mod:get(db_name, IdxState)),
- IdxName = binary_to_list(Mod:get(idx_name, IdxState)),
- IdxKey = DbName ++ ":" ++ IdxName,
-
- IdxSignature = couch_index_util:hexsig((Mod:get(signature, IdxState))),
-
- Global = get_value("view_compaction", "enabled_recompaction"),
- PerSignature = get_value("view_compaction.recompaction", IdxSignature),
- PerIdx = get_value("view_compaction.recompaction", IdxKey),
- PerDb = get_value("view_compaction.recompaction", DbName),
-
- find_most_specific([Global, PerDb, PerIdx, PerSignature], true).
-
-find_most_specific(Settings, Default) ->
- Reversed = lists:reverse([Default | Settings]),
- [Value | _] = lists:dropwhile(fun(A) -> A =:= undefined end, Reversed),
- Value.
-
-get_value(Section, Key) ->
- case config:get(Section, Key) of
- "enabled" -> true;
- "disabled" -> false;
- "true" -> true;
- "false" -> false;
- undefined -> undefined
- end.
-
-commit_delay() ->
- config:get_integer("query_server_config", "commit_freq", 5) * 1000.
-
-
-group_info_timeout_msec() ->
- Timeout = config:get("query_server_config", "group_info_timeout", "5000"),
- case Timeout of
- "infinity" ->
- infinity;
- Milliseconds ->
- list_to_integer(Milliseconds)
- end.
-
-
--ifdef(TEST).
--include_lib("couch/include/couch_eunit.hrl").
-
-get(db_name, _, _) ->
- <<"db_name">>;
-get(idx_name, _, _) ->
- <<"idx_name">>;
-get(signature, _, _) ->
- <<61,237,157,230,136,93,96,201,204,17,137,186,50,249,44,135>>.
-
-setup_all() ->
- Ctx = test_util:start_couch(),
- ok = meck:new([config], [passthrough]),
- ok = meck:new([test_index], [non_strict]),
- ok = meck:expect(test_index, get, fun get/3),
- Ctx.
-
-teardown_all(Ctx) ->
- meck:unload(),
- test_util:stop_couch(Ctx).
-
-setup(Settings) ->
- meck:reset([config, test_index]),
- ok = meck:expect(config, get, fun(Section, Key) ->
- configure(Section, Key, Settings)
- end),
- {undefined, #st{mod = {test_index}}}.
-
-teardown(_, _) ->
- ok.
-
-configure("view_compaction", "enabled_recompaction", [Global, _Db, _Index]) ->
- Global;
-configure("view_compaction.recompaction", "db_name", [_Global, Db, _Index]) ->
- Db;
-configure("view_compaction.recompaction", "db_name:" ++ _, [_, _, Index]) ->
- Index;
-configure(Section, Key, _) ->
- meck:passthrough([Section, Key]).
-
-recompaction_configuration_test_() ->
- {
- "Compaction tests",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreachx,
- fun setup/1,
- fun teardown/2,
- recompaction_configuration_tests()
- }
- }
- }.
-
-recompaction_configuration_tests() ->
- AllCases = couch_tests_combinatorics:product([
- [undefined, "true", "false"],
- [undefined, "enabled", "disabled"],
- [undefined, "enabled", "disabled"]
- ]),
-
- EnabledCases = [
- [undefined, undefined, undefined],
-
- [undefined, undefined,"enabled"],
- [undefined, "enabled", undefined],
- [undefined, "disabled", "enabled"],
- [undefined, "enabled", "enabled"],
-
- ["true", undefined, undefined],
- ["true", undefined, "enabled"],
- ["true", "disabled", "enabled"],
- ["true", "enabled", undefined],
- ["true", "enabled", "enabled"],
-
- ["false", undefined, "enabled"],
- ["false", "enabled", undefined],
- ["false", "disabled", "enabled"],
- ["false", "enabled", "enabled"]
- ],
-
- DisabledCases = [
- [undefined, undefined, "disabled"],
- [undefined, "disabled", undefined],
- [undefined, "disabled", "disabled"],
- [undefined, "enabled", "disabled"],
-
- ["true", undefined, "disabled"],
- ["true", "disabled", undefined],
- ["true", "disabled", "disabled"],
- ["true", "enabled", "disabled"],
-
- ["false", undefined, undefined],
- ["false", undefined, "disabled"],
- ["false", "disabled", undefined],
- ["false", "disabled", "disabled"],
- ["false", "enabled", "disabled"]
- ],
-
- ?assertEqual([], AllCases -- (EnabledCases ++ DisabledCases)),
-
- [{Settings, fun should_not_call_recompact/2} || Settings <- DisabledCases]
- ++
- [{Settings, fun should_call_recompact/2} || Settings <- EnabledCases].
-
-should_call_recompact(Settings, {IdxState, State}) ->
- {test_id(Settings), ?_test(begin
- ?assert(is_recompaction_enabled(IdxState, State)),
- ok
- end)}.
-
-should_not_call_recompact(Settings, {IdxState, State}) ->
- {test_id(Settings), ?_test(begin
- ?assertNot(is_recompaction_enabled(IdxState, State)),
- ok
- end)}.
-
-to_string(undefined) -> "undefined";
-to_string(Value) -> Value.
-
-test_id(Settings0) ->
- Settings1 = [to_string(Value) || Value <- Settings0],
- "[ " ++ lists:flatten(string:join(Settings1, " , ")) ++ " ]".
-
-
-get_group_timeout_info_test_() ->
- {
- foreach,
- fun() -> ok end,
- fun(_) -> meck:unload() end,
- [
- t_group_timeout_info_integer(),
- t_group_timeout_info_infinity()
- ]
- }.
-
-
-t_group_timeout_info_integer() ->
- ?_test(begin
- meck:expect(config, get,
- fun("query_server_config", "group_info_timeout", _) ->
- "5001"
- end),
- ?assertEqual(5001, group_info_timeout_msec())
- end).
-
-
-t_group_timeout_info_infinity() ->
- ?_test(begin
- meck:expect(config, get,
- fun("query_server_config", "group_info_timeout", _) ->
- "infinity"
- end),
- ?assertEqual(infinity, group_info_timeout_msec())
- end).
-
-
--endif.
diff --git a/src/couch_index/src/couch_index_app.erl b/src/couch_index/src/couch_index_app.erl
deleted file mode 100644
index bdf770cb2..000000000
--- a/src/couch_index/src/couch_index_app.erl
+++ /dev/null
@@ -1,21 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_app).
--behaviour(application).
--export([start/2, stop/1]).
-
-start(_Type, StartArgs) ->
- couch_index_sup:start_link(StartArgs).
-
-stop(_State) ->
- ok.
diff --git a/src/couch_index/src/couch_index_compactor.erl b/src/couch_index/src/couch_index_compactor.erl
deleted file mode 100644
index 8849cf67d..000000000
--- a/src/couch_index/src/couch_index_compactor.erl
+++ /dev/null
@@ -1,135 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_compactor).
--behaviour(gen_server).
-
-
-%% API
--export([start_link/2, run/2, cancel/1, is_running/1, get_compacting_pid/1]).
-
-%% gen_server callbacks
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
-
--include_lib("couch/include/couch_db.hrl").
-
-
--record(st, {
- idx,
- mod,
- pid
-}).
-
-
-start_link(Index, Module) ->
- gen_server:start_link(?MODULE, {Index, Module}, []).
-
-
-run(Pid, IdxState) ->
- gen_server:call(Pid, {compact, IdxState}).
-
-
-cancel(Pid) ->
- gen_server:call(Pid, cancel).
-
-
-is_running(Pid) ->
- gen_server:call(Pid, is_running).
-
-get_compacting_pid(Pid) ->
- gen_server:call(Pid, get_compacting_pid).
-
-init({Index, Module}) ->
- process_flag(trap_exit, true),
- {ok, #st{idx=Index, mod=Module}}.
-
-
-terminate(_Reason, State) ->
- couch_util:shutdown_sync(State#st.pid),
- ok.
-
-
-handle_call({compact, _}, _From, #st{pid=Pid}=State) when is_pid(Pid) ->
- {reply, {ok, Pid}, State};
-handle_call({compact, IdxState}, _From, #st{idx=Idx}=State) ->
- Pid = spawn_link(fun() -> compact(Idx, State#st.mod, IdxState) end),
- {reply, {ok, Pid}, State#st{pid=Pid}};
-handle_call(cancel, _From, #st{pid=undefined}=State) ->
- {reply, ok, State};
-handle_call(cancel, _From, #st{pid=Pid}=State) ->
- unlink(Pid),
- exit(Pid, kill),
- {reply, ok, State#st{pid=undefined}};
-handle_call(get_compacting_pid, _From, #st{pid=Pid}=State) ->
- {reply, {ok, Pid}, State};
-handle_call(is_running, _From, #st{pid=Pid}=State) when is_pid(Pid) ->
- {reply, true, State};
-handle_call(is_running, _From, State) ->
- {reply, false, State}.
-
-
-handle_cast(_Mesg, State) ->
- {stop, unknown_cast, State}.
-
-
-handle_info({'EXIT', Pid, normal}, #st{pid=Pid}=State) ->
- {noreply, State#st{pid=undefined}};
-handle_info({'EXIT', Pid, Reason}, #st{pid = Pid} = State) ->
- #st{idx = Idx, mod = Mod} = State,
- {ok, IdxState} = gen_server:call(Idx, {compaction_failed, Reason}),
- DbName = Mod:get(db_name, IdxState),
- IdxName = Mod:get(idx_name, IdxState),
- Args = [DbName, IdxName, Reason],
- couch_log:error("Compaction failed for db: ~s idx: ~s reason: ~p", Args),
- {noreply, State#st{pid = undefined}};
-handle_info({'EXIT', _Pid, normal}, State) ->
- {noreply, State};
-handle_info({'EXIT', Pid, _Reason}, #st{idx=Pid}=State) ->
- {stop, normal, State};
-handle_info(_Mesg, State) ->
- {stop, unknown_info, State}.
-
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-
-compact(Parent, Mod, IdxState) ->
- DbName = Mod:get(db_name, IdxState),
- %% We use with_db here to make sure we hold db open
- %% during both phases of compaction
- %% * compact
- %% * recompact
- couch_util:with_db(DbName, fun(_) ->
- compact(Parent, Mod, IdxState, [])
- end).
-
-compact(Idx, Mod, IdxState, Opts) ->
- DbName = Mod:get(db_name, IdxState),
- IndexName = Mod:get(idx_name, IdxState),
- erlang:put(io_priority, {view_compact, DbName, IndexName}),
- Args = [DbName, Mod:get(idx_name, IdxState)],
- couch_log:info("Compaction started for db: ~s idx: ~s", Args),
- {ok, NewIdxState} = couch_util:with_db(DbName, fun(Db) ->
- Mod:compact(Db, IdxState, Opts)
- end),
- ok = Mod:commit(NewIdxState),
- case gen_server:call(Idx, {compacted, NewIdxState}) of
- recompact ->
- couch_log:info("Compaction restarting for db: ~s idx: ~s", Args),
- compact(Idx, Mod, NewIdxState, [recompact]);
- _ ->
- couch_log:info("Compaction finished for db: ~s idx: ~s", Args),
- ok
- end.
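
The deleted compactor above runs Mod:compact/3 inside with_db, commits the result, and then asks the owning couch_index process what to do next; a recompact reply triggers another pass with the [recompact] option. A minimal sketch of that handshake, with hypothetical CompactFun and AskOwnerFun callbacks standing in for the index module and the gen_server call:

    -module(compact_loop_sketch).
    -export([compact_until_done/2]).

    %% Mirrors the compact/4 loop above. CompactFun(Opts) is a hypothetical
    %% callback returning {ok, NewState}; AskOwnerFun(NewState) stands in for
    %% gen_server:call(Idx, {compacted, NewState}) and returns recompact | ok.
    compact_until_done(CompactFun, AskOwnerFun) ->
        compact_until_done(CompactFun, AskOwnerFun, []).

    compact_until_done(CompactFun, AskOwnerFun, Opts) ->
        {ok, NewState} = CompactFun(Opts),
        case AskOwnerFun(NewState) of
            recompact ->
                %% The owner saw more updates during compaction; run another
                %% pass with the recompact option, as the original does.
                compact_until_done(CompactFun, AskOwnerFun, [recompact]);
            _ ->
                {ok, NewState}
        end.
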
diff --git a/src/couch_index/src/couch_index_epi.erl b/src/couch_index/src/couch_index_epi.erl
deleted file mode 100644
index 1c4eb9596..000000000
--- a/src/couch_index/src/couch_index_epi.erl
+++ /dev/null
@@ -1,50 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_epi).
-
--behaviour(couch_epi_plugin).
-
--export([
- app/0,
- providers/0,
- services/0,
- data_subscriptions/0,
- data_providers/0,
- processes/0,
- notify/3
-]).
-
-app() ->
- couch_index.
-
-providers() ->
- [
- {couch_db, couch_index_plugin_couch_db}
- ].
-
-services() ->
- [
- {couch_index, couch_index_plugin}
- ].
-
-data_subscriptions() ->
- [].
-
-data_providers() ->
- [].
-
-processes() ->
- [].
-
-notify(_Key, _Old, _New) ->
- ok.
diff --git a/src/couch_index/src/couch_index_plugin.erl b/src/couch_index/src/couch_index_plugin.erl
deleted file mode 100644
index 4c2f7e68a..000000000
--- a/src/couch_index/src/couch_index_plugin.erl
+++ /dev/null
@@ -1,51 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_plugin).
-
--export([index_update/4]).
-
--export([before_open/2]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(SERVICE_ID, couch_index).
-
-%% ------------------------------------------------------------------
-%% API Function Definitions
-%% ------------------------------------------------------------------
-
-index_update(State, View, Updated, Removed) ->
- Handle = couch_epi:get_handle(?SERVICE_ID),
- case couch_epi:is_configured(Handle, index_update, 4) of
- true ->
- update(Handle, State, View, Updated, Removed);
- false ->
- ok
- end.
-
-before_open(Mod, State) ->
- Handle = couch_epi:get_handle(?SERVICE_ID),
- couch_epi:apply(Handle, ?SERVICE_ID, before_open, [Mod, State], [pipe]).
-
-%% ------------------------------------------------------------------
-%% Internal Function Definitions
-%% ------------------------------------------------------------------
-
-maybe_transform(Fun) when is_function(Fun) ->
- Fun();
-maybe_transform(Items) ->
- Items.
-
-update(Handle, State, View, Updated, Removed) ->
- Args = [State, View, maybe_transform(Updated), maybe_transform(Removed)],
- couch_epi:apply(Handle, ?SERVICE_ID, index_update, Args, []).
diff --git a/src/couch_index/src/couch_index_plugin_couch_db.erl b/src/couch_index/src/couch_index_plugin_couch_db.erl
deleted file mode 100644
index 0af22e396..000000000
--- a/src/couch_index/src/couch_index_plugin_couch_db.erl
+++ /dev/null
@@ -1,26 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_plugin_couch_db).
-
--export([
- is_valid_purge_client/2,
- on_compact/2
-]).
-
-
-is_valid_purge_client(DbName, Props) ->
- couch_mrview_index:verify_index_exists(DbName, Props).
-
-
-on_compact(DbName, DDocs) ->
- couch_mrview_index:ensure_local_purge_docs(DbName, DDocs).
diff --git a/src/couch_index/src/couch_index_server.erl b/src/couch_index/src/couch_index_server.erl
deleted file mode 100644
index 6bebff2d8..000000000
--- a/src/couch_index/src/couch_index_server.erl
+++ /dev/null
@@ -1,322 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_server).
--behaviour(gen_server).
--behaviour(config_listener).
-
--vsn(2).
-
--export([start_link/0, validate/2, get_index/4, get_index/3, get_index/2]).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
-% Exported for callbacks
--export([
- handle_config_change/5,
- handle_config_terminate/3,
- handle_db_event/3
-]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(BY_SIG, couchdb_indexes_by_sig).
--define(BY_PID, couchdb_indexes_by_pid).
--define(BY_DB, couchdb_indexes_by_db).
--define(RELISTEN_DELAY, 5000).
-
--record(st, {root_dir}).
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-
-validate(Db, DDoc) ->
- LoadModFun = fun
- ({ModNameList, "true"}) ->
- try
- [list_to_existing_atom(ModNameList)]
- catch error:badarg ->
- []
- end;
- ({_ModNameList, _Enabled}) ->
- []
- end,
- ValidateFun = fun
- (ModName) ->
- ModName:validate(Db, DDoc)
- end,
- EnabledIndexers = lists:flatmap(LoadModFun, config:get("indexers")),
- lists:foreach(ValidateFun, EnabledIndexers).
-
-
-get_index(Module, <<"shards/", _/binary>> = DbName, DDoc)
- when is_record(DDoc, doc) ->
- get_index(Module, DbName, DDoc, nil);
-get_index(Module, <<"shards/", _/binary>> = DbName, DDoc) ->
- {Pid, Ref} = spawn_monitor(fun() ->
- exit(fabric:open_doc(mem3:dbname(DbName), DDoc, [ejson_body, ?ADMIN_CTX]))
- end),
- receive {'DOWN', Ref, process, Pid, {ok, Doc}} ->
- get_index(Module, DbName, Doc, nil);
- {'DOWN', Ref, process, Pid, Error} ->
- Error
- after 61000 ->
- erlang:demonitor(Ref, [flush]),
- {error, timeout}
- end;
-get_index(Module, DbName, DDoc) when is_binary(DbName) ->
- get_index(Module, DbName, DDoc, nil);
-get_index(Module, Db, DDoc) ->
- get_index(Module, couch_db:name(Db), DDoc).
-
-
-get_index(Module, DbName, DDoc, Fun) when is_binary(DbName) ->
- couch_util:with_db(DbName, fun(Db) ->
- get_index(Module, Db, DDoc, Fun)
- end);
-get_index(Module, Db, DDoc, Fun) when is_binary(DDoc) ->
- case couch_db:open_doc(Db, DDoc, [ejson_body, ?ADMIN_CTX]) of
- {ok, Doc} -> get_index(Module, Db, Doc, Fun);
- Error -> Error
- end;
-get_index(Module, Db, DDoc, Fun) when is_function(Fun, 1) ->
- {ok, InitState} = Module:init(Db, DDoc),
- {ok, FunResp} = Fun(InitState),
- {ok, Pid} = get_index(Module, InitState),
- {ok, Pid, FunResp};
-get_index(Module, Db, DDoc, _Fun) ->
- {ok, InitState} = Module:init(Db, DDoc),
- get_index(Module, InitState).
-
-
-get_index(Module, IdxState) ->
- DbName = Module:get(db_name, IdxState),
- Sig = Module:get(signature, IdxState),
- case ets:lookup(?BY_SIG, {DbName, Sig}) of
- [{_, Pid}] when is_pid(Pid) ->
- DDocId = Module:get(idx_name, IdxState),
- case ets:match_object(?BY_DB, {DbName, {DDocId, Sig}}) of
- [] ->
- Args = [Pid, DbName, DDocId, Sig],
- gen_server:cast(?MODULE, {add_to_ets, Args});
- _ -> ok
- end,
- {ok, Pid};
- _ ->
- Args = {Module, IdxState, DbName, Sig},
- gen_server:call(?MODULE, {get_index, Args}, infinity)
- end.
-
-
-init([]) ->
- process_flag(trap_exit, true),
- ok = config:listen_for_changes(?MODULE, couch_index_util:root_dir()),
- ets:new(?BY_SIG, [protected, set, named_table]),
- ets:new(?BY_PID, [private, set, named_table]),
- ets:new(?BY_DB, [protected, bag, named_table]),
- couch_event:link_listener(?MODULE, handle_db_event, nil, [all_dbs]),
- RootDir = couch_index_util:root_dir(),
- couch_file:init_delete_dir(RootDir),
- {ok, #st{root_dir=RootDir}}.
-
-
-terminate(_Reason, _State) ->
- Pids = [Pid || {Pid, _} <- ets:tab2list(?BY_PID)],
- lists:map(fun couch_util:shutdown_sync/1, Pids),
- ok.
-
-
-handle_call({get_index, {_Mod, _IdxState, DbName, Sig}=Args}, From, State) ->
- case ets:lookup(?BY_SIG, {DbName, Sig}) of
- [] ->
- spawn_link(fun() -> new_index(Args) end),
- ets:insert(?BY_SIG, {{DbName, Sig}, [From]}),
- {noreply, State};
- [{_, Waiters}] when is_list(Waiters) ->
- ets:insert(?BY_SIG, {{DbName, Sig}, [From | Waiters]}),
- {noreply, State};
- [{_, Pid}] when is_pid(Pid) ->
- {reply, {ok, Pid}, State}
- end;
-handle_call({async_open, {DbName, DDocId, Sig}, {ok, Pid}}, _From, State) ->
- [{_, Waiters}] = ets:lookup(?BY_SIG, {DbName, Sig}),
- [gen_server:reply(From, {ok, Pid}) || From <- Waiters],
- link(Pid),
- add_to_ets(DbName, Sig, DDocId, Pid),
- {reply, ok, State};
-handle_call({async_error, {DbName, _DDocId, Sig}, Error}, _From, State) ->
- [{_, Waiters}] = ets:lookup(?BY_SIG, {DbName, Sig}),
- [gen_server:reply(From, Error) || From <- Waiters],
- ets:delete(?BY_SIG, {DbName, Sig}),
- {reply, ok, State};
-handle_call({reset_indexes, DbName}, _From, State) ->
- reset_indexes(DbName, State#st.root_dir),
- {reply, ok, State}.
-
-
-handle_cast({reset_indexes, DbName}, State) ->
- reset_indexes(DbName, State#st.root_dir),
- {noreply, State};
-handle_cast({add_to_ets, [Pid, DbName, DDocId, Sig]}, State) ->
- % check if Pid still exists
- case ets:lookup(?BY_PID, Pid) of
- [{Pid, {DbName, Sig}}] when is_pid(Pid) ->
- ets:insert(?BY_DB, {DbName, {DDocId, Sig}});
- _ -> ok
- end,
- {noreply, State};
-handle_cast({rem_from_ets, [DbName, DDocId, Sig]}, State) ->
- ets:delete_object(?BY_DB, {DbName, {DDocId, Sig}}),
- {noreply, State}.
-
-handle_info({'EXIT', Pid, Reason}, Server) ->
- case ets:lookup(?BY_PID, Pid) of
- [{Pid, {DbName, Sig}}] ->
- DDocIds = [DDocId || {_, {DDocId, _}}
- <- ets:match_object(?BY_DB, {DbName, {'$1', Sig}})],
- rem_from_ets(DbName, Sig, DDocIds, Pid);
- [] when Reason /= normal ->
- exit(Reason);
- _Else ->
- ok
- end,
- {noreply, Server};
-handle_info(restart_config_listener, State) ->
- ok = config:listen_for_changes(?MODULE, couch_index_util:root_dir()),
- {noreply, State};
-handle_info(Msg, State) ->
- couch_log:warning("~p did not expect ~p", [?MODULE, Msg]),
- {noreply, State}.
-
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-
-handle_config_change("couchdb", "index_dir", RootDir, _, RootDir) ->
- {ok, RootDir};
-handle_config_change("couchdb", "view_index_dir", RootDir, _, RootDir) ->
- {ok, RootDir};
-handle_config_change("couchdb", "index_dir", _, _, _) ->
- exit(whereis(couch_index_server), config_change),
- remove_handler;
-handle_config_change("couchdb", "view_index_dir", _, _, _) ->
- exit(whereis(couch_index_server), config_change),
- remove_handler;
-handle_config_change(_, _, _, _, RootDir) ->
- {ok, RootDir}.
-
-handle_config_terminate(_, stop, _) ->
- ok;
-handle_config_terminate(_Server, _Reason, _State) ->
- erlang:send_after(?RELISTEN_DELAY, whereis(?MODULE), restart_config_listener),
- {ok, couch_index_util:root_dir()}.
-
-new_index({Mod, IdxState, DbName, Sig}) ->
- DDocId = Mod:get(idx_name, IdxState),
- case couch_index:start_link({Mod, IdxState}) of
- {ok, Pid} ->
- ok = gen_server:call(
- ?MODULE, {async_open, {DbName, DDocId, Sig}, {ok, Pid}}),
- unlink(Pid);
- Error ->
- ok = gen_server:call(
- ?MODULE, {async_error, {DbName, DDocId, Sig}, Error})
- end.
-
-
-reset_indexes(DbName, Root) ->
- % shut down all the updaters and clear the files; the db has changed
- SigDDocIds = lists:foldl(fun({_, {DDocId, Sig}}, DDict) ->
- dict:append(Sig, DDocId, DDict)
- end, dict:new(), ets:lookup(?BY_DB, DbName)),
- Fun = fun({Sig, DDocIds}) ->
- [{_, Pid}] = ets:lookup(?BY_SIG, {DbName, Sig}),
- unlink(Pid),
- gen_server:cast(Pid, delete),
- receive
- {'EXIT', Pid, _} ->
- ok
- after
- 0 ->
- ok
- end,
- rem_from_ets(DbName, Sig, DDocIds, Pid)
- end,
- lists:foreach(Fun, dict:to_list(SigDDocIds)),
- Path = couch_index_util:index_dir("", DbName),
- couch_file:nuke_dir(Root, Path).
-
-
-add_to_ets(DbName, Sig, DDocId, Pid) ->
- ets:insert(?BY_SIG, {{DbName, Sig}, Pid}),
- ets:insert(?BY_PID, {Pid, {DbName, Sig}}),
- ets:insert(?BY_DB, {DbName, {DDocId, Sig}}).
-
-
-rem_from_ets(DbName, Sig, DDocIds, Pid) ->
- ets:delete(?BY_SIG, {DbName, Sig}),
- ets:delete(?BY_PID, Pid),
- lists:foreach(fun(DDocId) ->
- ets:delete_object(?BY_DB, {DbName, {DDocId, Sig}})
- end, DDocIds).
-
-
-handle_db_event(DbName, created, St) ->
- gen_server:cast(?MODULE, {reset_indexes, DbName}),
- {ok, St};
-handle_db_event(DbName, deleted, St) ->
- gen_server:cast(?MODULE, {reset_indexes, DbName}),
- {ok, St};
-handle_db_event(<<"shards/", _/binary>> = DbName, {ddoc_updated,
- DDocId}, St) ->
- DDocResult = couch_util:with_db(DbName, fun(Db) ->
- couch_db:open_doc(Db, DDocId, [ejson_body, ?ADMIN_CTX])
- end),
- LocalShards = try mem3:local_shards(mem3:dbname(DbName))
- catch error:database_does_not_exist ->
- []
- end,
- DbShards = [mem3:name(Sh) || Sh <- LocalShards],
- lists:foreach(fun(DbShard) ->
- lists:foreach(fun({_DbShard, {_DDocId, Sig}}) ->
- % check if there are other ddocs with the same Sig for the same db
- SigDDocs = ets:match_object(?BY_DB, {DbShard, {'$1', Sig}}),
- if length(SigDDocs) > 1 ->
- % remove records from ?BY_DB for this DDoc
- Args = [DbShard, DDocId, Sig],
- gen_server:cast(?MODULE, {rem_from_ets, Args});
- true ->
- % single DDoc with this Sig - close couch_index processes
- case ets:lookup(?BY_SIG, {DbShard, Sig}) of
- [{_, IndexPid}] -> (catch
- gen_server:cast(IndexPid, {ddoc_updated, DDocResult}));
- [] -> []
- end
- end
- end, ets:match_object(?BY_DB, {DbShard, {DDocId, '$1'}}))
- end, DbShards),
- {ok, St};
-handle_db_event(DbName, {ddoc_updated, DDocId}, St) ->
- lists:foreach(fun({_DbName, {_DDocId, Sig}}) ->
- case ets:lookup(?BY_SIG, {DbName, Sig}) of
- [{_, IndexPid}] ->
- (catch gen_server:cast(IndexPid, ddoc_updated));
- [] ->
- ok
- end
- end, ets:match_object(?BY_DB, {DbName, {DDocId, '$1'}})),
- {ok, St};
-handle_db_event(_DbName, _Event, St) ->
- {ok, St}.
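
The deleted couch_index_server keeps its bookkeeping in three ETS tables: couchdb_indexes_by_sig maps {DbName, Sig} to the index pid, couchdb_indexes_by_pid holds the inverse, and couchdb_indexes_by_db is a bag of {DDocId, Sig} entries per database, which is how an 'EXIT' or a ddoc update is resolved back to the affected design documents. A small self-contained sketch of that layout and the reverse lookups (unnamed local tables instead of the server's registered ones):

    -module(index_registry_sketch).
    -export([demo/0]).

    %% Recreates the three-table layout used by couch_index_server.
    demo() ->
        BySig = ets:new(by_sig, [set]),
        ByPid = ets:new(by_pid, [set]),
        ByDb = ets:new(by_db, [bag]),
        DbName = <<"db">>,
        Sig = <<"sig">>,
        DDocId = <<"_design/d">>,
        Pid = self(),
        ets:insert(BySig, {{DbName, Sig}, Pid}),
        ets:insert(ByPid, {Pid, {DbName, Sig}}),
        ets:insert(ByDb, {DbName, {DDocId, Sig}}),
        %% An 'EXIT' from Pid resolves back to its {DbName, Sig} pair ...
        [{Pid, {DbName, Sig}}] = ets:lookup(ByPid, Pid),
        %% ... and from there to every ddoc that shares the signature.
        [{DbName, {DDocId, Sig}}] = ets:match_object(ByDb, {DbName, {'$1', Sig}}),
        ok.
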
diff --git a/src/couch_index/src/couch_index_sup.erl b/src/couch_index/src/couch_index_sup.erl
deleted file mode 100644
index 2d4f671e2..000000000
--- a/src/couch_index/src/couch_index_sup.erl
+++ /dev/null
@@ -1,24 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_sup).
--behaviour(supervisor).
--export([init/1]).
-
--export([start_link/1]).
-
-
-start_link(Args) ->
- supervisor:start_link({local,?MODULE}, ?MODULE, Args).
-
-init([]) ->
- {ok, {{one_for_one, 3, 10}, couch_epi:register_service(couch_index_epi, [])}}.
diff --git a/src/couch_index/src/couch_index_updater.erl b/src/couch_index/src/couch_index_updater.erl
deleted file mode 100644
index fb15db052..000000000
--- a/src/couch_index/src/couch_index_updater.erl
+++ /dev/null
@@ -1,239 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_updater).
--behaviour(gen_server).
-
-
-%% API
--export([start_link/2, run/2, is_running/1, update/2, restart/2]).
-
-%% for upgrades
--export([update/3]).
-
-%% gen_server callbacks
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
--include_lib("couch/include/couch_db.hrl").
-
--record(st, {
- idx,
- mod,
- pid=nil
-}).
-
-
-start_link(Index, Module) ->
- gen_server:start_link(?MODULE, {Index, Module}, []).
-
-
-run(Pid, IdxState) ->
- gen_server:call(Pid, {update, IdxState}).
-
-
-is_running(Pid) ->
- gen_server:call(Pid, is_running).
-
-
-update(Mod, State) ->
- update(nil, Mod, State).
-
-
-restart(Pid, IdxState) ->
- gen_server:call(Pid, {restart, IdxState}).
-
-
-init({Index, Module}) ->
- process_flag(trap_exit, true),
- {ok, #st{idx=Index, mod=Module}}.
-
-
-terminate(_Reason, State) ->
- couch_util:shutdown_sync(State#st.pid),
- ok.
-
-
-handle_call({update, _IdxState}, _From, #st{pid=Pid}=State) when is_pid(Pid) ->
- {reply, ok, State};
-handle_call({update, IdxState}, _From, #st{idx=Idx, mod=Mod}=State) ->
- Args = [Mod:get(db_name, IdxState), Mod:get(idx_name, IdxState)],
- couch_log:info("Starting index update for db: ~s idx: ~s", Args),
- Pid = spawn_link(?MODULE, update, [Idx, Mod, IdxState]),
- {reply, ok, State#st{pid=Pid}};
-handle_call({restart, IdxState}, _From, #st{idx=Idx, mod=Mod}=State) ->
- Args = [Mod:get(db_name, IdxState), Mod:get(idx_name, IdxState)],
- couch_log:info("Restarting index update for db: ~s idx: ~s", Args),
- Pid = State#st.pid,
- case is_pid(Pid) of
- true -> couch_util:shutdown_sync(State#st.pid);
- _ -> ok
- end,
- % Make sure to flush a possible 'EXIT' message
- % that's already in our mailbox
- receive
- {'EXIT', Pid, _} -> ok
- after 0 ->
- ok
- end,
- NewPid = spawn_link(?MODULE, update, [Idx, State#st.mod, IdxState]),
- {reply, ok, State#st{pid=NewPid}};
-handle_call(is_running, _From, #st{pid=Pid}=State) when is_pid(Pid) ->
- {reply, true, State};
-handle_call(is_running, _From, State) ->
- {reply, false, State}.
-
-
-handle_cast(_Mesg, State) ->
- {stop, unknown_cast, State}.
-
-
-handle_info({'EXIT', _, {updated, Pid, IdxState}}, #st{pid=Pid}=State) ->
- Mod = State#st.mod,
- Args = [Mod:get(db_name, IdxState), Mod:get(idx_name, IdxState)],
- couch_log:info("Index update finished for db: ~s idx: ~s", Args),
- ok = gen_server:cast(State#st.idx, {updated, IdxState}),
- {noreply, State#st{pid=undefined}};
-handle_info({'EXIT', _, {reset, Pid}}, #st{idx=Idx, pid=Pid}=State) ->
- {ok, NewIdxState} = gen_server:call(State#st.idx, reset),
- Pid2 = spawn_link(?MODULE, update, [Idx, State#st.mod, NewIdxState]),
- {noreply, State#st{pid=Pid2}};
-handle_info({'EXIT', Pid, normal}, #st{pid=Pid}=State) ->
- {noreply, State#st{pid=undefined}};
-handle_info({'EXIT', Pid, {{nocatch, Error}, _Trace}}, State) ->
- handle_info({'EXIT', Pid, Error}, State);
-handle_info({'EXIT', Pid, Error}, #st{pid=Pid}=State) ->
- ok = gen_server:cast(State#st.idx, {update_error, Error}),
- {noreply, State#st{pid=undefined}};
-handle_info({'EXIT', Pid, _Reason}, #st{idx=Pid}=State) ->
- {stop, normal, State};
-handle_info({'EXIT', _Pid, normal}, State) ->
- {noreply, State};
-handle_info(_Mesg, State) ->
- {stop, unknown_info, State}.
-
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-
-update(Idx, Mod, IdxState) ->
- DbName = Mod:get(db_name, IdxState),
- IndexName = Mod:get(idx_name, IdxState),
- erlang:put(io_priority, {view_update, DbName, IndexName}),
- CurrSeq = Mod:get(update_seq, IdxState),
- UpdateOpts = Mod:get(update_options, IdxState),
- CommittedOnly = lists:member(committed_only, UpdateOpts),
- IncludeDesign = lists:member(include_design, UpdateOpts),
- DocOpts = case lists:member(local_seq, UpdateOpts) of
- true -> [conflicts, deleted_conflicts, local_seq];
- _ -> [conflicts, deleted_conflicts]
- end,
-
- couch_util:with_db(DbName, fun(Db) ->
- DbUpdateSeq = couch_db:get_update_seq(Db),
- DbCommittedSeq = couch_db:get_committed_update_seq(Db),
-
- NumUpdateChanges = couch_db:count_changes_since(Db, CurrSeq),
- NumPurgeChanges = count_pending_purged_docs_since(Db, Mod, IdxState),
- TotalChanges = NumUpdateChanges + NumPurgeChanges,
- {ok, PurgedIdxState} = purge_index(Db, Mod, IdxState),
-
- GetSeq = fun
- (#full_doc_info{update_seq=Seq}) -> Seq;
- (#doc_info{high_seq=Seq}) -> Seq
- end,
-
- GetInfo = fun
- (#full_doc_info{id=Id, update_seq=Seq, deleted=Del}=FDI) ->
- {Id, Seq, Del, couch_doc:to_doc_info(FDI)};
- (#doc_info{id=Id, high_seq=Seq, revs=[RI|_]}=DI) ->
- {Id, Seq, RI#rev_info.deleted, DI}
- end,
-
- LoadDoc = fun(DI) ->
- {DocId, Seq, Deleted, DocInfo} = GetInfo(DI),
-
- case {IncludeDesign, DocId} of
- {false, <<"_design/", _/binary>>} ->
- {nil, Seq};
- _ when Deleted ->
- {#doc{id=DocId, deleted=true}, Seq};
- _ ->
- {ok, Doc} = couch_db:open_doc_int(Db, DocInfo, DocOpts),
- {Doc, Seq}
- end
- end,
-
- Proc = fun(DocInfo, {IdxStateAcc, _}) ->
- case CommittedOnly and (GetSeq(DocInfo) > DbCommittedSeq) of
- true ->
- {stop, {IdxStateAcc, false}};
- false ->
- {Doc, Seq} = LoadDoc(DocInfo),
- {ok, NewSt} = Mod:process_doc(Doc, Seq, IdxStateAcc),
- garbage_collect(),
- {ok, {NewSt, true}}
- end
- end,
- {ok, InitIdxState} = Mod:start_update(
- Idx,
- PurgedIdxState,
- TotalChanges,
- NumPurgeChanges
- ),
-
- Acc0 = {InitIdxState, true},
- {ok, Acc} = couch_db:fold_changes(Db, CurrSeq, Proc, Acc0, []),
- {ProcIdxSt, SendLast} = Acc,
-
- % If we didn't bail due to hitting the last committed seq we need
- % to send our last update_seq through.
- {ok, LastIdxSt} = case SendLast of
- true ->
- Mod:process_doc(nil, DbUpdateSeq, ProcIdxSt);
- _ ->
- {ok, ProcIdxSt}
- end,
-
- {ok, FinalIdxState} = Mod:finish_update(LastIdxSt),
- exit({updated, self(), FinalIdxState})
- end).
-
-
-purge_index(Db, Mod, IdxState) ->
- DbPurgeSeq = couch_db:get_purge_seq(Db),
- IdxPurgeSeq = Mod:get(purge_seq, IdxState),
- if IdxPurgeSeq == DbPurgeSeq -> {ok, IdxState}; true ->
- FoldFun = fun({PurgeSeq, _UUId, Id, Revs}, Acc) ->
- Mod:purge(Db, PurgeSeq, [{Id, Revs}], Acc)
- end,
- {ok, NewStateAcc} = try
- couch_db:fold_purge_infos(
- Db,
- IdxPurgeSeq,
- FoldFun,
- IdxState,
- []
- )
- catch error:{invalid_start_purge_seq, _} ->
- exit({reset, self()})
- end,
- Mod:update_local_purge_doc(Db, NewStateAcc),
- {ok, NewStateAcc}
- end.
-
-
-count_pending_purged_docs_since(Db, Mod, IdxState) ->
- DbPurgeSeq = couch_db:get_purge_seq(Db),
- IdxPurgeSeq = Mod:get(purge_seq, IdxState),
- DbPurgeSeq - IdxPurgeSeq.
diff --git a/src/couch_index/src/couch_index_util.erl b/src/couch_index/src/couch_index_util.erl
deleted file mode 100644
index dcb33b5b0..000000000
--- a/src/couch_index/src/couch_index_util.erl
+++ /dev/null
@@ -1,78 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_util).
-
--export([root_dir/0, index_dir/2, index_file/3]).
--export([load_doc/3, sort_lib/1, hexsig/1]).
-
--include_lib("couch/include/couch_db.hrl").
-
-
-root_dir() ->
- config:get("couchdb", "view_index_dir").
-
-
-index_dir(Module, DbName) when is_binary(DbName) ->
- DbDir = "." ++ binary_to_list(DbName) ++ "_design",
- filename:join([root_dir(), DbDir, Module]);
-index_dir(Module, Db) ->
- index_dir(Module, couch_db:name(Db)).
-
-
-index_file(Module, DbName, FileName) ->
- filename:join(index_dir(Module, DbName), FileName).
-
-
-load_doc(Db, #doc_info{}=DI, Opts) ->
- Deleted = lists:member(deleted, Opts),
- case (catch couch_db:open_doc(Db, DI, Opts)) of
- {ok, #doc{deleted=false}=Doc} -> Doc;
- {ok, #doc{deleted=true}=Doc} when Deleted -> Doc;
- _Else -> null
- end;
-load_doc(Db, {DocId, Rev}, Opts) ->
- case (catch load_doc(Db, DocId, Rev, Opts)) of
- #doc{deleted=false} = Doc -> Doc;
- _ -> null
- end.
-
-
-load_doc(Db, DocId, Rev, Options) ->
- case Rev of
- nil -> % open most recent rev
- case (catch couch_db:open_doc(Db, DocId, Options)) of
- {ok, Doc} -> Doc;
- _Error -> null
- end;
- _ -> % open a specific rev (deletions come back as stubs)
- case (catch couch_db:open_doc_revs(Db, DocId, [Rev], Options)) of
- {ok, [{ok, Doc}]} -> Doc;
- {ok, [{{not_found, missing}, Rev}]} -> null;
- {ok, [_Else]} -> null
- end
- end.
-
-
-sort_lib({Lib}) ->
- sort_lib(Lib, []).
-sort_lib([], LAcc) ->
- lists:keysort(1, LAcc);
-sort_lib([{LName, {LObj}}|Rest], LAcc) ->
- LSorted = sort_lib(LObj, []), % descend into nested object
- sort_lib(Rest, [{LName, LSorted}|LAcc]);
-sort_lib([{LName, LCode}|Rest], LAcc) ->
- sort_lib(Rest, [{LName, LCode}|LAcc]).
-
-
-hexsig(Sig) ->
- couch_util:to_hex(binary_to_list(Sig)).
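
The deleted couch_index_util derives on-disk locations from the configured view_index_dir: each database gets a hidden ".<dbname>_design" directory with one subdirectory per index module. A minimal sketch of the same path construction, with the root directory passed in rather than read from config:

    -module(index_path_sketch).
    -export([index_file/4]).

    %% Mirrors index_dir/index_file above: indexes live under
    %% <RootDir>/.<DbName>_design/<Module>/<FileName>.
    index_file(RootDir, Module, DbName, FileName) when is_binary(DbName) ->
        DbDir = "." ++ binary_to_list(DbName) ++ "_design",
        filename:join([RootDir, DbDir, Module, FileName]).

    %% Example (hypothetical paths):
    %% index_file("/srv/couchdb/indexes", "mrview", <<"mydb">>, "abc123.view")
    %%   -> "/srv/couchdb/indexes/.mydb_design/mrview/abc123.view"
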
diff --git a/src/couch_index/test/eunit/couch_index_compaction_tests.erl b/src/couch_index/test/eunit/couch_index_compaction_tests.erl
deleted file mode 100644
index ab493a969..000000000
--- a/src/couch_index/test/eunit/couch_index_compaction_tests.erl
+++ /dev/null
@@ -1,117 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_compaction_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(WAIT_TIMEOUT, 1000).
-
-
-setup_all() ->
- Ctx = test_util:start_couch(),
- meck:new([test_index], [non_strict]),
- Ctx.
-
-teardown_all(Ctx) ->
- meck:unload(),
- test_util:stop_couch(Ctx).
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- couch_db:close(Db),
- fake_index(DbName),
- {ok, IndexerPid} = couch_index_server:get_index(test_index, Db, undefined),
- ?assertNot(is_opened(Db)),
- {Db, IndexerPid}.
-
-fake_index(DbName) ->
- ok = meck:expect(test_index, init, ['_', '_'], {ok, 10}),
- ok = meck:expect(test_index, open, fun(_Db, State) ->
- {ok, State}
- end),
- ok = meck:expect(test_index, compact, ['_', '_', '_'],
- meck:seq([{ok, 9}, {ok, 10}])), %% to trigger recompaction
- ok = meck:expect(test_index, commit, ['_'], ok),
- ok = meck:expect(test_index, get, fun
- (db_name, _) ->
- DbName;
- (idx_name, _) ->
- <<"idx_name">>;
- (signature, _) ->
- <<61,237,157,230,136,93,96,201,204,17,137,186,50,249,44,135>>;
- (update_seq, Seq) ->
- Seq
- end),
- ok = meck:expect(test_index, close, ['_'], ok),
- ok = meck:expect(test_index, swap_compacted, fun(_, NewState) ->
- {ok, NewState}
- end).
-
-teardown(_) ->
- ok.
-
-compaction_test_() ->
- {
- "Check compaction",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun hold_db_for_recompaction/1
- ]
- }
- }
- }.
-
-
-hold_db_for_recompaction({Db, Idx}) ->
- ?_test(begin
- ?assertNot(is_opened(Db)),
- ok = meck:reset(test_index),
- {ok, Monitor} = couch_index:compact(Idx, [monitor]),
-
- %% we expect Mod:commit/1 to be called twice
- %% once for compact and once for recompact
- meck:wait(2, test_index, commit, ['_'], 5000),
- ?assertEqual(1, meck:num_calls(test_index, compact, ['_', '_', []])),
- ?assertEqual(1, meck:num_calls(test_index, compact, ['_', '_', [recompact]])),
-
- %% wait for compaction to finish
- receive
- {'DOWN', Monitor, _, _, _} -> ok
- after 5000 ->
- throw(timeout)
- end,
-
- ?assertEqual(ok, wait_db_close(Db)),
- ok
- end).
-
-wait_db_close(Db) ->
- test_util:wait(fun() ->
- case is_opened(Db) of
- false -> ok;
- true -> wait
- end
- end, ?WAIT_TIMEOUT).
-
-is_opened(Db) ->
- Monitors = [M || M <- couch_db:monitored_by(Db), M =/= self()],
- Monitors /= [].
diff --git a/src/couch_index/test/eunit/couch_index_ddoc_updated_tests.erl b/src/couch_index/test/eunit/couch_index_ddoc_updated_tests.erl
deleted file mode 100644
index 0e23adf91..000000000
--- a/src/couch_index/test/eunit/couch_index_ddoc_updated_tests.erl
+++ /dev/null
@@ -1,145 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_ddoc_updated_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
-start() ->
- fake_index(),
- Ctx = test_util:start_couch([mem3, fabric]),
- DbName = ?tempdb(),
- ok = fabric:create_db(DbName, [?ADMIN_CTX]),
- {Ctx, DbName}.
-
-
-stop({Ctx, DbName}) ->
- meck:unload(test_index),
- ok = fabric:delete_db(DbName, [?ADMIN_CTX]),
- DbDir = config:get("couchdb", "database_dir", "."),
- WaitFun = fun() ->
- filelib:fold_files(DbDir, <<".*", DbName/binary, "\.[0-9]+.*">>,
- true, fun(_F, _A) -> wait end, ok)
- end,
- ok = test_util:wait(WaitFun),
- test_util:stop_couch(Ctx),
- ok.
-
-
-ddoc_update_test_() ->
- {
- "Check ddoc update actions",
- {
- setup,
- fun start/0, fun stop/1,
- fun check_all_indexers_exit_on_ddoc_change/1
- }
- }.
-
-
-check_all_indexers_exit_on_ddoc_change({_Ctx, DbName}) ->
- ?_test(begin
- [DbShard1 | RestDbShards] = lists:map(fun(Sh) ->
- {ok, ShardDb} = couch_db:open(mem3:name(Sh), []),
- ShardDb
- end, mem3:local_shards(mem3:dbname(DbName))),
-
- % create a DDoc on Db1
- DDocID = <<"idx_name">>,
- DDocJson = couch_doc:from_json_obj({[
- {<<"_id">>, DDocID},
- {<<"value">>, 1}
- ]}),
- {ok, _Rev} = couch_db:update_doc(DbShard1, DDocJson, []),
- {ok, DbShard} = couch_db:reopen(DbShard1),
- {ok, DDoc} = couch_db:open_doc(
- DbShard, DDocID, [ejson_body, ?ADMIN_CTX]),
- DbShards = [DbShard | RestDbShards],
- N = length(DbShards),
-
- % run couch_index process for each shard database
- ok = meck:reset(test_index),
- lists:foreach(fun(ShardDb) ->
- couch_index_server:get_index(test_index, ShardDb, DDoc)
- end, DbShards),
-
- IndexesBefore = get_indexes_by_ddoc(DDocID, N),
- ?assertEqual(N, length(IndexesBefore)),
-
- AliveBefore = lists:filter(fun erlang:is_process_alive/1, IndexesBefore),
- ?assertEqual(N, length(AliveBefore)),
-
- % update ddoc
- DDocJson2 = couch_doc:from_json_obj({[
- {<<"_id">>, DDocID},
- {<<"value">>, 2},
- {<<"_rev">>, couch_doc:rev_to_str(DDoc#doc.revs)}
- ]}),
- {ok, _} = couch_db:update_doc(DbShard, DDocJson2, []),
-
- % assert that all index processes exit after ddoc updated
- ok = meck:reset(test_index),
- couch_index_server:handle_db_event(
- couch_db:name(DbShard), {ddoc_updated, DDocID}, {st, ""}),
-
- ok = meck:wait(N, test_index, init, ['_', '_'], 5000),
- IndexesAfter = get_indexes_by_ddoc(DDocID, 0),
- ?assertEqual(0, length(IndexesAfter)),
-
- %% assert that previously running indexes are gone
- AliveAfter = lists:filter(fun erlang:is_process_alive/1, IndexesBefore),
- ?assertEqual(0, length(AliveAfter)),
- ok
- end).
-
-
-fake_index() ->
- ok = meck:new([test_index], [non_strict]),
- ok = meck:expect(test_index, init, fun(Db, DDoc) ->
- {ok, {couch_db:name(Db), DDoc}}
- end),
- ok = meck:expect(test_index, open, fun(_Db, State) ->
- {ok, State}
- end),
- ok = meck:expect(test_index, get, fun
- (db_name, {DbName, _DDoc}) ->
- DbName;
- (idx_name, {_DbName, DDoc}) ->
- DDoc#doc.id;
- (signature, {_DbName, DDoc}) ->
- couch_hash:md5_hash(term_to_binary(DDoc));
- (update_seq, Seq) ->
- Seq
- end),
- ok = meck:expect(test_index, shutdown, ['_'], ok).
-
-
-get_indexes_by_ddoc(DDocID, N) ->
- Indexes = test_util:wait(fun() ->
- Indxs = ets:match_object(
- couchdb_indexes_by_db, {'$1', {DDocID, '$2'}}),
- case length(Indxs) == N of
- true ->
- Indxs;
- false ->
- wait
- end
- end),
- lists:foldl(fun({DbName, {_DDocID, Sig}}, Acc) ->
- case ets:lookup(couchdb_indexes_by_sig, {DbName, Sig}) of
- [{_, Pid}] -> [Pid|Acc];
- _ -> Acc
- end
- end, [], Indexes).
-
diff --git a/src/couch_jobs/src/couch_jobs.erl b/src/couch_jobs/src/couch_jobs.erl
index 6c40f5dff..1229fca23 100644
--- a/src/couch_jobs/src/couch_jobs.erl
+++ b/src/couch_jobs/src/couch_jobs.erl
@@ -121,9 +121,9 @@ get_job_state(Tx, Type, JobId) when is_binary(JobId) ->
-spec get_active_jobs_ids(jtx(), job_type()) -> [job_id()] | {error,
any()}.
get_active_jobs_ids(Tx, Type) ->
+ SinceVS = {versionstamp, 0, 0},
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
- Since = couch_jobs_fdb:get_active_since(JTx, Type,
- {versionstamp, 0, 0}),
+ {Since, _} = couch_jobs_fdb:get_active_since(JTx, Type, SinceVS, []),
maps:keys(Since)
end).
@@ -349,9 +349,7 @@ accept_loop(Type, NoSched, MaxSchedTime, Timeout) ->
AcceptResult = try
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), TxFun)
catch
- error:{timeout, _} ->
- retry;
- error:{erlfdb_error, Err} when Err =:= 1020 orelse Err =:= 1031 ->
+ error:{Tag, Err} when ?COUCH_JOBS_RETRYABLE(Tag, Err) ->
retry
end,
case AcceptResult of
@@ -387,12 +385,12 @@ wait_pending(PendingWatch, MaxSTime, UserTimeout, NoSched) ->
erlfdb:wait(PendingWatch, [{timeout, Timeout}]),
ok
catch
- error:{erlfdb_error, ?FUTURE_VERSION} ->
- erlfdb:cancel(PendingWatch, [flush]),
- retry;
error:{timeout, _} ->
erlfdb:cancel(PendingWatch, [flush]),
- {error, not_found}
+ {error, not_found};
+ error:{Err, Tag} when ?COUCH_JOBS_RETRYABLE(Err, Tag) ->
+ erlfdb:cancel(PendingWatch, [flush]),
+ retry
end.
diff --git a/src/couch_jobs/src/couch_jobs.hrl b/src/couch_jobs/src/couch_jobs.hrl
index bb561b136..a160605a0 100644
--- a/src/couch_jobs/src/couch_jobs.hrl
+++ b/src/couch_jobs/src/couch_jobs.hrl
@@ -40,6 +40,12 @@
-define(COUCH_JOBS_CURRENT, '$couch_jobs_current').
-define(UNDEFINED_MAX_SCHEDULED_TIME, 1 bsl 36).
+-define(COUCH_JOBS_RETRYABLE(Tag, Err), (
+ (Tag == timeout) orelse (
+ (Tag == erlfdb_error andalso ?ERLFDB_IS_RETRYABLE(Err)) orelse
+ (Tag == erlfdb_error andalso Err =:= ?ERLFDB_TRANSACTION_TIMED_OUT))
+)).
+
-type jtx() :: map() | undefined | tuple().
-type job_id() :: binary().
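
The new ?COUCH_JOBS_RETRYABLE(Tag, Err) macro lets the couch_jobs processes treat transaction timeouts and retryable FoundationDB errors uniformly: a plain timeout is always retryable, and an erlfdb_error is retryable when erlfdb classifies the code as such or when it is transaction_timed_out. A function-style sketch of that classification; the numeric codes are illustrative assumptions, since the ?ERLFDB_* macros are defined elsewhere:

    -module(retry_class_sketch).
    -export([is_retryable/2]).

    %% Sketch of the ?COUCH_JOBS_RETRYABLE classification. The codes below
    %% (future_version, not_committed, transaction_timed_out) are assumed
    %% values; the real macro delegates to ?ERLFDB_IS_RETRYABLE and
    %% ?ERLFDB_TRANSACTION_TIMED_OUT.
    is_retryable(timeout, _Err) ->
        true;
    is_retryable(erlfdb_error, Err) ->
        lists:member(Err, [1009, 1020, 1031]);
    is_retryable(_Tag, _Err) ->
        false.
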
diff --git a/src/couch_jobs/src/couch_jobs_activity_monitor.erl b/src/couch_jobs/src/couch_jobs_activity_monitor.erl
index 9802f5798..5cebcf946 100644
--- a/src/couch_jobs/src/couch_jobs_activity_monitor.erl
+++ b/src/couch_jobs/src/couch_jobs_activity_monitor.erl
@@ -28,16 +28,25 @@
code_change/3
]).
+
+-include("couch_jobs.hrl").
+-include_lib("kernel/include/logger.hrl").
+
+
-record(st, {
jtx,
type,
tref,
timeout = 0,
- vs = not_found
+ vs = not_found,
+ batch_size
}).
--define(MAX_JITTER_DEFAULT, 10000).
+-define(MAX_JITTER_DEFAULT, "10000").
+-define(INIT_BATCH_SIZE, "1000").
+-define(BATCH_FACTOR, "0.75").
+-define(BATCH_INCREMENT, "100").
-define(MISSING_TIMEOUT_CHECK, 5000).
@@ -48,7 +57,11 @@ start_link(Type) ->
%% gen_server callbacks
init([Type]) ->
- St = #st{jtx = couch_jobs_fdb:get_jtx(), type = Type},
+ St = #st{
+ jtx = couch_jobs_fdb:get_jtx(),
+ type = Type,
+ batch_size = init_batch_size()
+ },
{ok, schedule_check(St)}.
@@ -68,7 +81,13 @@ handle_info(check_activity, St) ->
St1 = try
check_activity(St)
catch
- error:{erlfdb_error, Err} when Err =:= 1020 orelse Err =:= 1031 ->
+ error:{Tag, Err} when ?COUCH_JOBS_RETRYABLE(Tag, Err) ->
+ ?LOG_ERROR(#{
+ what => erlfdb_error,
+ job_type => St#st.type,
+ error_code => Err,
+ details => "possible overload condition"
+ }),
LogMsg = "~p : type:~p got ~p error, possibly from overload",
couch_log:error(LogMsg, [?MODULE, St#st.type, Err]),
St
@@ -79,6 +98,10 @@ handle_info(check_activity, St) ->
handle_info({Ref, ready}, St) when is_reference(Ref) ->
% Don't crash couch_jobs_server and, with it, the whole application; the
% proper fix is to eventually do cleanup in the erlfdb:wait timeout code.
+ ?LOG_ERROR(#{
+ what => spurious_future_ready,
+ ref => Ref
+ }),
LogMsg = "~p : spurious erlfdb future ready message ~p",
couch_log:error(LogMsg, [?MODULE, Ref]),
{noreply, St};
@@ -94,19 +117,12 @@ code_change(_OldVsn, St, _Extra) ->
% Private helper functions
check_activity(#st{jtx = JTx, type = Type, vs = not_found} = St) ->
- NewVS = couch_jobs_fdb:tx(JTx, fun(JTx1) ->
- couch_jobs_fdb:get_activity_vs(JTx1, Type)
- end),
- St#st{vs = NewVS};
-
-check_activity(#st{jtx = JTx, type = Type, vs = VS} = St) ->
- NewVS = couch_jobs_fdb:tx(JTx, fun(JTx1) ->
- NewVS = couch_jobs_fdb:get_activity_vs(JTx1, Type),
- JobIds = couch_jobs_fdb:get_inactive_since(JTx1, Type, VS),
- couch_jobs_fdb:re_enqueue_inactive(JTx1, Type, JobIds),
- NewVS
- end),
- St#st{vs = NewVS}.
+ St#st{vs = get_activity_vs(JTx, Type)};
+
+check_activity(#st{} = St) ->
+ #st{jtx = JTx, type = Type, vs = VS, batch_size = BatchSize} = St,
+ NewBatchSize = re_enqueue_inactive(JTx, Type, VS, BatchSize),
+ St#st{vs = get_activity_vs(JTx, Type), batch_size = NewBatchSize}.
get_timeout_msec(JTx, Type) ->
@@ -135,6 +151,53 @@ schedule_check(#st{jtx = JTx, type = Type, timeout = OldTimeout} = St) ->
St1#st{tref = erlang:send_after(Wait, self(), check_activity)}.
+re_enqueue_inactive(JTx, Type, VS, BatchSize) ->
+ Result = try
+ couch_jobs_fdb:tx(JTx, fun(JTx1) ->
+ Opts = [{limit, BatchSize}],
+ JobIds = couch_jobs_fdb:get_inactive_since(JTx1, Type, VS, Opts),
+ couch_jobs_fdb:re_enqueue_inactive(JTx1, Type, JobIds),
+ length(JobIds)
+ end)
+ catch
+ error:{erlfdb_error, ?ERLFDB_TRANSACTION_TOO_LARGE} ->
+ failed;
+ error:{Tag, Err} when ?COUCH_JOBS_RETRYABLE(Tag, Err) ->
+ failed
+ end,
+ case Result of
+ JobCnt when is_integer(JobCnt), JobCnt < BatchSize ->
+ BatchSize;
+ JobCnt when is_integer(JobCnt), JobCnt >= BatchSize ->
+ NewBatchSize = BatchSize + batch_increment(),
+ re_enqueue_inactive(JTx, Type, VS, NewBatchSize);
+ failed ->
+ NewBatchSize = max(1, round(BatchSize * batch_factor())),
+ re_enqueue_inactive(JTx, Type, VS, NewBatchSize)
+ end.
+
+
+get_activity_vs(JTx, Type) ->
+ couch_jobs_fdb:tx(JTx, fun(JTx1) ->
+ couch_jobs_fdb:get_activity_vs(JTx1, Type)
+ end).
+
+
get_max_jitter_msec()->
- config:get_integer("couch_jobs", "activity_monitor_max_jitter_msec",
+ couch_jobs_util:get_non_neg_int(activity_monitor_max_jitter_msec,
?MAX_JITTER_DEFAULT).
+
+
+init_batch_size() ->
+ couch_jobs_util:get_non_neg_int(activity_monitor_init_batch_size,
+ ?INIT_BATCH_SIZE).
+
+
+batch_increment() ->
+ couch_jobs_util:get_non_neg_int(activity_monitor_batch_increment,
+ ?BATCH_INCREMENT).
+
+
+batch_factor() ->
+ couch_jobs_util:get_float_0_1(activity_monitor_batch_factor,
+ ?BATCH_FACTOR).
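
The reworked activity monitor replaces the single unbounded range read with adaptive batching: each transaction re-enqueues at most batch_size inactive jobs, a full batch grows the size by a fixed increment, and a failed transaction (too large, or any retryable error) shrinks it multiplicatively before retrying. A standalone sketch of that additive-increase/multiplicative-decrease loop, with a hypothetical WorkFun standing in for the FDB transaction:

    -module(batch_size_sketch).
    -export([drain/4]).

    %% Mirrors re_enqueue_inactive/4 above. WorkFun(BatchSize) is a
    %% hypothetical callback returning {ok, NumProcessed} or failed.
    drain(WorkFun, BatchSize, Increment, Factor) ->
        case WorkFun(BatchSize) of
            {ok, N} when N < BatchSize ->
                %% Partial batch: the backlog is drained; keep the size.
                BatchSize;
            {ok, _N} ->
                %% Full batch: there may be more; grow additively and go on.
                drain(WorkFun, BatchSize + Increment, Increment, Factor);
            failed ->
                %% Back off multiplicatively (never below 1) and retry.
                drain(WorkFun, max(1, round(BatchSize * Factor)),
                      Increment, Factor)
        end.

The notifier's get_jobs_iter and get_active_iter below size their per-transaction work with the same grow-on-success, shrink-on-failure pattern.
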
diff --git a/src/couch_jobs/src/couch_jobs_fdb.erl b/src/couch_jobs/src/couch_jobs_fdb.erl
index 27131ec86..cea138876 100644
--- a/src/couch_jobs/src/couch_jobs_fdb.erl
+++ b/src/couch_jobs/src/couch_jobs_fdb.erl
@@ -33,8 +33,8 @@
get_activity_vs/2,
get_activity_vs_and_watch/2,
- get_active_since/3,
- get_inactive_since/3,
+ get_active_since/4,
+ get_inactive_since/4,
re_enqueue_inactive/3,
init_cache/0,
@@ -356,26 +356,26 @@ get_activity_vs_and_watch(#{jtx := true} = JTx, Type) ->
end.
-get_active_since(#{jtx := true} = JTx, Type, Versionstamp) ->
+get_active_since(#{jtx := true} = JTx, Type, Versionstamp, Opts) ->
#{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
Prefix = erlfdb_tuple:pack({?ACTIVITY}, Jobs),
StartKey = erlfdb_tuple:pack({Type, Versionstamp}, Prefix),
StartKeySel = erlfdb_key:first_greater_or_equal(StartKey),
{_, EndKey} = erlfdb_tuple:range({Type}, Prefix),
- Opts = [{streaming_mode, want_all}],
Future = erlfdb:get_range(Tx, StartKeySel, EndKey, Opts),
- maps:from_list(lists:map(fun({_K, V}) ->
- erlfdb_tuple:unpack(V)
- end, erlfdb:wait(Future))).
+ {JobIdsData, LastSeq} = lists:mapfoldl(fun({K, V}, _PrevSeq) ->
+ {Type, Seq} = erlfdb_tuple:unpack(K, Prefix),
+ {erlfdb_tuple:unpack(V), Seq}
+ end, Versionstamp, erlfdb:wait(Future)),
+ {maps:from_list(JobIdsData), LastSeq}.
-get_inactive_since(#{jtx := true} = JTx, Type, Versionstamp) ->
+get_inactive_since(#{jtx := true} = JTx, Type, Versionstamp, Opts) ->
#{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
Prefix = erlfdb_tuple:pack({?ACTIVITY}, Jobs),
{StartKey, _} = erlfdb_tuple:range({Type}, Prefix),
EndKey = erlfdb_tuple:pack({Type, Versionstamp}, Prefix),
EndKeySel = erlfdb_key:first_greater_than(EndKey),
- Opts = [{streaming_mode, want_all}],
Future = erlfdb:get_range(Tx, StartKey, EndKeySel, Opts),
lists:map(fun({_K, V}) ->
{JobId, _} = erlfdb_tuple:unpack(V),
diff --git a/src/couch_jobs/src/couch_jobs_notifier.erl b/src/couch_jobs/src/couch_jobs_notifier.erl
index 99581cb79..b47834f2f 100644
--- a/src/couch_jobs/src/couch_jobs_notifier.erl
+++ b/src/couch_jobs/src/couch_jobs_notifier.erl
@@ -33,11 +33,14 @@
-include("couch_jobs.hrl").
+-include_lib("kernel/include/logger.hrl").
--define(TYPE_MONITOR_HOLDOFF_DEFAULT, 50).
+-define(TYPE_MONITOR_HOLDOFF_DEFAULT, "50").
-define(TYPE_MONITOR_TIMEOUT_DEFAULT, "infinity").
--define(GET_JOBS_RANGE_RATIO, 0.5).
+-define(INIT_BATCH_SIZE, "1000").
+-define(BATCH_FACTOR, "0.75").
+-define(BATCH_INCREMENT, "100").
-record(st, {
@@ -46,7 +49,8 @@
monitor_pid,
subs, % #{JobId => #{Ref => {Pid, State, Seq}}}
pidmap, % #{{Jobid, Pid} => Ref}
- refmap % #{Ref => JobId}
+ refmap, % #{Ref => JobId}
+ batch_size
}).
@@ -76,7 +80,8 @@ init([Type]) ->
type = Type,
subs = #{},
pidmap = #{},
- refmap = #{}
+ refmap = #{},
+ batch_size = init_batch_size()
},
VS = get_type_vs(St),
HoldOff = get_holdoff(),
@@ -121,6 +126,10 @@ handle_info({type_updated, VS}, St) ->
handle_info({Ref, ready}, St) when is_reference(Ref) ->
% Don't crash couch_jobs_server and, with it, the whole application; the
% proper fix is to eventually do cleanup in the erlfdb:wait timeout code.
+ ?LOG_ERROR(#{
+ what => spurious_future_ready,
+ ref => Ref
+ }),
LogMsg = "~p : spurious erlfdb future ready message ~p",
couch_log:error(LogMsg, [?MODULE, Ref]),
{noreply, St};
@@ -199,31 +208,44 @@ flush_type_updated_messages(VSMax) ->
end.
-get_jobs(#st{jtx = JTx, type = Type}, InactiveIdMap, Ratio)
- when Ratio >= ?GET_JOBS_RANGE_RATIO ->
- Filter = fun(JobId) -> maps:is_key(JobId, InactiveIdMap) end,
- JobMap = couch_jobs_fdb:tx(JTx, fun(JTx1) ->
- couch_jobs_fdb:get_jobs(JTx1, Type, Filter)
- end),
- maps:map(fun(JobId, _) ->
- case maps:is_key(JobId, JobMap) of
- true -> maps:get(JobId, JobMap);
- false -> {null, not_found, not_found}
- end
- end, InactiveIdMap);
-
-get_jobs(#st{jtx = JTx, type = Type}, InactiveIdMap, _) ->
- couch_jobs_fdb:tx(JTx, fun(JTx1) ->
- maps:map(fun(JobId, _) ->
- Job = #{job => true, type => Type, id => JobId},
- case couch_jobs_fdb:get_job_state_and_data(JTx1, Job) of
- {ok, Seq, State, Data} ->
- {Seq, State, Data};
- {error, not_found} ->
- {null, not_found, not_found}
- end
- end, InactiveIdMap)
- end).
+get_jobs(#st{} = St, Ids) when is_list(Ids) ->
+ #st{jtx = JTx, type = Type, batch_size = BatchSize} = St,
+ {Jobs, NewBatchSize} = get_jobs_iter(JTx, Type, Ids, BatchSize, #{}),
+ {Jobs, St#st{batch_size = NewBatchSize}}.
+
+
+get_jobs_iter(_Jtx, _Type, [], BatchSize, #{} = Acc) ->
+ {Acc, BatchSize};
+
+get_jobs_iter(JTx, Type, Ids, BatchSize, #{} = Acc0) ->
+ {BatchIds, RestIds} = case length(Ids) < BatchSize of
+ true -> {Ids, []};
+ false -> lists:split(BatchSize, Ids)
+ end,
+ Result = try
+ couch_jobs_fdb:tx(JTx, fun(JTx1) ->
+ lists:foldl(fun(JobId, #{} = Acc) ->
+ Job = #{job => true, type => Type, id => JobId},
+ case couch_jobs_fdb:get_job_state_and_data(JTx1, Job) of
+ {ok, Seq, State, Data} ->
+ Acc#{JobId => {Seq, State, Data}};
+ {error, not_found} ->
+ Acc#{JobId => {null, not_found, not_found}}
+ end
+ end, Acc0, BatchIds)
+ end)
+ catch
+ error:{Tag, Err} when ?COUCH_JOBS_RETRYABLE(Tag, Err) ->
+ failed
+ end,
+ case Result of
+ #{} = AccF ->
+ NewBatchSize = BatchSize + batch_increment(),
+ get_jobs_iter(JTx, Type, RestIds, NewBatchSize, AccF);
+ failed ->
+ NewBatchSize = max(1, round(BatchSize * batch_factor())),
+ get_jobs_iter(JTx, Type, Ids, NewBatchSize, Acc0)
+ end.
get_type_vs(#st{jtx = JTx, type = Type}) ->
@@ -236,24 +258,48 @@ get_type_vs(#st{jtx = JTx, type = Type}) ->
% and updated at least once since the given versionstamp. These are relatively
% cheap to find as it's just a range read in the ?ACTIVITY subspace.
%
-get_active_since(#st{} = _St, not_found) ->
- #{};
-
-get_active_since(#st{jtx = JTx, type = Type, subs = Subs}, VS) ->
- AllUpdated = couch_jobs_fdb:tx(JTx, fun(JTx1) ->
- couch_jobs_fdb:get_active_since(JTx1, Type, VS)
- end),
- maps:map(fun(_JobId, Data) ->
+get_active_since(#st{} = St, not_found) ->
+ {#{}, St};
+
+get_active_since(#st{} = St, VS) ->
+ #st{jtx = JTx, type = Type, subs = Subs, batch_size = BatchSize} = St,
+ {Updated, NewBatchSize} = get_active_iter(JTx, Type, VS, BatchSize, #{}),
+ UpdatedSubs = maps:map(fun(_JobId, Data) ->
{VS, running, Data}
- end, maps:with(maps:keys(Subs), AllUpdated)).
+ end, maps:with(maps:keys(Subs), Updated)),
+ {UpdatedSubs, St#st{batch_size = NewBatchSize}}.
+
+
+get_active_iter(JTx, Type, VS, BatchSize, #{} = Acc) ->
+ Opts = [{limit, BatchSize}],
+ Result = try
+ couch_jobs_fdb:tx(JTx, fun(JTx1) ->
+ couch_jobs_fdb:get_active_since(JTx1, Type, VS, Opts)
+ end)
+ catch
+ error:{Tag, Err} when ?COUCH_JOBS_RETRYABLE(Tag, Err) ->
+ failed
+ end,
+ case Result of
+ {Updated, _FinalSeq} when map_size(Updated) < BatchSize ->
+ {maps:merge(Acc, Updated), BatchSize};
+ {Updated, FinalSeq} when map_size(Updated) >= BatchSize ->
+ Acc1 = maps:merge(Acc, Updated),
+ NewBatchSize = BatchSize + batch_increment(),
+ NextSeq = fabric2_fdb:next_vs(FinalSeq),
+ get_active_iter(JTx, Type, NextSeq, NewBatchSize, Acc1);
+ failed ->
+ NewBatchSize = max(1, round(BatchSize * batch_factor())),
+ get_active_iter(JTx, Type, VS, NewBatchSize, Acc)
+ end.
try_notify_subscribers(ActiveVS, #st{} = St) ->
try
notify_subscribers(ActiveVS, St)
catch
- error:{timeout, _} -> try_notify_subscribers(ActiveVS, St);
- error:{erlfdb_error, 1031} -> try_notify_subscribers(ActiveVS, St)
+ error:{Tag, Err} when ?COUCH_JOBS_RETRYABLE(Tag, Err) ->
+ try_notify_subscribers(ActiveVS, St)
end.
@@ -263,14 +309,13 @@ notify_subscribers(_, #st{subs = Subs} = St) when map_size(Subs) =:= 0 ->
notify_subscribers(ActiveVS, #st{} = St1) ->
% First gather the easy (cheap) active jobs. Then, with those out of the
% way, inspect each job to get its state.
- Active = get_active_since(St1, ActiveVS),
- St2 = notify_job_ids(Active, St1),
+ {Active, St2} = get_active_since(St1, ActiveVS),
+ St3 = notify_job_ids(Active, St2),
ActiveIds = maps:keys(Active),
- Subs = St2#st.subs,
- InactiveIdMap = maps:without(ActiveIds, Subs),
- InactiveRatio = maps:size(InactiveIdMap) / maps:size(Subs),
- Inactive = get_jobs(St2, InactiveIdMap, InactiveRatio),
- notify_job_ids(Inactive, St2).
+ Subs = St3#st.subs,
+ InactiveIds = maps:keys(maps:without(ActiveIds, Subs)),
+ {Inactive, St4} = get_jobs(St3, InactiveIds),
+ notify_job_ids(Inactive, St4).
notify_job_ids(#{} = Jobs, #st{type = Type} = St0) ->
@@ -302,13 +347,25 @@ notify(Pid, Ref, Type, Id, State, Data) ->
get_holdoff() ->
- config:get_integer("couch_jobs", "type_monitor_holdoff_msec",
+ couch_jobs_util:get_non_neg_int(type_monitor_holdoff_msec,
?TYPE_MONITOR_HOLDOFF_DEFAULT).
get_timeout() ->
- Default = ?TYPE_MONITOR_TIMEOUT_DEFAULT,
- case config:get("couch_jobs", "type_monitor_timeout_msec", Default) of
- "infinity" -> infinity;
- Milliseconds -> list_to_integer(Milliseconds)
- end.
+ couch_jobs_util:get_timeout(type_monitor_timeout_msec,
+ ?TYPE_MONITOR_TIMEOUT_DEFAULT).
+
+
+init_batch_size() ->
+ couch_jobs_util:get_non_neg_int(notifier_init_batch_size,
+ ?INIT_BATCH_SIZE).
+
+
+batch_increment() ->
+ couch_jobs_util:get_non_neg_int(notifier_batch_increment,
+ ?BATCH_INCREMENT).
+
+
+batch_factor() ->
+ couch_jobs_util:get_float_0_1(notifier_batch_factor,
+ ?BATCH_FACTOR).
diff --git a/src/couch_jobs/src/couch_jobs_server.erl b/src/couch_jobs/src/couch_jobs_server.erl
index 2e03c7dcf..afa1fba7a 100644
--- a/src/couch_jobs/src/couch_jobs_server.erl
+++ b/src/couch_jobs/src/couch_jobs_server.erl
@@ -14,6 +14,10 @@
-behaviour(gen_server).
+-include_lib("kernel/include/logger.hrl").
+
+-include("couch_jobs.hrl").
+
-export([
start_link/0,
@@ -31,8 +35,8 @@
]).
--define(TYPE_CHECK_PERIOD_DEFAULT, 15000).
--define(MAX_JITTER_DEFAULT, 5000).
+-define(TYPE_CHECK_PERIOD_DEFAULT, "15000").
+-define(MAX_JITTER_DEFAULT, "5000").
start_link() ->
@@ -92,6 +96,11 @@ handle_info(check_types, St) ->
{noreply, St};
handle_info({'DOWN', _Ref, process, Pid, Reason}, St) ->
+ ?LOG_ERROR(#{
+ what => monitored_process_crash,
+ pid => Pid,
+ details => Reason
+ }),
LogMsg = "~p : process ~p exited with ~p",
couch_log:error(LogMsg, [?MODULE, Pid, Reason]),
{stop, {unexpected_process_exit, Pid, Reason}, St};
@@ -99,6 +108,10 @@ handle_info({'DOWN', _Ref, process, Pid, Reason}, St) ->
handle_info({Ref, ready}, St) when is_reference(Ref) ->
% Don't crash couch_jobs_server and, with it, the whole application; the
% proper fix is to eventually do cleanup in the erlfdb:wait timeout code.
+ ?LOG_ERROR(#{
+ what => spurious_future_ready,
+ ref => Ref
+ }),
LogMsg = "~p : spurious erlfdb future ready message ~p",
couch_log:error(LogMsg, [?MODULE, Ref]),
{noreply, St};
@@ -170,8 +183,14 @@ fdb_types() ->
couch_jobs_fdb:get_types(JTx)
end)
catch
- error:{timeout, _} ->
- couch_log:warning("~p : Timed out connecting to FDB", [?MODULE]),
+ error:{Tag, Err} when ?COUCH_JOBS_RETRYABLE(Tag, Err) ->
+ ?LOG_WARNING(#{
+ what => fdb_connection_error,
+ tag => Tag,
+ details => Err
+ }),
+ LogMsg = "~p : Error ~p:~p connecting to FDB",
+ couch_log:warning(LogMsg, [?MODULE, Tag, Err]),
[]
end.
@@ -184,10 +203,10 @@ schedule_check() ->
get_period_msec() ->
- config:get_integer("couch_jobs", "type_check_period_msec",
+ couch_jobs_util:get_non_neg_int(type_check_period_msec,
?TYPE_CHECK_PERIOD_DEFAULT).
get_max_jitter_msec() ->
- config:get_integer("couch_jobs", "type_check_max_jitter_msec",
+ couch_jobs_util:get_non_neg_int(type_check_max_jitter_msec,
?MAX_JITTER_DEFAULT).
diff --git a/src/couch_jobs/src/couch_jobs_type_monitor.erl b/src/couch_jobs/src/couch_jobs_type_monitor.erl
index 04ad60acc..95aee4e7a 100644
--- a/src/couch_jobs/src/couch_jobs_type_monitor.erl
+++ b/src/couch_jobs/src/couch_jobs_type_monitor.erl
@@ -55,10 +55,7 @@ loop(#st{vs = VS, timeout = Timeout} = St) ->
try
erlfdb:wait(Watch, [{timeout, Timeout}])
catch
- error:{erlfdb_error, ?FUTURE_VERSION} ->
- erlfdb:cancel(Watch, [flush]),
- ok;
- error:{timeout, _} ->
+ error:{Tag, Err} when ?COUCH_JOBS_RETRYABLE(Tag, Err) ->
erlfdb:cancel(Watch, [flush]),
ok
end,
@@ -78,7 +75,14 @@ notify(#st{} = St) ->
St#st{timestamp = Now}.
-get_vs_and_watch(#st{jtx = JTx, type = Type}) ->
- couch_jobs_fdb:tx(JTx, fun(JTx1) ->
- couch_jobs_fdb:get_activity_vs_and_watch(JTx1, Type)
- end).
+get_vs_and_watch(#st{} = St) ->
+ #st{jtx = JTx, type = Type, holdoff = HoldOff} = St,
+ try
+ couch_jobs_fdb:tx(JTx, fun(JTx1) ->
+ couch_jobs_fdb:get_activity_vs_and_watch(JTx1, Type)
+ end)
+ catch
+ error:{Tag, Err} when ?COUCH_JOBS_RETRYABLE(Tag, Err) ->
+ timer:sleep(HoldOff),
+ get_vs_and_watch(St)
+ end.
diff --git a/src/couch_jobs/src/couch_jobs_util.erl b/src/couch_jobs/src/couch_jobs_util.erl
new file mode 100644
index 000000000..747ab60d8
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs_util.erl
@@ -0,0 +1,58 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_jobs_util).
+
+
+-export([
+ get_non_neg_int/2,
+ get_float_0_1/2,
+ get_timeout/2
+]).
+
+
+get_non_neg_int(Key, Default) when is_atom(Key), is_list(Default) ->
+ StrVal = config:get("couch_jobs", atom_to_list(Key), Default),
+ non_neg_int(Key, StrVal).
+
+
+get_float_0_1(Key, Default) when is_atom(Key), is_list(Default) ->
+ StrVal = config:get("couch_jobs", atom_to_list(Key), Default),
+ float_0_1(Key, StrVal).
+
+
+get_timeout(Key, Default) when is_atom(Key), is_list(Default) ->
+ case config:get("couch_jobs", atom_to_list(Key), Default) of
+ "infinity" -> infinity;
+ StrVal -> non_neg_int(Key, StrVal)
+ end.
+
+
+non_neg_int(Name, Str) ->
+ try
+ Val = list_to_integer(Str),
+ true = Val > 0,
+ Val
+ catch _:_ ->
+ erlang:error({invalid_non_neg_integer, {couch_jobs, Name, Str}})
+ end.
+
+
+float_0_1(Name, Str) ->
+ Val = try
+ list_to_float(Str)
+ catch error:badarg ->
+ erlang:error({invalid_float, {couch_jobs, Name, Str}})
+ end,
+ if Val >= 0.0 andalso Val =< 1.0 -> Val; true ->
+ erlang:error({float_out_of_range, {couch_jobs, Name, Str}})
+ end.
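A minimal usage sketch for the new couch_jobs_util helpers (not part of the commit; the config key activity_timeout and the default strings below are illustrative). Defaults are passed as strings because config:get/3 returns strings, and note that get_non_neg_int/2 requires a strictly positive integer despite its name.

%% Sketch only, not from the commit: calling the couch_jobs_util getters.
-module(couch_jobs_util_example).

-export([settings/0]).

settings() ->
    %% String defaults mirror config:get/3; values are parsed and validated.
    Period = couch_jobs_util:get_non_neg_int(type_check_period_msec, "15000"),
    Factor = couch_jobs_util:get_float_0_1(notifier_batch_factor, "0.5"),
    %% get_timeout/2 additionally accepts the literal string "infinity".
    Timeout = couch_jobs_util:get_timeout(activity_timeout, "infinity"),
    {Period, Factor, Timeout}.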
diff --git a/src/couch_jobs/test/couch_jobs_tests.erl b/src/couch_jobs/test/couch_jobs_tests.erl
index 11572a4b9..9f3a3721d 100644
--- a/src/couch_jobs/test/couch_jobs_tests.erl
+++ b/src/couch_jobs/test/couch_jobs_tests.erl
@@ -16,6 +16,7 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("eunit/include/eunit.hrl").
+-include_lib("fabric/test/fabric2_test.hrl").
% Job creation API can take an undefined Tx object
@@ -33,37 +34,68 @@ couch_jobs_basic_test_() ->
foreach,
fun setup/0, fun teardown/1,
[
- fun add_remove_pending/1,
- fun add_remove_errors/1,
- fun add_with_the_same_scheduled_time/1,
- fun get_job_data_and_state/1,
- fun resubmit_as_job_creator/1,
- fun type_timeouts_and_server/1,
- fun dead_notifier_restarts_jobs_server/1,
- fun bad_messages_restart_couch_jobs_server/1,
- fun bad_messages_restart_notifier/1,
- fun bad_messages_restart_activity_monitor/1,
- fun basic_accept_and_finish/1,
- fun accept_blocking/1,
- fun job_processor_update/1,
- fun resubmit_enqueues_job/1,
- fun resubmit_finished_updates_job_data/1,
- fun resubmit_running_does_not_update_job_data/1,
- fun resubmit_custom_schedtime/1,
- fun add_pending_updates_job_data/1,
- fun add_finished_updates_job_data/1,
- fun add_running_does_not_update_job_data/1,
- fun accept_max_schedtime/1,
- fun accept_no_schedule/1,
- fun subscribe/1,
- fun remove_when_subscribed_and_pending/1,
- fun remove_when_subscribed_and_running/1,
- fun subscribe_wait_multiple/1,
- fun enqueue_inactive/1,
- fun remove_running_job/1,
- fun check_get_jobs/1,
- fun use_fabric_transaction_object/1,
- fun metadata_version_bump/1
+ ?TDEF_FE(add_remove_pending),
+ ?TDEF_FE(add_remove_errors),
+ ?TDEF_FE(add_with_the_same_scheduled_time),
+ ?TDEF_FE(get_job_data_and_state),
+ ?TDEF_FE(resubmit_as_job_creator),
+ ?TDEF_FE(type_timeouts_and_server, 15),
+ ?TDEF_FE(dead_notifier_restarts_jobs_server),
+ ?TDEF_FE(bad_cast_restarts_couch_jobs_server),
+ ?TDEF_FE(bad_call_restarts_couch_jobs_server),
+ ?TDEF_FE(bad_info_restarts_couch_jobs_server),
+ ?TDEF_FE(bad_cast_restarts_notifier),
+ ?TDEF_FE(bad_call_restarts_notifier),
+ ?TDEF_FE(bad_info_restarts_notifier),
+ ?TDEF_FE(bad_cast_restarts_activity_monitor),
+ ?TDEF_FE(bad_call_restarts_activity_monitor),
+ ?TDEF_FE(bad_info_restarts_activity_monitor),
+ ?TDEF_FE(basic_accept_and_finish),
+ ?TDEF_FE(accept_blocking),
+ ?TDEF_FE(job_processor_update),
+ ?TDEF_FE(resubmit_enqueues_job),
+ ?TDEF_FE(resubmit_finished_updates_job_data),
+ ?TDEF_FE(resubmit_running_does_not_update_job_data),
+ ?TDEF_FE(resubmit_custom_schedtime),
+ ?TDEF_FE(add_pending_updates_job_data),
+ ?TDEF_FE(add_finished_updates_job_data),
+ ?TDEF_FE(add_running_does_not_update_job_data),
+ ?TDEF_FE(accept_max_schedtime),
+ ?TDEF_FE(accept_no_schedule),
+ ?TDEF_FE(subscribe),
+ ?TDEF_FE(remove_when_subscribed_and_pending),
+ ?TDEF_FE(remove_when_subscribed_and_running),
+ ?TDEF_FE(subscribe_wait_multiple),
+ ?TDEF_FE(enqueue_inactive, 15),
+ ?TDEF_FE(remove_running_job),
+ ?TDEF_FE(check_get_jobs),
+ ?TDEF_FE(use_fabric_transaction_object),
+ ?TDEF_FE(metadata_version_bump)
+ ]
+ }
+ }
+ }.
+
+
+couch_jobs_batching_test_() ->
+ {
+ "Test couch jobs batching logic",
+ {
+ setup,
+ fun setup_couch/0, fun teardown_couch/1,
+ {
+ foreach,
+ fun setup_batch/0, fun teardown_batch/1,
+ [
+ ?TDEF_FE(accept_blocking),
+ ?TDEF_FE(resubmit_enqueues_job),
+ ?TDEF_FE(accept_max_schedtime),
+ ?TDEF_FE(accept_no_schedule),
+ ?TDEF_FE(subscribe),
+ ?TDEF_FE(remove_when_subscribed_and_pending),
+ ?TDEF_FE(remove_when_subscribed_and_running),
+ ?TDEF_FE(subscribe_wait_multiple),
+ ?TDEF_FE(enqueue_inactive, 15)
]
}
}
@@ -71,15 +103,28 @@ couch_jobs_basic_test_() ->
setup_couch() ->
+ meck:new(couch_jobs_fdb, [passthrough]),
+ meck:new(couch_jobs_util, [passthrough]),
+ % Because of a circular dependency between `couch_jobs` and `fabric` in
+ % `fabric2_db_expiration` module, disable db expiration so that when
+ % `couch_jobs` is stopped in the test, the `fabric` app doesn't get torn
+ % down as well and we don't see spurious <<"db_expiration">> jobs in test
+ % results.
+ meck:new(fabric2_db_expiration, [passthrough]),
+ meck:expect(fabric2_db_expiration, handle_info, fun
+ (timeout, St) -> {noreply, St};
+ (Msg, St) -> meck:passthrough([Msg, St])
+ end),
test_util:start_couch([fabric]).
teardown_couch(Ctx) ->
- test_util:stop_couch(Ctx),
- meck:unload().
+ meck:unload(),
+ test_util:stop_couch(Ctx).
setup() ->
+ application:start(fabric),
application:start(couch_jobs),
clear_jobs(),
T1 = {<<"t1">>, 1024}, % a complex type should work
@@ -98,15 +143,58 @@ setup() ->
}.
-teardown(#{dbname := DbName}) ->
- clear_jobs(),
+teardown(#{}) ->
application:stop(couch_jobs),
- AllDbs = fabric2_db:list_dbs(),
- case lists:member(DbName, AllDbs) of
- true -> ok = fabric2_db:delete(DbName, []);
- false -> ok
- end,
- meck:unload().
+ application:stop(fabric),
+ ok.
+
+
+setup_batch() ->
+ Ctx = setup(),
+
+ % Simulate having too many jobs to fit in a 10 MB transaction
+ meck:expect(couch_jobs_fdb, re_enqueue_inactive, 3, meck:loop([
+ meck:raise(error, {erlfdb_error, 2101}),
+ meck:passthrough()
+ ])),
+
+ % Simulate get_inactive_since GRV timing out
+ meck:expect(couch_jobs_fdb, get_inactive_since, 4, meck:loop([
+ meck:raise(error, {erlfdb_error, 1007}),
+ meck:passthrough()
+ ])),
+
+ % Simulate get_active_since transaction timing out
+ meck:expect(couch_jobs_fdb, get_active_since, 4, meck:loop([
+ meck:raise(error, {erlfdb_error, 1031}),
+ meck:passthrough()
+ ])),
+
+ % Set up batching parameters to test small batches down to size 1
+ meck:expect(couch_jobs_util, get_non_neg_int, [
+ {[notifier_batch_increment, '_'], 1},
+ {[activity_monitor_batch_increment, '_'], 1},
+ {2, meck:passthrough()}
+ ]),
+ meck:expect(couch_jobs_util, get_float_0_1, [
+ {[notifier_batch_factor, '_'], 0.0001},
+ {[activity_monitor_batch_factor, '_'], 0.0001},
+ {2, meck:passthrough()}
+ ]),
+
+ Ctx.
+
+
+teardown_batch(Ctx) ->
+ teardown(Ctx),
+ meck:reset(couch_jobs_fdb),
+ meck:reset(couch_jobs_util),
+ meck:expect(couch_jobs_fdb, re_enqueue_inactive, 3, meck:passthrough()),
+ meck:expect(couch_jobs_fdb, get_active_since, 4, meck:passthrough()),
+ meck:expect(couch_jobs_fdb, get_inactive_since, 4, meck:passthrough()),
+ meck:expect(couch_jobs_util, get_non_neg_int, 2, meck:passthrough()),
+ meck:expect(couch_jobs_util, get_float_0_1, 2, meck:passthrough()),
+ ok.
clear_jobs() ->
@@ -116,647 +204,569 @@ clear_jobs() ->
end).
-restart_app() ->
- application:stop(couch_jobs),
- application:start(couch_jobs),
- couch_jobs_server:force_check_types().
-
-
get_job(Type, JobId) ->
couch_jobs_fdb:get_job(Type, JobId).
add_remove_pending(#{t1 := T1, j1 := J1, t2 := T2, j2 := J2}) ->
- ?_test(begin
- ?assertEqual(ok, couch_jobs:add(?TX, T1, J1, #{})),
- ?assertMatch(#{state := pending, data := #{}}, get_job(T1, J1)),
- ?assertEqual(ok, couch_jobs:remove(?TX, T1, J1)),
- % Data and numeric type should work as well. Also do it in a
- % transaction
- Data = #{<<"x">> => 42},
- ?assertEqual(ok, fabric2_fdb:transactional(fun(Tx) ->
- couch_jobs:add(Tx, T2, J2, Data)
- end)),
- ?assertMatch(#{state := pending, data := Data}, get_job(T2, J2)),
- ?assertEqual(ok, couch_jobs:remove(?TX, T2, J2))
- end).
+ ?assertEqual(ok, couch_jobs:add(?TX, T1, J1, #{})),
+ ?assertMatch(#{state := pending, data := #{}}, get_job(T1, J1)),
+ ?assertEqual(ok, couch_jobs:remove(?TX, T1, J1)),
+ % Data and numeric type should work as well. Also do it in a
+ % transaction
+ Data = #{<<"x">> => 42},
+ ?assertEqual(ok, fabric2_fdb:transactional(fun(Tx) ->
+ couch_jobs:add(Tx, T2, J2, Data)
+ end)),
+ ?assertMatch(#{state := pending, data := Data}, get_job(T2, J2)),
+ ?assertEqual(ok, couch_jobs:remove(?TX, T2, J2)).
+
get_job_data_and_state(#{t1 := T, j1 := J}) ->
- ?_test(begin
- Data = #{<<"x">> => 42},
- ok = couch_jobs:add(?TX, T, J, Data),
- ?assertEqual({ok, Data}, couch_jobs:get_job_data(?TX, T, J)),
- ?assertEqual({ok, pending}, couch_jobs:get_job_state(?TX, T, J)),
- ?assertEqual(ok, couch_jobs:remove(?TX, T, J)),
- ?assertEqual({error, not_found}, couch_jobs:get_job_data(?TX, T, J)),
- ?assertEqual({error, not_found}, couch_jobs:get_job_state(?TX, T, J))
- end).
+ Data = #{<<"x">> => 42},
+ ok = couch_jobs:add(?TX, T, J, Data),
+ ?assertEqual({ok, Data}, couch_jobs:get_job_data(?TX, T, J)),
+ ?assertEqual({ok, pending}, couch_jobs:get_job_state(?TX, T, J)),
+ ?assertEqual(ok, couch_jobs:remove(?TX, T, J)),
+ ?assertEqual({error, not_found}, couch_jobs:get_job_data(?TX, T, J)),
+ ?assertEqual({error, not_found}, couch_jobs:get_job_state(?TX, T, J)).
add_remove_errors(#{t1 := T, j1 := J}) ->
- ?_test(begin
- ?assertEqual({error, not_found}, couch_jobs:remove(?TX, 999, <<"x">>)),
- ?assertMatch({error, {json_encoding_error, _}}, couch_jobs:add(?TX, T,
- J, #{1 => 2})),
- ?assertEqual({error, no_type_timeout}, couch_jobs:add(?TX, <<"x">>, J,
- #{})),
- ?assertEqual(ok, couch_jobs:add(?TX, T, J, #{})),
- ?assertEqual(ok, couch_jobs:add(?TX, T, J, #{})),
- ?assertEqual(ok, couch_jobs:remove(?TX, T, J))
- end).
+ ?assertEqual({error, not_found}, couch_jobs:remove(?TX, 999, <<"x">>)),
+ ?assertMatch({error, {json_encoding_error, _}}, couch_jobs:add(?TX, T,
+ J, #{1 => 2})),
+ ?assertEqual({error, no_type_timeout}, couch_jobs:add(?TX, <<"x">>, J,
+ #{})),
+ ?assertEqual(ok, couch_jobs:add(?TX, T, J, #{})),
+ ?assertEqual(ok, couch_jobs:add(?TX, T, J, #{})),
+ ?assertEqual(ok, couch_jobs:remove(?TX, T, J)).
add_with_the_same_scheduled_time(#{t1 := T, j1 := J}) ->
- ?_test(begin
- ?assertEqual(ok, couch_jobs:add(?TX, T, J, #{})),
- fabric2_fdb:transactional(fun(Tx) ->
- ?assertEqual(ok, couch_jobs:add(Tx, T, J, #{})),
- ?assert(erlfdb:is_read_only(Tx))
- end)
+ ?assertEqual(ok, couch_jobs:add(?TX, T, J, #{})),
+ fabric2_fdb:transactional(fun(Tx) ->
+ ?assertEqual(ok, couch_jobs:add(Tx, T, J, #{})),
+ ?assert(erlfdb:is_read_only(Tx))
end).
resubmit_as_job_creator(#{t1 := T, j1 := J}) ->
- ?_test(begin
- Data = #{<<"x">> => 42},
- ok = couch_jobs:add(?TX, T, J, Data, 15),
-
- % Job was pending, doesn't get resubmitted
- ok = couch_jobs:add(?TX, T, J, Data, 16),
- ?assertMatch(#{state := pending, stime := 16}, get_job(T, J)),
-
- {ok, Job1, Data} = couch_jobs:accept(T),
-
- % If is running, it gets flagged to be resubmitted
- ok = couch_jobs:add(?TX, T, J, Data, 17),
- ?assertMatch(#{state := running, stime := 17}, get_job(T, J)),
- ?assertEqual(true, couch_jobs:is_resubmitted(get_job(T, J))),
-
- ?assertEqual(ok, couch_jobs:finish(?TX, Job1)),
- % It should be pending according to the resubmit flag
- ?assertMatch(#{state := pending, stime := 17}, get_job(T, J)),
-
- % A finished job will be re-enqueued
- {ok, Job2, _} = couch_jobs:accept(T),
- ?assertEqual(ok, couch_jobs:finish(?TX, Job2)),
- ?assertMatch(#{state := finished, stime := 17}, get_job(T, J)),
- ok = couch_jobs:add(?TX, T, J, Data, 18),
- ?assertMatch(#{state := pending, stime := 18}, get_job(T, J))
- end).
+ Data = #{<<"x">> => 42},
+ ok = couch_jobs:add(?TX, T, J, Data, 15),
+ % Job was pending, doesn't get resubmitted
+ ok = couch_jobs:add(?TX, T, J, Data, 16),
+ ?assertMatch(#{state := pending, stime := 16}, get_job(T, J)),
-type_timeouts_and_server(#{t1 := T, t1_timeout := T1Timeout}) ->
- {timeout, 15, ?_test(begin
+ {ok, Job1, Data} = couch_jobs:accept(T),
- WaitForActivityMonitors = fun(N) ->
- test_util:wait(fun() ->
- Pids = couch_jobs_activity_monitor_sup:get_child_pids(),
- case length(Pids) == N of
- true -> ok;
- false -> wait
- end
- end)
- end,
+ % If is running, it gets flagged to be resubmitted
+ ok = couch_jobs:add(?TX, T, J, Data, 17),
+ ?assertMatch(#{state := running, stime := 17}, get_job(T, J)),
+ ?assertEqual(true, couch_jobs:is_resubmitted(get_job(T, J))),
- WaitForNotifiers = fun(N) ->
- test_util:wait(fun() ->
- Pids = couch_jobs_notifier_sup:get_child_pids(),
- case length(Pids) == N of
- true -> ok;
- false -> wait
- end
- end)
- end,
+ ?assertEqual(ok, couch_jobs:finish(?TX, Job1)),
+ % It should be pending according to the resubmit flag
+ ?assertMatch(#{state := pending, stime := 17}, get_job(T, J)),
- couch_jobs_server:force_check_types(),
+ % A finished job will be re-enqueued
+ {ok, Job2, _} = couch_jobs:accept(T),
+ ?assertEqual(ok, couch_jobs:finish(?TX, Job2)),
+ ?assertMatch(#{state := finished, stime := 17}, get_job(T, J)),
+ ok = couch_jobs:add(?TX, T, J, Data, 18),
+ ?assertMatch(#{state := pending, stime := 18}, get_job(T, J)).
- ?assertEqual(T1Timeout, couch_jobs:get_type_timeout(T)),
- WaitForActivityMonitors(2),
- ?assertEqual(2,
- length(couch_jobs_activity_monitor_sup:get_child_pids())),
-
- WaitForNotifiers(2),
- ?assertEqual(2, length(couch_jobs_notifier_sup:get_child_pids())),
-
- ?assertMatch({ok, _}, couch_jobs_server:get_notifier_server(T)),
+type_timeouts_and_server(#{t1 := T, t1_timeout := T1Timeout}) ->
+ WaitForActivityMonitors = fun(N) ->
+ test_util:wait(fun() ->
+ Pids = couch_jobs_activity_monitor_sup:get_child_pids(),
+ case length(Pids) == N of
+ true -> ok;
+ false -> wait
+ end
+ end)
+ end,
- ?assertEqual(ok, couch_jobs:set_type_timeout(<<"t3">>, 8)),
- couch_jobs_server:force_check_types(),
+ WaitForNotifiers = fun(N) ->
+ test_util:wait(fun() ->
+ Pids = couch_jobs_notifier_sup:get_child_pids(),
+ case length(Pids) == N of
+ true -> ok;
+ false -> wait
+ end
+ end)
+ end,
- WaitForActivityMonitors(3),
- ?assertEqual(3,
- length(couch_jobs_activity_monitor_sup:get_child_pids())),
+ couch_jobs_server:force_check_types(),
- WaitForNotifiers(3),
- ?assertEqual(3, length(couch_jobs_notifier_sup:get_child_pids())),
+ ?assertEqual(T1Timeout, couch_jobs:get_type_timeout(T)),
- ?assertEqual(ok, couch_jobs:clear_type_timeout(<<"t3">>)),
- couch_jobs_server:force_check_types(),
+ WaitForActivityMonitors(2),
+ ?assertEqual(2,
+ length(couch_jobs_activity_monitor_sup:get_child_pids())),
- WaitForActivityMonitors(2),
- ?assertEqual(2,
- length(couch_jobs_activity_monitor_sup:get_child_pids())),
+ WaitForNotifiers(2),
+ ?assertEqual(2, length(couch_jobs_notifier_sup:get_child_pids())),
- WaitForNotifiers(2),
- ?assertEqual(2,
- length(couch_jobs_notifier_sup:get_child_pids())),
+ ?assertMatch({ok, _}, couch_jobs_server:get_notifier_server(T)),
- ?assertMatch({error, _},
- couch_jobs_server:get_notifier_server(<<"t3">>)),
+ ?assertEqual(ok, couch_jobs:set_type_timeout(<<"t3">>, 8)),
+ couch_jobs_server:force_check_types(),
- ?assertEqual(not_found, couch_jobs:get_type_timeout(<<"t3">>))
- end)}.
+ WaitForActivityMonitors(3),
+ ?assertEqual(3,
+ length(couch_jobs_activity_monitor_sup:get_child_pids())),
+ WaitForNotifiers(3),
+ ?assertEqual(3, length(couch_jobs_notifier_sup:get_child_pids())),
-dead_notifier_restarts_jobs_server(#{}) ->
- ?_test(begin
- couch_jobs_server:force_check_types(),
+ ?assertEqual(ok, couch_jobs:clear_type_timeout(<<"t3">>)),
+ couch_jobs_server:force_check_types(),
- ServerPid = whereis(couch_jobs_server),
- Ref = monitor(process, ServerPid),
+ WaitForActivityMonitors(2),
+ ?assertEqual(2,
+ length(couch_jobs_activity_monitor_sup:get_child_pids())),
- [Notifier1, _Notifier2] = couch_jobs_notifier_sup:get_child_pids(),
- exit(Notifier1, kill),
+ WaitForNotifiers(2),
+ ?assertEqual(2,
+ length(couch_jobs_notifier_sup:get_child_pids())),
- % Killing a notifier should kill the server as well
- receive {'DOWN', Ref, _, _, _} -> ok end
- end).
+ ?assertMatch({error, _},
+ couch_jobs_server:get_notifier_server(<<"t3">>)),
+ ?assertEqual(not_found, couch_jobs:get_type_timeout(<<"t3">>)).
-bad_messages_restart_couch_jobs_server(#{}) ->
- ?_test(begin
- % couch_jobs_server dies on bad cast
- ServerPid1 = whereis(couch_jobs_server),
- Ref1 = monitor(process, ServerPid1),
- gen_server:cast(ServerPid1, bad_cast),
- receive {'DOWN', Ref1, _, _, _} -> ok end,
- restart_app(),
+dead_notifier_restarts_jobs_server(#{}) ->
+ couch_jobs_server:force_check_types(),
- % couch_jobs_server dies on bad call
- ServerPid2 = whereis(couch_jobs_server),
- Ref2 = monitor(process, ServerPid2),
- catch gen_server:call(ServerPid2, bad_call),
- receive {'DOWN', Ref2, _, _, _} -> ok end,
+ ServerPid = whereis(couch_jobs_server),
+ Ref = monitor(process, ServerPid),
- restart_app(),
+ [Notifier1, _Notifier2] = couch_jobs_notifier_sup:get_child_pids(),
+ exit(Notifier1, kill),
- % couch_jobs_server dies on bad info
- ServerPid3 = whereis(couch_jobs_server),
- Ref3 = monitor(process, ServerPid3),
- ServerPid3 ! a_random_message,
- receive {'DOWN', Ref3, _, _, _} -> ok end,
+ % Killing a notifier should kill the server as well
+ receive {'DOWN', Ref, _, _, _} -> ok end.
- restart_app()
- end).
+bad_cast_restarts_couch_jobs_server(#{}) ->
+ ServerPid1 = whereis(couch_jobs_server),
+ Ref1 = monitor(process, ServerPid1),
+ gen_server:cast(ServerPid1, bad_cast),
+ receive {'DOWN', Ref1, _, _, _} -> ok end.
-bad_messages_restart_notifier(#{}) ->
- ?_test(begin
- couch_jobs_server:force_check_types(),
- % bad cast kills the activity monitor
- [AMon1, _] = couch_jobs_notifier_sup:get_child_pids(),
- Ref1 = monitor(process, AMon1),
- gen_server:cast(AMon1, bad_cast),
- receive {'DOWN', Ref1, _, _, _} -> ok end,
+bad_call_restarts_couch_jobs_server(#{}) ->
+ ServerPid2 = whereis(couch_jobs_server),
+ Ref2 = monitor(process, ServerPid2),
+ catch gen_server:call(ServerPid2, bad_call),
+ receive {'DOWN', Ref2, _, _, _} -> ok end.
- restart_app(),
- % bad calls restart activity monitor
- [AMon2, _] = couch_jobs_notifier_sup:get_child_pids(),
- Ref2 = monitor(process, AMon2),
- catch gen_server:call(AMon2, bad_call),
- receive {'DOWN', Ref2, _, _, _} -> ok end,
+bad_info_restarts_couch_jobs_server(#{}) ->
+ ServerPid3 = whereis(couch_jobs_server),
+ Ref3 = monitor(process, ServerPid3),
+ ServerPid3 ! a_random_message,
+ receive {'DOWN', Ref3, _, _, _} -> ok end.
- restart_app(),
- % bad info message kills activity monitor
- [AMon3, _] = couch_jobs_notifier_sup:get_child_pids(),
- Ref3 = monitor(process, AMon3),
- AMon3 ! a_bad_message,
- receive {'DOWN', Ref3, _, _, _} -> ok end,
+bad_cast_restarts_notifier(#{}) ->
+ couch_jobs_server:force_check_types(),
+ [AMon1, _] = couch_jobs_notifier_sup:get_child_pids(),
+ Ref1 = monitor(process, AMon1),
+ gen_server:cast(AMon1, bad_cast),
+ receive {'DOWN', Ref1, _, _, _} -> ok end.
- restart_app()
- end).
+bad_call_restarts_notifier(#{}) ->
+ couch_jobs_server:force_check_types(),
+ [AMon2, _] = couch_jobs_notifier_sup:get_child_pids(),
+ Ref2 = monitor(process, AMon2),
+ catch gen_server:call(AMon2, bad_call),
+ receive {'DOWN', Ref2, _, _, _} -> ok end.
-bad_messages_restart_activity_monitor(#{}) ->
- ?_test(begin
- couch_jobs_server:force_check_types(),
+bad_info_restarts_notifier(#{}) ->
+ couch_jobs_server:force_check_types(),
+ [AMon3, _] = couch_jobs_notifier_sup:get_child_pids(),
+ Ref3 = monitor(process, AMon3),
+ AMon3 ! a_bad_message,
+ receive {'DOWN', Ref3, _, _, _} -> ok end.
- % bad cast kills the activity monitor
- [AMon1, _] = couch_jobs_activity_monitor_sup:get_child_pids(),
- Ref1 = monitor(process, AMon1),
- gen_server:cast(AMon1, bad_cast),
- receive {'DOWN', Ref1, _, _, _} -> ok end,
- restart_app(),
+bad_cast_restarts_activity_monitor(#{}) ->
+ couch_jobs_server:force_check_types(),
+ [AMon1, _] = couch_jobs_activity_monitor_sup:get_child_pids(),
+ Ref1 = monitor(process, AMon1),
+ gen_server:cast(AMon1, bad_cast),
+ receive {'DOWN', Ref1, _, _, _} -> ok end.
- % bad calls restart activity monitor
- [AMon2, _] = couch_jobs_activity_monitor_sup:get_child_pids(),
- Ref2 = monitor(process, AMon2),
- catch gen_server:call(AMon2, bad_call),
- receive {'DOWN', Ref2, _, _, _} -> ok end,
- restart_app(),
+bad_call_restarts_activity_monitor(#{}) ->
+ couch_jobs_server:force_check_types(),
+ [AMon2, _] = couch_jobs_activity_monitor_sup:get_child_pids(),
+ Ref2 = monitor(process, AMon2),
+ catch gen_server:call(AMon2, bad_call),
+ receive {'DOWN', Ref2, _, _, _} -> ok end.
- % bad info message kills activity monitor
- [AMon3, _] = couch_jobs_activity_monitor_sup:get_child_pids(),
- Ref3 = monitor(process, AMon3),
- AMon3 ! a_bad_message,
- receive {'DOWN', Ref3, _, _, _} -> ok end,
- restart_app()
- end).
+bad_info_restarts_activity_monitor(#{}) ->
+ couch_jobs_server:force_check_types(),
+ % bad info message kills activity monitor
+ [AMon3, _] = couch_jobs_activity_monitor_sup:get_child_pids(),
+ Ref3 = monitor(process, AMon3),
+ AMon3 ! a_bad_message,
+ receive {'DOWN', Ref3, _, _, _} -> ok end.
basic_accept_and_finish(#{t1 := T, j1 := J}) ->
- ?_test(begin
- ok = couch_jobs:add(?TX, T, J, #{}),
- {ok, Job, #{}} = couch_jobs:accept(T),
- ?assertMatch(#{state := running}, get_job(T, J)),
- % check json validation for bad data in finish
- ?assertMatch({error, {json_encoding_error, _}},
- fabric2_fdb:transactional(fun(Tx) ->
- couch_jobs:finish(Tx, Job, #{1 => 1})
- end)),
- Data = #{<<"x">> => 42},
- ?assertEqual(ok, fabric2_fdb:transactional(fun(Tx) ->
- couch_jobs:finish(Tx, Job, Data)
+ ok = couch_jobs:add(?TX, T, J, #{}),
+ {ok, Job, #{}} = couch_jobs:accept(T),
+ ?assertMatch(#{state := running}, get_job(T, J)),
+ % check json validation for bad data in finish
+ ?assertMatch({error, {json_encoding_error, _}},
+ fabric2_fdb:transactional(fun(Tx) ->
+ couch_jobs:finish(Tx, Job, #{1 => 1})
end)),
- ?assertMatch(#{state := finished, data := Data}, get_job(T, J))
- end).
+ Data = #{<<"x">> => 42},
+ ?assertEqual(ok, fabric2_fdb:transactional(fun(Tx) ->
+ couch_jobs:finish(Tx, Job, Data)
+ end)),
+ ?assertMatch(#{state := finished, data := Data}, get_job(T, J)).
accept_blocking(#{t1 := T, j1 := J1, j2 := J2}) ->
- ?_test(begin
- Accept = fun() -> exit(couch_jobs:accept(T)) end,
- WaitAccept = fun(Ref) ->
- receive
- {'DOWN', Ref, _, _, Res} -> Res
- after
- 500 -> timeout
- end
- end,
- {_, Ref1} = spawn_monitor(Accept),
- ok = couch_jobs:add(?TX, T, J1, #{}),
- ?assertMatch({ok, #{id := J1}, #{}}, WaitAccept(Ref1)),
- {_, Ref2} = spawn_monitor(Accept),
- ?assertEqual(timeout, WaitAccept(Ref2)),
- ok = couch_jobs:add(?TX, T, J2, #{}),
- ?assertMatch({ok, #{id := J2}, #{}}, WaitAccept(Ref2))
- end).
+ Accept = fun() -> exit(couch_jobs:accept(T)) end,
+ WaitAccept = fun(Ref) ->
+ receive
+ {'DOWN', Ref, _, _, Res} -> Res
+ after
+ 500 -> timeout
+ end
+ end,
+ {_, Ref1} = spawn_monitor(Accept),
+ ok = couch_jobs:add(?TX, T, J1, #{}),
+ ?assertMatch({ok, #{id := J1}, #{}}, WaitAccept(Ref1)),
+ {_, Ref2} = spawn_monitor(Accept),
+ ?assertEqual(timeout, WaitAccept(Ref2)),
+ ok = couch_jobs:add(?TX, T, J2, #{}),
+ ?assertMatch({ok, #{id := J2}, #{}}, WaitAccept(Ref2)).
job_processor_update(#{t1 := T, j1 := J}) ->
- ?_test(begin
- ok = couch_jobs:add(?TX, T, J, #{}),
- {ok, Job, #{}} = couch_jobs:accept(T),
+ ok = couch_jobs:add(?TX, T, J, #{}),
+ {ok, Job, #{}} = couch_jobs:accept(T),
- % Use proper transactions in a few places here instead of passing in
- % ?TX This is mostly to increase code coverage
+ % Use proper transactions in a few places here instead of passing in
+ % ?TX. This is mostly to increase code coverage
- ?assertMatch({ok, #{job := true}}, fabric2_fdb:transactional(fun(Tx) ->
- couch_jobs:update(Tx, Job, #{<<"x">> => 1})
- end)),
+ ?assertMatch({ok, #{job := true}}, fabric2_fdb:transactional(fun(Tx) ->
+ couch_jobs:update(Tx, Job, #{<<"x">> => 1})
+ end)),
- ?assertMatch(#{data := #{<<"x">> := 1}, state := running},
- get_job(T, J)),
+ ?assertMatch(#{data := #{<<"x">> := 1}, state := running},
+ get_job(T, J)),
- ?assertMatch({ok, #{job := true}}, fabric2_fdb:transactional(fun(Tx) ->
- couch_jobs:update(Tx, Job)
- end)),
+ ?assertMatch({ok, #{job := true}}, fabric2_fdb:transactional(fun(Tx) ->
+ couch_jobs:update(Tx, Job)
+ end)),
- ?assertMatch(#{data := #{<<"x">> := 1}, state := running},
- get_job(T, J)),
+ ?assertMatch(#{data := #{<<"x">> := 1}, state := running},
+ get_job(T, J)),
- ?assertMatch({ok, #{job := true}}, fabric2_fdb:transactional(fun(Tx) ->
- couch_jobs:update(Tx, Job, #{<<"x">> => 2})
- end)),
+ ?assertMatch({ok, #{job := true}}, fabric2_fdb:transactional(fun(Tx) ->
+ couch_jobs:update(Tx, Job, #{<<"x">> => 2})
+ end)),
- % check json validation for bad data in update
- ?assertMatch({error, {json_encoding_error, _}},
- fabric2_fdb:transactional(fun(Tx) ->
- couch_jobs:update(Tx, Job, #{1 => 1})
- end)),
+ % check json validation for bad data in update
+ ?assertMatch({error, {json_encoding_error, _}},
+ fabric2_fdb:transactional(fun(Tx) ->
+ couch_jobs:update(Tx, Job, #{1 => 1})
+ end)),
- ?assertMatch(#{data := #{<<"x">> := 2}, state := running},
- get_job(T, J)),
+ ?assertMatch(#{data := #{<<"x">> := 2}, state := running},
+ get_job(T, J)),
- % Finish may update the data as well
- ?assertEqual(ok, couch_jobs:finish(?TX, Job, #{<<"x">> => 3})),
- ?assertMatch(#{data := #{<<"x">> := 3}, state := finished},
- get_job(T, J))
- end).
+ % Finish may update the data as well
+ ?assertEqual(ok, couch_jobs:finish(?TX, Job, #{<<"x">> => 3})),
+ ?assertMatch(#{data := #{<<"x">> := 3}, state := finished},
+ get_job(T, J)).
resubmit_enqueues_job(#{t1 := T, j1 := J}) ->
- ?_test(begin
- ok = couch_jobs:add(?TX, T, J, #{}),
- {ok, Job1, #{}} = couch_jobs:accept(T),
- ?assertMatch({ok, _}, couch_jobs:resubmit(?TX, Job1, 6)),
- ?assertEqual(ok, couch_jobs:finish(?TX, Job1)),
- ?assertMatch(#{state := pending, stime := 6}, get_job(T, J)),
- {ok, Job2, #{}} = couch_jobs:accept(T),
- ?assertEqual(ok, couch_jobs:finish(?TX, Job2)),
- ?assertMatch(#{state := finished}, get_job(T, J))
- end).
+ ok = couch_jobs:add(?TX, T, J, #{}),
+ {ok, Job1, #{}} = couch_jobs:accept(T),
+ ?assertMatch({ok, _}, couch_jobs:resubmit(?TX, Job1, 6)),
+ ?assertEqual(ok, couch_jobs:finish(?TX, Job1)),
+ ?assertMatch(#{state := pending, stime := 6}, get_job(T, J)),
+ {ok, Job2, #{}} = couch_jobs:accept(T),
+ ?assertEqual(ok, couch_jobs:finish(?TX, Job2)),
+ ?assertMatch(#{state := finished}, get_job(T, J)).
+
resubmit_finished_updates_job_data(#{t1 := T, j1 := J}) ->
- ?_test(begin
- Data1 = #{<<"test">> => 1},
- Data2 = #{<<"test">> => 2},
- ok = couch_jobs:add(?TX, T, J, Data1),
- {ok, Job1, #{}} = couch_jobs:accept(T),
- ?assertEqual(ok, couch_jobs:finish(?TX, Job1)),
- ?assertMatch({ok, _}, couch_jobs:resubmit(?TX, Job1, 6, Data2)),
- ?assertMatch({ok, _, Data2}, couch_jobs:accept(T))
- end).
+ Data1 = #{<<"test">> => 1},
+ Data2 = #{<<"test">> => 2},
+ ok = couch_jobs:add(?TX, T, J, Data1),
+ {ok, Job1, #{}} = couch_jobs:accept(T),
+ ?assertEqual(ok, couch_jobs:finish(?TX, Job1)),
+ ?assertMatch({ok, _}, couch_jobs:resubmit(?TX, Job1, 6, Data2)),
+ ?assertMatch({ok, _, Data2}, couch_jobs:accept(T)).
resubmit_running_does_not_update_job_data(#{t1 := T, j1 := J}) ->
- ?_test(begin
- Data1 = #{<<"test">> => 1},
- Data2 = #{<<"test">> => 2},
- ok = couch_jobs:add(?TX, T, J, Data1),
- {ok, Job1, #{}} = couch_jobs:accept(T),
- ?assertMatch({ok, _}, couch_jobs:resubmit(?TX, Job1, 6, Data2)),
- ?assertEqual(ok, couch_jobs:finish(?TX, Job1)),
- ?assertMatch({ok, _, Data1}, couch_jobs:accept(T))
- end).
+ Data1 = #{<<"test">> => 1},
+ Data2 = #{<<"test">> => 2},
+ ok = couch_jobs:add(?TX, T, J, Data1),
+ {ok, Job1, #{}} = couch_jobs:accept(T),
+ ?assertMatch({ok, _}, couch_jobs:resubmit(?TX, Job1, 6, Data2)),
+ ?assertEqual(ok, couch_jobs:finish(?TX, Job1)),
+ ?assertMatch({ok, _, Data1}, couch_jobs:accept(T)).
resubmit_custom_schedtime(#{t1 := T, j1 := J}) ->
- ?_test(begin
- ?assertEqual(ok, couch_jobs:add(?TX, T, J, #{}, 7)),
- {ok, Job, #{}} = couch_jobs:accept(T),
- ?assertMatch({ok, _}, couch_jobs:resubmit(?TX, Job, 9)),
- ?assertEqual(ok, couch_jobs:finish(?TX, Job)),
- ?assertMatch(#{stime := 9, state := pending}, get_job(T, J))
- end).
+ ?assertEqual(ok, couch_jobs:add(?TX, T, J, #{}, 7)),
+ {ok, Job, #{}} = couch_jobs:accept(T),
+ ?assertMatch({ok, _}, couch_jobs:resubmit(?TX, Job, 9)),
+ ?assertEqual(ok, couch_jobs:finish(?TX, Job)),
+ ?assertMatch(#{stime := 9, state := pending}, get_job(T, J)).
add_pending_updates_job_data(#{t1 := T, j1 := J}) ->
- ?_test(begin
- Data1 = #{<<"test">> => 1},
- Data2 = #{<<"test">> => 2},
- ok = couch_jobs:add(?TX, T, J, Data1),
- ?assertEqual(ok, couch_jobs:add(?TX, T, J, Data2, 6)),
- ?assertMatch({ok, _, Data2}, couch_jobs:accept(T))
- end).
+ Data1 = #{<<"test">> => 1},
+ Data2 = #{<<"test">> => 2},
+ ok = couch_jobs:add(?TX, T, J, Data1),
+ ?assertEqual(ok, couch_jobs:add(?TX, T, J, Data2, 6)),
+ ?assertMatch({ok, _, Data2}, couch_jobs:accept(T)).
add_finished_updates_job_data(#{t1 := T, j1 := J}) ->
- ?_test(begin
- Data1 = #{<<"test">> => 1},
- Data2 = #{<<"test">> => 2},
- ok = couch_jobs:add(?TX, T, J, Data1),
- {ok, Job1, #{}} = couch_jobs:accept(T),
- ?assertEqual(ok, couch_jobs:finish(?TX, Job1)),
- ?assertEqual(ok, couch_jobs:add(?TX, T, J, Data2, 6)),
- ?assertMatch({ok, _, Data2}, couch_jobs:accept(T))
- end).
+ Data1 = #{<<"test">> => 1},
+ Data2 = #{<<"test">> => 2},
+ ok = couch_jobs:add(?TX, T, J, Data1),
+ {ok, Job1, #{}} = couch_jobs:accept(T),
+ ?assertEqual(ok, couch_jobs:finish(?TX, Job1)),
+ ?assertEqual(ok, couch_jobs:add(?TX, T, J, Data2, 6)),
+ ?assertMatch({ok, _, Data2}, couch_jobs:accept(T)).
add_running_does_not_update_job_data(#{t1 := T, j1 := J}) ->
- ?_test(begin
- Data1 = #{<<"test">> => 1},
- Data2 = #{<<"test">> => 2},
- ok = couch_jobs:add(?TX, T, J, Data1),
- {ok, Job1, #{}} = couch_jobs:accept(T),
- ?assertEqual(ok, couch_jobs:add(?TX, T, J, Data2, 6)),
- ?assertEqual(ok, couch_jobs:finish(?TX, Job1)),
- ?assertMatch({ok, _, Data1}, couch_jobs:accept(T))
- end).
+ Data1 = #{<<"test">> => 1},
+ Data2 = #{<<"test">> => 2},
+ ok = couch_jobs:add(?TX, T, J, Data1),
+ {ok, Job1, #{}} = couch_jobs:accept(T),
+ ?assertEqual(ok, couch_jobs:add(?TX, T, J, Data2, 6)),
+ ?assertEqual(ok, couch_jobs:finish(?TX, Job1)),
+ ?assertMatch({ok, _, Data1}, couch_jobs:accept(T)).
accept_max_schedtime(#{t1 := T, j1 := J1, j2 := J2}) ->
- ?_test(begin
- ok = couch_jobs:add(?TX, T, J1, #{}, 5000),
- ok = couch_jobs:add(?TX, T, J2, #{}, 3000),
- ?assertEqual({error, not_found}, couch_jobs:accept(T,
- #{max_sched_time => 1000})),
- ?assertMatch({ok, #{id := J2}, _}, couch_jobs:accept(T,
- #{max_sched_time => 3000})),
- ?assertMatch({ok, #{id := J1}, _}, couch_jobs:accept(T,
- #{max_sched_time => 9000}))
- end).
+ ok = couch_jobs:add(?TX, T, J1, #{}, 5000),
+ ok = couch_jobs:add(?TX, T, J2, #{}, 3000),
+ ?assertEqual({error, not_found}, couch_jobs:accept(T,
+ #{max_sched_time => 1000})),
+ ?assertMatch({ok, #{id := J2}, _}, couch_jobs:accept(T,
+ #{max_sched_time => 3000})),
+ ?assertMatch({ok, #{id := J1}, _}, couch_jobs:accept(T,
+ #{max_sched_time => 9000})).
accept_no_schedule(#{t1 := T}) ->
- ?_test(begin
- JobCount = 25,
- Jobs = [fabric2_util:uuid() || _ <- lists:seq(1, JobCount)],
- [couch_jobs:add(?TX, T, J, #{}) || J <- Jobs],
- InvalidOpts = #{no_schedule => true, max_sched_time => 1},
- ?assertMatch({error, _}, couch_jobs:accept(T, InvalidOpts)),
- AcceptOpts = #{no_schedule => true},
- Accepted = [begin
- {ok, #{id := J}, _} = couch_jobs:accept(T, AcceptOpts),
- J
- end || _ <- lists:seq(1, JobCount)],
- ?assertEqual(lists:sort(Jobs), lists:sort(Accepted))
- end).
+ JobCount = 25,
+ Jobs = [fabric2_util:uuid() || _ <- lists:seq(1, JobCount)],
+ [couch_jobs:add(?TX, T, J, #{}) || J <- Jobs],
+ InvalidOpts = #{no_schedule => true, max_sched_time => 1},
+ ?assertMatch({error, _}, couch_jobs:accept(T, InvalidOpts)),
+ AcceptOpts = #{no_schedule => true},
+ Accepted = [begin
+ {ok, #{id := J}, _} = couch_jobs:accept(T, AcceptOpts),
+ J
+ end || _ <- lists:seq(1, JobCount)],
+ ?assertEqual(lists:sort(Jobs), lists:sort(Accepted)).
subscribe(#{t1 := T, j1 := J}) ->
- ?_test(begin
- ok = couch_jobs:add(?TX, T, J, #{<<"z">> => 1}),
+ ok = couch_jobs:add(?TX, T, J, #{<<"z">> => 1}),
- ?assertEqual({error, not_found}, couch_jobs:subscribe(<<"xyz">>, J)),
- ?assertEqual({error, not_found}, couch_jobs:subscribe(T, <<"j5">>)),
+ ?assertEqual({error, not_found}, couch_jobs:subscribe(<<"xyz">>, J)),
+ ?assertEqual({error, not_found}, couch_jobs:subscribe(T, <<"j5">>)),
- SubRes0 = couch_jobs:subscribe(T, J),
- ?assertMatch({ok, {_, _}, pending, #{<<"z">> := 1}}, SubRes0),
- {ok, SubId0, pending, _} = SubRes0,
+ SubRes0 = couch_jobs:subscribe(T, J),
+ ?assertMatch({ok, {_, _}, pending, #{<<"z">> := 1}}, SubRes0),
+ {ok, SubId0, pending, _} = SubRes0,
- SubRes1 = couch_jobs:subscribe(T, J),
- ?assertEqual(SubRes0, SubRes1),
+ SubRes1 = couch_jobs:subscribe(T, J),
+ ?assertEqual(SubRes0, SubRes1),
- ?assertEqual(ok, couch_jobs:unsubscribe(SubId0)),
+ ?assertEqual(ok, couch_jobs:unsubscribe(SubId0)),
- SubRes = couch_jobs:subscribe(T, J),
- ?assertMatch({ok, {_, _}, pending, #{<<"z">> := 1}}, SubRes),
- {ok, SubId, pending, _} = SubRes,
+ SubRes = couch_jobs:subscribe(T, J),
+ ?assertMatch({ok, {_, _}, pending, #{<<"z">> := 1}}, SubRes),
+ {ok, SubId, pending, _} = SubRes,
- {ok, Job, _} = couch_jobs:accept(T),
- ?assertMatch({T, J, running, #{<<"z">> := 1}},
- couch_jobs:wait(SubId, 5000)),
+ {ok, Job, _} = couch_jobs:accept(T),
+ ?assertMatch({T, J, running, #{<<"z">> := 1}},
+ couch_jobs:wait(SubId, 5000)),
- % Make sure we get intermediate `running` updates
- ?assertMatch({ok, _}, couch_jobs:update(?TX, Job, #{<<"z">> => 2})),
- ?assertMatch({T, J, running, #{<<"z">> := 2}},
- couch_jobs:wait(SubId, 5000)),
+ % Make sure we get intermediate `running` updates
+ ?assertMatch({ok, _}, couch_jobs:update(?TX, Job, #{<<"z">> => 2})),
+ ?assertMatch({T, J, running, #{<<"z">> := 2}},
+ couch_jobs:wait(SubId, 5000)),
- ?assertEqual(ok, couch_jobs:finish(?TX, Job, #{<<"z">> => 3})),
- ?assertMatch({T, J, finished, #{<<"z">> := 3}},
- couch_jobs:wait(SubId, finished, 5000)),
+ ?assertEqual(ok, couch_jobs:finish(?TX, Job, #{<<"z">> => 3})),
+ ?assertMatch({T, J, finished, #{<<"z">> := 3}},
+ couch_jobs:wait(SubId, finished, 5000)),
- ?assertEqual(timeout, couch_jobs:wait(SubId, 50)),
+ ?assertEqual(timeout, couch_jobs:wait(SubId, 50)),
- ?assertEqual({ok, finished, #{<<"z">> => 3}},
- couch_jobs:subscribe(T, J)),
+ ?assertEqual({ok, finished, #{<<"z">> => 3}},
+ couch_jobs:subscribe(T, J)),
- ?assertEqual(ok, couch_jobs:remove(?TX, T, J)),
- ?assertEqual({error, not_found}, couch_jobs:subscribe(T, J))
- end).
+ ?assertEqual(ok, couch_jobs:remove(?TX, T, J)),
+ ?assertEqual({error, not_found}, couch_jobs:subscribe(T, J)).
remove_when_subscribed_and_pending(#{t1 := T, j1 := J}) ->
- ?_test(begin
- ok = couch_jobs:add(?TX, T, J, #{<<"x">> => 1}),
- {ok, SId, pending, _} = couch_jobs:subscribe(T, J),
+ ok = couch_jobs:add(?TX, T, J, #{<<"x">> => 1}),
+ {ok, SId, pending, _} = couch_jobs:subscribe(T, J),
- couch_jobs:remove(?TX, T, J),
+ couch_jobs:remove(?TX, T, J),
- ?assertMatch({T, J, not_found, not_found}, couch_jobs:wait(SId, 5000)),
- ?assertEqual(timeout, couch_jobs:wait(SId, 50))
- end).
+ ?assertMatch({T, J, not_found, not_found}, couch_jobs:wait(SId, 5000)),
+ ?assertEqual(timeout, couch_jobs:wait(SId, 50)).
remove_when_subscribed_and_running(#{t1 := T, j1 := J}) ->
- ?_test(begin
- ok = couch_jobs:add(?TX, T, J, #{<<"z">> => 2}),
- {ok, SId, pending, _} = couch_jobs:subscribe(T, J),
- {ok, #{}, _} = couch_jobs:accept(T),
- ?assertMatch({_, _, running, _}, couch_jobs:wait(SId, 5000)),
+ ok = couch_jobs:add(?TX, T, J, #{<<"z">> => 2}),
+ {ok, SId, pending, _} = couch_jobs:subscribe(T, J),
+ {ok, #{}, _} = couch_jobs:accept(T),
+ ?assertMatch({_, _, running, _}, couch_jobs:wait(SId, 5000)),
- couch_jobs:remove(?TX, T, J),
+ couch_jobs:remove(?TX, T, J),
- ?assertMatch({T, J, not_found, not_found}, couch_jobs:wait(SId, 5000)),
- ?assertEqual(timeout, couch_jobs:wait(SId, 50))
- end).
+ ?assertMatch({T, J, not_found, not_found}, couch_jobs:wait(SId, 5000)),
+ ?assertEqual(timeout, couch_jobs:wait(SId, 50)).
subscribe_wait_multiple(#{t1 := T, j1 := J1, j2 := J2}) ->
- ?_test(begin
- ok = couch_jobs:add(?TX, T, J1, #{}),
- ok = couch_jobs:add(?TX, T, J2, #{}),
-
- {ok, S1, pending, #{}} = couch_jobs:subscribe(T, J1),
- {ok, S2, pending, #{}} = couch_jobs:subscribe(T, J2),
-
- Subs = [S1, S2],
-
- % Accept one job. Only one running update is expected. PJob1 and PJob2
- % do not necessarily correspond got Job1 and Job2, they could be
- % accepted as Job2 and Job1 respectively.
- {ok, PJob1, _} = couch_jobs:accept(T),
- ?assertMatch({_, _, running, _}, couch_jobs:wait(Subs, 5000)),
- ?assertMatch(timeout, couch_jobs:wait(Subs, 50)),
-
- % Accept another job. Expect another update.
- {ok, PJob2, _} = couch_jobs:accept(T),
- ?assertMatch({_, _, running, _}, couch_jobs:wait(Subs, 5000)),
- ?assertMatch(timeout, couch_jobs:wait(Subs, 50)),
-
- ?assertMatch({ok, _}, couch_jobs:update(?TX, PJob1, #{<<"q">> => 5})),
- ?assertMatch({ok, _}, couch_jobs:update(?TX, PJob2, #{<<"r">> => 6})),
-
- % Each job was updated once, expect two running updates.
- ?assertMatch({_, _, running, _}, couch_jobs:wait(Subs, 5000)),
- ?assertMatch({_, _, running, _}, couch_jobs:wait(Subs, 5000)),
-
- % Finish one job. Expect one finished update only.
- ?assertEqual(ok, couch_jobs:finish(?TX, PJob1)),
-
- ?assertMatch({_, _, finished, #{<<"q">> := 5}},
- couch_jobs:wait(Subs, finished, 5000)),
- ?assertMatch(timeout, couch_jobs:wait(Subs, finished, 50)),
-
- % Finish another job. However, unsubscribe should flush the
- % the message and we should not get it.
- ?assertEqual(ok, couch_jobs:finish(?TX, PJob2)),
- ?assertEqual(ok, couch_jobs:unsubscribe(S1)),
- ?assertEqual(ok, couch_jobs:unsubscribe(S2)),
- ?assertMatch(timeout, couch_jobs:wait(Subs, finished, 50))
- end).
+ ok = couch_jobs:add(?TX, T, J1, #{}),
+ ok = couch_jobs:add(?TX, T, J2, #{}),
+
+ {ok, S1, pending, #{}} = couch_jobs:subscribe(T, J1),
+ {ok, S2, pending, #{}} = couch_jobs:subscribe(T, J2),
+
+ Subs = [S1, S2],
+
+ % Accept one job. Only one running update is expected. PJob1 and PJob2
+ % do not necessarily correspond to Job1 and Job2, they could be
+ % accepted as Job2 and Job1 respectively.
+ {ok, PJob1, _} = couch_jobs:accept(T),
+ ?assertMatch({_, _, running, _}, couch_jobs:wait(Subs, 5000)),
+ ?assertMatch(timeout, couch_jobs:wait(Subs, 50)),
+
+ % Accept another job. Expect another update.
+ {ok, PJob2, _} = couch_jobs:accept(T),
+ ?assertMatch({_, _, running, _}, couch_jobs:wait(Subs, 5000)),
+ ?assertMatch(timeout, couch_jobs:wait(Subs, 50)),
+
+ ?assertMatch({ok, _}, couch_jobs:update(?TX, PJob1, #{<<"q">> => 5})),
+ ?assertMatch({ok, _}, couch_jobs:update(?TX, PJob2, #{<<"r">> => 6})),
+
+ % Each job was updated once, expect two running updates.
+ ?assertMatch({_, _, running, _}, couch_jobs:wait(Subs, 5000)),
+ ?assertMatch({_, _, running, _}, couch_jobs:wait(Subs, 5000)),
+
+ % Finish one job. Expect one finished update only.
+ ?assertEqual(ok, couch_jobs:finish(?TX, PJob1)),
+
+ ?assertMatch({_, _, finished, #{<<"q">> := 5}},
+ couch_jobs:wait(Subs, finished, 5000)),
+ ?assertMatch(timeout, couch_jobs:wait(Subs, finished, 50)),
+
+ % Finish another job. However, unsubscribe should flush the
+ % message and we should not get it.
+ ?assertEqual(ok, couch_jobs:finish(?TX, PJob2)),
+ ?assertEqual(ok, couch_jobs:unsubscribe(S1)),
+ ?assertEqual(ok, couch_jobs:unsubscribe(S2)),
+ ?assertMatch(timeout, couch_jobs:wait(Subs, finished, 50)).
enqueue_inactive(#{t1 := T, j1 := J, t1_timeout := Timeout}) ->
- {timeout, 10, ?_test(begin
- couch_jobs_server:force_check_types(),
+ couch_jobs_server:force_check_types(),
- ok = couch_jobs:add(?TX, T, J, #{<<"y">> => 1}),
- {ok, Job, _} = couch_jobs:accept(T),
+ ok = couch_jobs:add(?TX, T, J, #{<<"y">> => 1}),
+ {ok, Job, _} = couch_jobs:accept(T),
- {ok, SubId, running, #{<<"y">> := 1}} = couch_jobs:subscribe(T, J),
- Wait = 3 * Timeout * 1000,
- ?assertEqual({T, J, pending, #{<<"y">> => 1}},
- couch_jobs:wait(SubId, pending, Wait)),
- ?assertMatch(#{state := pending}, get_job(T, J)),
+ {ok, SubId, running, #{<<"y">> := 1}} = couch_jobs:subscribe(T, J),
+ Wait = 3 * Timeout * 1000,
+ ?assertEqual({T, J, pending, #{<<"y">> => 1}},
+ couch_jobs:wait(SubId, pending, Wait)),
+ ?assertMatch(#{state := pending}, get_job(T, J)),
- % After job was re-enqueued, old job processor can't update it anymore
- ?assertEqual({error, halt}, couch_jobs:update(?TX, Job)),
- ?assertEqual({error, halt}, couch_jobs:finish(?TX, Job))
- end)}.
+ % After job was re-enqueued, old job processor can't update it anymore
+ ?assertEqual({error, halt}, couch_jobs:update(?TX, Job)),
+ ?assertEqual({error, halt}, couch_jobs:finish(?TX, Job)).
remove_running_job(#{t1 := T, j1 := J}) ->
- ?_test(begin
- ok = couch_jobs:add(?TX, T, J, #{}),
- {ok, Job, _} = couch_jobs:accept(T),
- ?assertEqual(ok, couch_jobs:remove(?TX, T, J)),
- ?assertEqual({error, not_found}, couch_jobs:remove(?TX, T, J)),
- ?assertEqual({error, halt}, couch_jobs:update(?TX, Job)),
- ?assertEqual({error, halt}, couch_jobs:finish(?TX, Job))
- end).
+ ok = couch_jobs:add(?TX, T, J, #{}),
+ {ok, Job, _} = couch_jobs:accept(T),
+ ?assertEqual(ok, couch_jobs:remove(?TX, T, J)),
+ ?assertEqual({error, not_found}, couch_jobs:remove(?TX, T, J)),
+ ?assertEqual({error, halt}, couch_jobs:update(?TX, Job)),
+ ?assertEqual({error, halt}, couch_jobs:finish(?TX, Job)).
check_get_jobs(#{t1 := T1, j1 := J1, t2 := T2, j2 := J2}) ->
- ?_test(begin
- ok = couch_jobs:add(?TX, T1, J1, #{}),
- ok = couch_jobs:add(?TX, T2, J2, #{}),
- ?assertMatch([
- {T2, J2, pending, #{}},
- {T1, J1, pending, #{}}
- ], lists:sort(couch_jobs_fdb:get_jobs())),
- {ok, _, _} = couch_jobs:accept(T1),
- ?assertMatch([
- {T2, J2, pending, #{}},
- {T1, J1, running, #{}}
- ], lists:sort(couch_jobs_fdb:get_jobs()))
- end).
+ ok = couch_jobs:add(?TX, T1, J1, #{}),
+ ok = couch_jobs:add(?TX, T2, J2, #{}),
+ ?assertMatch([
+ {T2, J2, pending, #{}},
+ {T1, J1, pending, #{}}
+ ], lists:sort(couch_jobs_fdb:get_jobs())),
+ {ok, _, _} = couch_jobs:accept(T1),
+ ?assertMatch([
+ {T2, J2, pending, #{}},
+ {T1, J1, running, #{}}
+ ], lists:sort(couch_jobs_fdb:get_jobs())).
use_fabric_transaction_object(#{t1 := T1, j1 := J1, dbname := DbName}) ->
- ?_test(begin
- {ok, Db} = fabric2_db:create(DbName, []),
- ?assertEqual(ok, couch_jobs:add(Db, T1, J1, #{})),
- ?assertMatch(#{state := pending, data := #{}}, get_job(T1, J1)),
- {ok, Job, _} = couch_jobs:accept(T1),
- ?assertEqual(ok, fabric2_fdb:transactional(Db, fun(Db1) ->
- {ok, #{}} = couch_jobs:get_job_data(Db1, T1, J1),
- Doc1 = #doc{id = <<"1">>, body = {[]}},
- {ok, {_, _}} = fabric2_db:update_doc(Db1, Doc1),
- Doc2 = #doc{id = <<"2">>, body = {[]}},
- {ok, {_, _}} = fabric2_db:update_doc(Db1, Doc2),
- couch_jobs:finish(Db1, Job, #{<<"d">> => 1})
- end)),
- ok = couch_jobs:remove(#{tx => undefined}, T1, J1),
- ok = fabric2_db:delete(DbName, [])
- end).
+ {ok, Db} = fabric2_db:create(DbName, []),
+ ?assertEqual(ok, couch_jobs:add(Db, T1, J1, #{})),
+ ?assertMatch(#{state := pending, data := #{}}, get_job(T1, J1)),
+ {ok, Job, _} = couch_jobs:accept(T1),
+ ?assertEqual(ok, fabric2_fdb:transactional(Db, fun(Db1) ->
+ {ok, #{}} = couch_jobs:get_job_data(Db1, T1, J1),
+ Doc1 = #doc{id = <<"1">>, body = {[]}},
+ {ok, {_, _}} = fabric2_db:update_doc(Db1, Doc1),
+ Doc2 = #doc{id = <<"2">>, body = {[]}},
+ {ok, {_, _}} = fabric2_db:update_doc(Db1, Doc2),
+ couch_jobs:finish(Db1, Job, #{<<"d">> => 1})
+ end)),
+ ok = couch_jobs:remove(#{tx => undefined}, T1, J1),
+ ok = fabric2_db:delete(DbName, []).
metadata_version_bump(_) ->
- ?_test(begin
- JTx1 = couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(Tx) -> Tx end),
- ?assertMatch(#{md_version := not_found}, JTx1),
-
- ets:delete_all_objects(couch_jobs_fdb),
- couch_jobs_fdb:bump_metadata_version(),
- JTx2 = couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(Tx) -> Tx end),
- ?assertMatch(#{md_version := Bin} when is_binary(Bin), JTx2),
-
- ets:delete_all_objects(couch_jobs_fdb),
- couch_jobs_fdb:bump_metadata_version(),
- JTx3 = couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(Tx) -> Tx end),
- OldMdv = maps:get(md_version, JTx2),
- NewMdv = maps:get(md_version, JTx3),
- ?assert(NewMdv > OldMdv)
- end).
+ JTx1 = couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(Tx) -> Tx end),
+ ?assertMatch(#{md_version := not_found}, JTx1),
+
+ couch_jobs_fdb:bump_metadata_version(),
+ ets:delete_all_objects(couch_jobs_fdb),
+
+ JTx2 = couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(Tx) -> Tx end),
+ ?assertMatch(#{md_version := Bin} when is_binary(Bin), JTx2),
+
+ couch_jobs_fdb:bump_metadata_version(),
+ ets:delete_all_objects(couch_jobs_fdb),
+
+ JTx3 = couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(Tx) -> Tx end),
+ OldMdv = maps:get(md_version, JTx2),
+ NewMdv = maps:get(md_version, JTx3),
+ ?assert(NewMdv > OldMdv).
diff --git a/src/couch_js/src/couch_js.app.src b/src/couch_js/src/couch_js.app.src
index 44efd6d7d..d53d3c362 100644
--- a/src/couch_js/src/couch_js.app.src
+++ b/src/couch_js/src/couch_js.app.src
@@ -21,7 +21,6 @@
kernel,
stdlib,
config,
- couch_log,
- ioq
+ couch_log
]}
]}.
diff --git a/src/couch_js/src/couch_js.erl b/src/couch_js/src/couch_js.erl
index 1bc0f1927..a9c974ef8 100644
--- a/src/couch_js/src/couch_js.erl
+++ b/src/couch_js/src/couch_js.erl
@@ -18,7 +18,10 @@
-export([
acquire_map_context/1,
release_map_context/1,
- map_docs/2
+ map_docs/2,
+ acquire_context/0,
+ release_context/1,
+ try_compile/4
]).
@@ -49,3 +52,15 @@ map_docs(Proc, Docs) ->
end, Results),
{Doc#doc.id, Tupled}
end, Docs)}.
+
+acquire_context() ->
+ Ctx = couch_query_servers:get_os_process(?JS),
+ {ok, Ctx}.
+
+
+release_context(Proc) ->
+ couch_query_servers:ret_os_process(Proc).
+
+
+try_compile(Proc, FunctionType, FunName, FunSrc) ->
+ couch_query_servers:try_compile(Proc, FunctionType, FunName, FunSrc).
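A hedged sketch of how the three new couch_js exports might be used together (not from the commit; the function type, name, and source arguments passed to try_compile/4 are assumptions):

%% Sketch only: acquire a JS context, try to compile a user-supplied function,
%% and always release the context, using the exports added above.
compile_user_fun(FunSrc) when is_binary(FunSrc) ->
    {ok, Proc} = couch_js:acquire_context(),
    try
        couch_js:try_compile(Proc, <<"map">>, <<"myfun">>, FunSrc)
    after
        couch_js:release_context(Proc)
    end.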
diff --git a/src/couch_js/src/couch_js_native_process.erl b/src/couch_js/src/couch_js_native_process.erl
index d5ed3f94f..8add3d5f2 100644
--- a/src/couch_js/src/couch_js_native_process.erl
+++ b/src/couch_js/src/couch_js_native_process.erl
@@ -56,6 +56,7 @@
}).
-include_lib("couch/include/couch_db.hrl").
+-include_lib("kernel/include/logger.hrl").
start_link() ->
gen_server:start_link(?MODULE, [], []).
@@ -80,6 +81,11 @@ handle_call({set_timeout, TimeOut}, _From, State) ->
{reply, ok, State#evstate{timeout=TimeOut}, State#evstate.idle};
handle_call({prompt, Data}, _From, State) ->
+ ?LOG_DEBUG(#{
+ what => prompt,
+ in => native_process,
+ msg => ?JSON_ENCODE(Data)
+ }),
couch_log:debug("Prompt native qs: ~s",[?JSON_ENCODE(Data)]),
{NewState, Resp} = try run(State, to_binary(Data)) of
{S, R} -> {S, R}
@@ -211,6 +217,11 @@ run(#evstate{ddocs=DDocs}=State, [<<"ddoc">>, DDocId | Rest]) ->
DDoc = load_ddoc(DDocs, DDocId),
ddoc(State, DDoc, Rest);
run(_, Unknown) ->
+ ?LOG_ERROR(#{
+ what => unknown_command,
+ in => native_process,
+ cmd => Unknown
+ }),
couch_log:error("Native Process: Unknown command: ~p~n", [Unknown]),
throw({error, unknown_command}).
@@ -237,7 +248,13 @@ ddoc(State, {_, Fun}, [<<"filters">>|_], [Docs, Req]) ->
case catch Fun(Doc, Req) of
true -> true;
false -> false;
- {'EXIT', Error} -> couch_log:error("~p", [Error])
+ {'EXIT', Error} ->
+ ?LOG_ERROR(#{
+ what => filter_fun_crash,
+ in => native_process,
+ details => Error
+ }),
+ couch_log:error("~p", [Error])
end
end,
Resp = lists:map(FilterFunWrapper, Docs),
@@ -249,7 +266,13 @@ ddoc(State, {_, Fun}, [<<"views">>|_], [Docs]) ->
ok -> false;
false -> false;
[_|_] -> true;
- {'EXIT', Error} -> couch_log:error("~p", [Error])
+ {'EXIT', Error} ->
+ ?LOG_ERROR(#{
+ what => view_fun_crash,
+ in => native_process,
+ details => Error
+ }),
+ couch_log:error("~p", [Error])
end
end,
Resp = lists:map(MapFunWrapper, Docs),
@@ -318,6 +341,12 @@ bindings(State, Sig, DDoc) ->
Self = self(),
Log = fun(Msg) ->
+ ?LOG_INFO(#{
+ what => user_defined_log,
+ in => native_process,
+ signature => Sig,
+ msg => Msg
+ }),
couch_log:info(Msg, [])
end,
@@ -393,6 +422,13 @@ makefun(_State, Source, BindFuns) when is_list(BindFuns) ->
{ok, [ParsedForm]} ->
ParsedForm;
{error, {LineNum, _Mod, [Mesg, Params]}}=Error ->
+ ?LOG_ERROR(#{
+ what => syntax_error,
+ in => native_process,
+ line => LineNum,
+ details => Mesg,
+ parameters => Params
+ }),
couch_log:error("Syntax error on line: ~p~n~s~p~n",
[LineNum, Mesg, Params]),
throw(Error)
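The native-process changes, like the rest of this commit, pair each couch_log call with a structured report sent through OTP's logger. A small self-contained illustration of that pattern (assumes OTP 21+; the report keys follow the what/in/details convention seen in the diff, and the module is a made-up example):

%% Sketch only: structured logger report alongside the legacy couch_log call.
-module(structured_log_example).

-include_lib("kernel/include/logger.hrl").

-export([log_crash/2]).

log_crash(Pid, Reason) ->
    ?LOG_ERROR(#{
        what => monitored_process_crash,
        in => example,
        pid => Pid,
        details => Reason
    }),
    couch_log:error("~p : process ~p exited with ~p", [?MODULE, Pid, Reason]).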
diff --git a/src/couch_js/src/couch_js_os_process.erl b/src/couch_js/src/couch_js_os_process.erl
index a453d1ab2..4ff01e74b 100644
--- a/src/couch_js/src/couch_js_os_process.erl
+++ b/src/couch_js/src/couch_js_os_process.erl
@@ -20,6 +20,7 @@
-export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2, code_change/3]).
-include_lib("couch/include/couch_db.hrl").
+-include_lib("kernel/include/logger.hrl").
-define(PORT_OPTIONS, [stream, {line, 4096}, binary, exit_status, hide]).
@@ -51,10 +52,16 @@ send(Pid, Data) ->
gen_server:cast(Pid, {send, Data}).
prompt(Pid, Data) ->
- case ioq:call(Pid, {prompt, Data}, erlang:get(io_priority)) of
+ case gen_server:call(Pid, {prompt, Data}, infinity) of
{ok, Result} ->
Result;
Error ->
+ ?LOG_ERROR(#{
+ what => communication_error,
+ in => os_process,
+ pid => Pid,
+ details => Error
+ }),
couch_log:error("OS Process Error ~p :: ~p",[Pid,Error]),
throw(Error)
end.
@@ -95,12 +102,24 @@ readline(#os_proc{port = Port} = OsProc, Acc) ->
% Standard JSON functions
writejson(OsProc, Data) when is_record(OsProc, os_proc) ->
JsonData = ?JSON_ENCODE(Data),
+ ?LOG_DEBUG(#{
+ what => writeline,
+ in => os_process,
+ port => OsProc#os_proc.port,
+ data => JsonData
+ }),
couch_log:debug("OS Process ~p Input :: ~s",
[OsProc#os_proc.port, JsonData]),
true = writeline(OsProc, JsonData).
readjson(OsProc) when is_record(OsProc, os_proc) ->
Line = iolist_to_binary(readline(OsProc)),
+ ?LOG_DEBUG(#{
+ what => readline,
+ in => os_process,
+ port => OsProc#os_proc.port,
+ data => Line
+ }),
couch_log:debug("OS Process ~p Output :: ~s", [OsProc#os_proc.port, Line]),
try
% Don't actually parse the whole JSON. Just try to see if it's
@@ -115,12 +134,25 @@ readjson(OsProc) when is_record(OsProc, os_proc) ->
case ?JSON_DECODE(Line) of
[<<"log">>, Msg] when is_binary(Msg) ->
% we got a message to log. Log it and continue
+ ?LOG_INFO(#{
+ what => user_defined_log,
+ in => os_process,
+ port => OsProc#os_proc.port,
+ msg => Msg
+ }),
couch_log:info("OS Process ~p Log :: ~s",
[OsProc#os_proc.port, Msg]),
readjson(OsProc);
[<<"error">>, Id, Reason] ->
throw({error, {couch_util:to_existing_atom(Id),Reason}});
[<<"fatal">>, Id, Reason] ->
+ ?LOG_INFO(#{
+ what => fatal_error,
+ in => os_process,
+ port => OsProc#os_proc.port,
+ tag => Id,
+ details => Reason
+ }),
couch_log:info("OS Process ~p Fatal Error :: ~s ~p",
[OsProc#os_proc.port, Id, Reason]),
throw({couch_util:to_existing_atom(Id),Reason});
@@ -163,6 +195,11 @@ init([Command, Options, PortOptions]) ->
},
KillCmd = iolist_to_binary(readline(BaseProc)),
Pid = self(),
+ ?LOG_DEBUG(#{
+ what => init,
+ in => os_process,
+ port => BaseProc#os_proc.port
+ }),
couch_log:debug("OS Process Start :: ~p", [BaseProc#os_proc.port]),
spawn(fun() ->
% this ensure the real os process is killed when this process dies.
@@ -216,6 +253,12 @@ handle_cast({send, Data}, #os_proc{writer=Writer, idle=Idle}=OsProc) ->
{noreply, OsProc, Idle}
catch
throw:OsError ->
+ ?LOG_INFO(#{
+ what => write_failure,
+ in => os_process,
+ details => OsError,
+ data => Data
+ }),
couch_log:error("Failed sending data: ~p -> ~p", [Data, OsError]),
{stop, normal, OsProc}
end;
@@ -225,6 +268,11 @@ handle_cast(garbage_collect, #os_proc{idle=Idle}=OsProc) ->
handle_cast(stop, OsProc) ->
{stop, normal, OsProc};
handle_cast(Msg, #os_proc{idle=Idle}=OsProc) ->
+ ?LOG_DEBUG(#{
+ what => unknown_message,
+ in => os_process,
+ data => Msg
+ }),
couch_log:debug("OS Proc: Unknown cast: ~p", [Msg]),
{noreply, OsProc, Idle}.
@@ -233,12 +281,29 @@ handle_info(timeout, #os_proc{idle=Idle}=OsProc) ->
erlang:garbage_collect(),
{noreply, OsProc, Idle};
handle_info({Port, {exit_status, 0}}, #os_proc{port=Port}=OsProc) ->
+ ?LOG_INFO(#{
+ what => normal_termination,
+ in => os_process,
+ port => Port
+ }),
couch_log:info("OS Process terminated normally", []),
{stop, normal, OsProc};
handle_info({Port, {exit_status, Status}}, #os_proc{port=Port}=OsProc) ->
+ ?LOG_ERROR(#{
+ what => abnormal_termination,
+ in => os_process,
+ port => Port,
+ exit_status => Status
+ }),
couch_log:error("OS Process died with status: ~p", [Status]),
{stop, {exit_status, Status}, OsProc};
handle_info(Msg, #os_proc{idle=Idle}=OsProc) ->
+ ?LOG_DEBUG(#{
+ what => unexpected_message,
+ in => os_process,
+ port => OsProc#os_proc.port,
+ msg => Msg
+ }),
couch_log:debug("OS Proc: Unknown info: ~p", [Msg]),
{noreply, OsProc, Idle}.
diff --git a/src/couch_js/src/couch_js_proc_manager.erl b/src/couch_js/src/couch_js_proc_manager.erl
index db5c492f5..0f55cedb9 100644
--- a/src/couch_js/src/couch_js_proc_manager.erl
+++ b/src/couch_js/src/couch_js_proc_manager.erl
@@ -40,6 +40,7 @@
]).
-include_lib("couch/include/couch_db.hrl").
+-include_lib("kernel/include/logger.hrl").
-define(PROCS, couch_js_proc_manager_procs).
-define(WAITERS, couch_js_proc_manager_waiters).
@@ -197,6 +198,7 @@ handle_cast({os_proc_idle, Pid}, #state{counts=Counts}=State) ->
[#proc_int{client=undefined, lang=Lang}=Proc] ->
case dict:find(Lang, Counts) of
{ok, Count} when Count >= State#state.soft_limit ->
+ ?LOG_INFO(#{what => close_idle_os_process, pid => Pid}),
couch_log:info("Closing idle OS Process: ~p", [Pid]),
remove_proc(State, Proc);
{ok, _} ->
@@ -239,6 +241,7 @@ handle_info({'EXIT', Pid, spawn_error}, State) ->
{noreply, flush_waiters(NewState, Lang)};
handle_info({'EXIT', Pid, Reason}, State) ->
+ ?LOG_INFO(#{what => os_process_failure, pid => Pid, details => Reason}),
couch_log:info("~p ~p died ~p", [?MODULE, Pid, Reason]),
case ets:lookup(?PROCS, Pid) of
[#proc_int{} = Proc] ->
@@ -320,8 +323,12 @@ find_proc(#client{lang = Lang, ddoc = DDoc, ddoc_key = DDocKey} = Client) ->
find_proc(Lang, Fun) ->
try iter_procs(Lang, Fun)
- catch error:Reason ->
- StackTrace = erlang:get_stacktrace(),
+ catch error:Reason:StackTrace ->
+ ?LOG_ERROR(#{
+ what => os_process_not_available,
+ details => Reason,
+ stacktrace => StackTrace
+ }),
couch_log:error("~p ~p ~p", [?MODULE, Reason, StackTrace]),
{error, Reason}
end.
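The find_proc/2 hunk replaces the deprecated (and later removed) erlang:get_stacktrace/0 with the Class:Reason:Stacktrace catch-head syntax available since OTP 21. A standalone illustration of the idiom (not CouchDB code):

%% Sketch only: capture the stacktrace in the catch clause head (OTP 21+),
%% instead of calling erlang:get_stacktrace/0 afterwards.
safe_eval(Fun) when is_function(Fun, 0) ->
    try
        {ok, Fun()}
    catch
        error:Reason:StackTrace ->
            io:format("evaluation failed: ~p~n~p~n", [Reason, StackTrace]),
            {error, Reason}
    end.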
diff --git a/src/couch_js/src/couch_js_query_servers.erl b/src/couch_js/src/couch_js_query_servers.erl
index 12dc864ea..7dbacc633 100644
--- a/src/couch_js/src/couch_js_query_servers.erl
+++ b/src/couch_js/src/couch_js_query_servers.erl
@@ -18,7 +18,6 @@
-export([filter_docs/5]).
-export([filter_view/3]).
-export([finalize/2]).
--export([rewrite/3]).
-export([with_ddoc_proc/2, proc_prompt/2, ddoc_prompt/3, ddoc_proc_prompt/3, json_doc/1]).
@@ -26,6 +25,7 @@
-export([get_os_process/1, get_ddoc_process/2, ret_os_process/1]).
-include_lib("couch/include/couch_db.hrl").
+-include_lib("kernel/include/logger.hrl").
-define(SUMERROR, <<"The _sum function requires that map values be numbers, "
"arrays of numbers, or objects. Objects cannot be mixed with other "
@@ -265,6 +265,12 @@ check_sum_overflow(InSize, OutSize, Sum) ->
end.
log_sum_overflow(InSize, OutSize) ->
+ ?LOG_ERROR(#{
+ what => reduce_function_overflow,
+ input_size => InSize,
+ output_size => OutSize,
+ details => "reduce output must shrink more rapidly"
+ }),
Fmt = "Reduce output must shrink more rapidly: "
"input size: ~b "
"output size: ~b",
@@ -386,85 +392,6 @@ validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) ->
throw({unknown_error, Message})
end.
-
-rewrite(Req, Db, DDoc) ->
- Fields = [F || F <- chttpd_external:json_req_obj_fields(),
- F =/= <<"info">>, F =/= <<"form">>,
- F =/= <<"uuid">>, F =/= <<"id">>],
- JsonReq = chttpd_external:json_req_obj(Req, Db, null, Fields),
- case ddoc_prompt(DDoc, [<<"rewrites">>], [JsonReq]) of
- {[{<<"forbidden">>, Message}]} ->
- throw({forbidden, Message});
- {[{<<"unauthorized">>, Message}]} ->
- throw({unauthorized, Message});
- [<<"no_dispatch_rule">>] ->
- undefined;
- [<<"ok">>, {V}=Rewrite] when is_list(V) ->
- ok = validate_rewrite_response(Rewrite),
- Rewrite;
- [<<"ok">>, _] ->
- throw_rewrite_error(<<"bad rewrite">>);
- V ->
- couch_log:error("bad rewrite return ~p", [V]),
- throw({unknown_error, V})
- end.
-
-validate_rewrite_response({Fields}) when is_list(Fields) ->
- validate_rewrite_response_fields(Fields).
-
-validate_rewrite_response_fields([{Key, Value} | Rest]) ->
- validate_rewrite_response_field(Key, Value),
- validate_rewrite_response_fields(Rest);
-validate_rewrite_response_fields([]) ->
- ok.
-
-validate_rewrite_response_field(<<"method">>, Method) when is_binary(Method) ->
- ok;
-validate_rewrite_response_field(<<"method">>, _) ->
- throw_rewrite_error(<<"bad method">>);
-validate_rewrite_response_field(<<"path">>, Path) when is_binary(Path) ->
- ok;
-validate_rewrite_response_field(<<"path">>, _) ->
- throw_rewrite_error(<<"bad path">>);
-validate_rewrite_response_field(<<"body">>, Body) when is_binary(Body) ->
- ok;
-validate_rewrite_response_field(<<"body">>, _) ->
- throw_rewrite_error(<<"bad body">>);
-validate_rewrite_response_field(<<"headers">>, {Props}=Headers) when is_list(Props) ->
- validate_object_fields(Headers);
-validate_rewrite_response_field(<<"headers">>, _) ->
- throw_rewrite_error(<<"bad headers">>);
-validate_rewrite_response_field(<<"query">>, {Props}=Query) when is_list(Props) ->
- validate_object_fields(Query);
-validate_rewrite_response_field(<<"query">>, _) ->
- throw_rewrite_error(<<"bad query">>);
-validate_rewrite_response_field(<<"code">>, Code) when is_integer(Code) andalso Code >= 200 andalso Code < 600 ->
- ok;
-validate_rewrite_response_field(<<"code">>, _) ->
- throw_rewrite_error(<<"bad code">>);
-validate_rewrite_response_field(K, V) ->
- couch_log:debug("unknown rewrite field ~p=~p", [K, V]),
- ok.
-
-validate_object_fields({Props}) when is_list(Props) ->
- lists:foreach(fun
- ({Key, Value}) when is_binary(Key) andalso is_binary(Value) ->
- ok;
- ({Key, Value}) ->
- Reason = io_lib:format(
- "object key/value must be strings ~p=~p", [Key, Value]),
- throw_rewrite_error(Reason);
- (Value) ->
- throw_rewrite_error(io_lib:format("bad value ~p", [Value]))
- end, Props).
-
-
-throw_rewrite_error(Reason) when is_list(Reason)->
- throw_rewrite_error(iolist_to_binary(Reason));
-throw_rewrite_error(Reason) when is_binary(Reason) ->
- throw({rewrite_error, Reason}).
-
-
json_doc_options() ->
json_doc_options([]).
diff --git a/src/couch_lib/.gitignore b/src/couch_lib/.gitignore
new file mode 100644
index 000000000..fed08d1ca
--- /dev/null
+++ b/src/couch_lib/.gitignore
@@ -0,0 +1,21 @@
+*.o
+*.so
+*.lib
+*.dll
+*.d/
+
+ebin/
+.eunit
+.rebar
+
+*.plt
+*.swp
+*.swo
+.erlang.cookie
+erl_crash.dump
+.idea
+.vscode
+*.iml
+rebar.lock
+_*
+*~
\ No newline at end of file
diff --git a/src/couch_lib/README.md b/src/couch_lib/README.md
new file mode 100644
index 000000000..510c12f8e
--- /dev/null
+++ b/src/couch_lib/README.md
@@ -0,0 +1,28 @@
+# Description
+
+The `couch_lib` application is a collection of "pure" miscellaneous functions.
+They are "pure" in the sense that these functions should not call into any other
+CouchDB application. Think of it as an extension of the Erlang/OTP standard library.
+
+The two main reasons for this application to exist are:
+
+- to share non-CouchDB-specific helper functions between applications
+- to avoid or break cyclic dependencies between applications
+
+Please DO NOT put CouchDB-specific functionality in here. This means you shouldn't:
+
+- call couch_log:
+- call config:
+- rely on process dictionary values set by processes within CouchDB using `erlang:put/2`
+- send messages to specific `gen_server` processes using `gen_server:call`
+
+# Provided functionality
+
+## String parsing
+
+All string parsing functions accept either a string or a binary, as well as a value that is already of the target type; i.e. calling `parse_boolean(true)` simply returns `true`. They return either the parsed value or `{error, Reason :: term()}`.
+
+- `couch_lib_parse:parse_boolean/1` - parses given string or binary into Erlang's `boolean()` type
+- `couch_lib_parse:parse_integer/1` - parses given string as Erlang's `integer()` type
+- `couch_lib_parse:parse_non_neg_integer/1` - parses given string as Erlang's `non_neg_integer()` type (`[0..infinity)`)
+
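+A minimal usage sketch (shell session based on the functions above; the exact
+error message text is illustrative):
+
+```erlang
+1> couch_lib_parse:parse_boolean(<<"TRUE">>).
+true
+2> couch_lib_parse:parse_integer("42").
+42
+3> couch_lib_parse:parse_non_neg_integer("-1").
+{error, <<"Invalid value for non negative integer: \"-1\"">>}
+```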
diff --git a/src/couch_pse_tests/src/couch_pse_tests.app.src b/src/couch_lib/src/couch_lib.app.src
index 83f3875a0..1f78cf9fd 100644
--- a/src/couch_pse_tests/src/couch_pse_tests.app.src
+++ b/src/couch_lib/src/couch_lib.app.src
@@ -10,9 +10,11 @@
% License for the specific language governing permissions and limitations under
% the License.
-{application, couch_pse_tests, [
- {description, "Apache CouchDB Pluggable Storage Engine Test Suite"},
+{application, couch_lib, [
+ {description, "CouchDB library of various helpers"},
{vsn, git},
+ {registered, [
+ ]},
{applications, [
kernel,
stdlib
diff --git a/src/couch_lib/src/couch_lib_parse.erl b/src/couch_lib/src/couch_lib_parse.erl
new file mode 100644
index 000000000..e0b6c85d8
--- /dev/null
+++ b/src/couch_lib/src/couch_lib_parse.erl
@@ -0,0 +1,61 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_lib_parse).
+
+-export([
+ parse_boolean/1,
+ parse_integer/1,
+ parse_non_neg_integer/1
+]).
+
+parse_boolean(true) ->
+ true;
+parse_boolean(false) ->
+ false;
+
+parse_boolean(Val) when is_binary(Val) ->
+ parse_boolean(binary_to_list(Val));
+
+parse_boolean(Val) ->
+ case string:to_lower(Val) of
+ "true" -> true;
+ "false" -> false;
+ _ ->
+ Msg = io_lib:format("Invalid boolean: ~p", [Val]),
+ {error, list_to_binary(Msg)}
+ end.
+
+
+parse_integer(Val) when is_integer(Val) ->
+ Val;
+parse_integer(Val) when is_list(Val) ->
+ case (catch list_to_integer(Val)) of
+ IntVal when is_integer(IntVal) ->
+ IntVal;
+ _ ->
+ Msg = io_lib:format("Invalid value for integer: ~p", [Val]),
+ {error, list_to_binary(Msg)}
+ end;
+parse_integer(Val) when is_binary(Val) ->
+    parse_integer(binary_to_list(Val)).
+
+
+parse_non_neg_integer(Val) ->
+ case parse_integer(Val) of
+ IntVal when IntVal >= 0 ->
+ IntVal;
+ _ ->
+ Fmt = "Invalid value for non negative integer: ~p",
+ Msg = io_lib:format(Fmt, [Val]),
+ {error, list_to_binary(Msg)}
+    end.
\ No newline at end of file
diff --git a/src/couch_log/src/couch_log_config.erl b/src/couch_log/src/couch_log_config.erl
index ab076cc69..55925c39f 100644
--- a/src/couch_log/src/couch_log_config.erl
+++ b/src/couch_log/src/couch_log_config.erl
@@ -50,7 +50,8 @@ entries() ->
{level, "level", "info"},
{level_int, "level", "info"},
{max_message_size, "max_message_size", "16000"},
- {strip_last_msg, "strip_last_msg", "true"}
+ {strip_last_msg, "strip_last_msg", "true"},
+ {filter_fields, "filter_fields", "[pid, registered_name, error_info, messages]"}
].
@@ -104,4 +105,23 @@ transform(strip_last_msg, "false") ->
false;
transform(strip_last_msg, _) ->
- true.
+ true;
+
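+% Parse the configured filter_fields value into a list of atoms, falling back
+% to Default when it does not parse as such (e.g. "[pid, messages]" yields
+% [pid, messages], while "not a list of atoms" yields the default list).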
+transform(filter_fields, FieldsStr) ->
+ Default = [pid, registered_name, error_info, messages],
+ case parse_term(FieldsStr) of
+ {ok, List} when is_list(List) ->
+ case lists:all(fun erlang:is_atom/1, List) of
+ true ->
+ List;
+ false ->
+ Default
+ end;
+ _ ->
+ Default
+ end.
+
+
+parse_term(List) ->
+ {ok, Tokens, _} = erl_scan:string(List ++ "."),
+ erl_parse:parse_term(Tokens).
diff --git a/src/couch_log/src/couch_log_config_dyn.erl b/src/couch_log/src/couch_log_config_dyn.erl
index b39dcf2f5..1e1c927ae 100644
--- a/src/couch_log/src/couch_log_config_dyn.erl
+++ b/src/couch_log/src/couch_log_config_dyn.erl
@@ -26,4 +26,5 @@
get(level) -> info;
get(level_int) -> 2;
get(max_message_size) -> 16000;
-get(strip_last_msg) -> true.
+get(strip_last_msg) -> true;
+get(filter_fields) -> [pid, registered_name, error_info, messages].
diff --git a/src/couch_log/src/couch_log_formatter.erl b/src/couch_log/src/couch_log_formatter.erl
index 26997a8a6..3553666f6 100644
--- a/src/couch_log/src/couch_log_formatter.erl
+++ b/src/couch_log/src/couch_log_formatter.erl
@@ -199,7 +199,7 @@ format_crash_report(Report, Neighbours) ->
MsgFmt = "Process ~s with ~w neighbors ~s with reason: ~s",
Args = [Name, length(Neighbours), Type, ReasonStr],
Msg = io_lib:format(MsgFmt, Args),
- case filter_silly_list(Report, [pid, registered_name, error_info]) of
+ case filter_silly_list(Report) of
[] ->
Msg;
Rest ->
@@ -431,6 +431,11 @@ print_val(Val) ->
{Str, _} = couch_log_trunc_io:print(Val, 500),
Str.
+filter_silly_list(KV) ->
+ %% The complete list of fields is from here
+ %% https://github.com/erlang/otp/blob/7ca7a6c59543db8a6d26b95ae434e61a044b0800/lib/stdlib/src/proc_lib.erl#L539:L553
+ FilterFields = couch_log_config:get(filter_fields),
+ filter_silly_list(KV, FilterFields).
filter_silly_list([], _) ->
[];
diff --git a/src/couch_log/src/couch_log_monitor.erl b/src/couch_log/src/couch_log_monitor.erl
index ab0ae115f..d7620e290 100644
--- a/src/couch_log/src/couch_log_monitor.erl
+++ b/src/couch_log/src/couch_log_monitor.erl
@@ -37,11 +37,28 @@ start_link() ->
gen_server:start_link(?MODULE, [], []).
+% OTP_RELEASE defined in OTP >= 21 only
+-ifdef(OTP_RELEASE).
+
+init(_) ->
+ % See https://erlang.org/doc/man/error_logger.html#add_report_handler-1
+ % however that call doesn't call a supervised handler so we do the same
+ % thing add_report_handler/1 does but call gen_event:add_sup_handler/3
+ % instead of gen_event:add_handler/3.
+ Opts = #{level => info, filter_default => log},
+ _ = logger:add_handler(error_logger, error_logger, Opts),
+ ok = gen_event:add_sup_handler(error_logger, ?HANDLER_MOD, []),
+ {ok, nil}.
+
+-else.
+
init(_) ->
error_logger:start(),
ok = gen_event:add_sup_handler(error_logger, ?HANDLER_MOD, []),
{ok, nil}.
+-endif.
+
terminate(_, _) ->
ok.
diff --git a/src/couch_log/src/couch_log_sup.erl b/src/couch_log/src/couch_log_sup.erl
index fc1ac7812..6cb8d7395 100644
--- a/src/couch_log/src/couch_log_sup.erl
+++ b/src/couch_log/src/couch_log_sup.erl
@@ -65,6 +65,8 @@ handle_config_change("log", Key, _, _, S) ->
couch_log_config:reconfigure();
"strip_last_msg" ->
couch_log_config:reconfigure();
+ "filter_fields" ->
+ couch_log_config:reconfigure();
_ ->
% Someone may have changed the config for
% the writer so we need to re-initialize.
diff --git a/src/couch_log/test/eunit/couch_log_config_listener_test.erl b/src/couch_log/test/eunit/couch_log_config_listener_test.erl
index 07abae1ff..042066e5d 100644
--- a/src/couch_log/test/eunit/couch_log_config_listener_test.erl
+++ b/src/couch_log/test/eunit/couch_log_config_listener_test.erl
@@ -24,7 +24,8 @@ couch_log_config_test_() ->
fun couch_log_test_util:stop/1,
[
fun check_restart_listener/0,
- fun check_ignore_non_log/0
+ fun check_ignore_non_log/0,
+ fun check_only_one_couch_error_handler/0
]
}.
@@ -67,6 +68,10 @@ check_ignore_non_log() ->
end,
?assertError(config_change_timeout, Run()).
+check_only_one_couch_error_handler() ->
+ Handlers = gen_event:which_handlers(error_logger),
+ CouchHandlers = [H || H <- Handlers, H =:= couch_log_error_logger_h],
+ ?assertEqual(1, length(CouchHandlers)).
get_handler() ->
FoldFun = fun
diff --git a/src/couch_log/test/eunit/couch_log_config_test.erl b/src/couch_log/test/eunit/couch_log_config_test.erl
index a4c4bcff2..e47a52bc2 100644
--- a/src/couch_log/test/eunit/couch_log_config_test.erl
+++ b/src/couch_log/test/eunit/couch_log_config_test.erl
@@ -16,18 +16,21 @@
-include_lib("couch_log/include/couch_log.hrl").
-include_lib("eunit/include/eunit.hrl").
+-define(T(Name), {atom_to_list(Name), fun Name/0}).
couch_log_config_test_() ->
{setup,
fun couch_log_test_util:start/0,
fun couch_log_test_util:stop/1,
[
- fun check_level/0,
- fun check_max_message_size/0,
- fun check_bad_level/0,
- fun check_bad_max_message_size/0,
- fun check_strip_last_msg/0,
- fun check_bad_strip_last_msg/0
+ ?T(check_level),
+ ?T(check_max_message_size),
+ ?T(check_bad_level),
+ ?T(check_bad_max_message_size),
+ ?T(check_strip_last_msg),
+ ?T(check_bad_strip_last_msg),
+ ?T(check_filter_fields),
+ ?T(check_bad_filter_fields)
]
}.
@@ -143,3 +146,36 @@ check_bad_strip_last_msg() ->
couch_log_test_util:wait_for_config(),
?assertEqual(true, couch_log_config:get(strip_last_msg))
end).
+
+
+check_filter_fields() ->
+ Default = [pid, registered_name, error_info, messages],
+ ?assertEqual(Default, couch_log_config:get(filter_fields)),
+
+ couch_log_test_util:with_config_listener(fun() ->
+ config:set("log", "filter_fields", "[foo, bar, baz]"),
+ couch_log_test_util:wait_for_config(),
+ ?assertEqual([foo, bar, baz], couch_log_config:get(filter_fields)),
+
+ config:delete("log", "filter_fields"),
+ couch_log_test_util:wait_for_config(),
+ ?assertEqual(Default, couch_log_config:get(filter_fields))
+ end).
+
+check_bad_filter_fields() ->
+ Default = [pid, registered_name, error_info, messages],
+ ?assertEqual(Default, couch_log_config:get(filter_fields)),
+
+ couch_log_test_util:with_config_listener(fun() ->
+ config:set("log", "filter_fields", "[foo, bar, baz]"),
+ couch_log_test_util:wait_for_config(),
+ ?assertEqual([foo, bar, baz], couch_log_config:get(filter_fields)),
+
+ config:set("log", "filter_fields", "not a list of atoms"),
+ couch_log_test_util:wait_for_config(),
+ ?assertEqual(Default, couch_log_config:get(filter_fields)),
+
+ config:delete("log", "filter_fields"),
+ couch_log_test_util:wait_for_config(),
+ ?assertEqual(Default, couch_log_config:get(filter_fields))
+ end).
diff --git a/src/couch_mrview/LICENSE b/src/couch_mrview/LICENSE
deleted file mode 100644
index f6cd2bc80..000000000
--- a/src/couch_mrview/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/src/couch_mrview/include/couch_mrview.hrl b/src/couch_mrview/include/couch_mrview.hrl
deleted file mode 100644
index e0f80df81..000000000
--- a/src/couch_mrview/include/couch_mrview.hrl
+++ /dev/null
@@ -1,114 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(mrst, {
- sig=nil,
- fd=nil,
- fd_monitor,
- db_name,
- idx_name,
- language,
- design_opts=[],
- partitioned=false,
- lib,
- views,
- id_btree=nil,
- update_seq=0,
- purge_seq=0,
- first_build,
- partial_resp_pid,
- doc_acc,
- doc_queue,
- write_queue,
- qserver=nil
-}).
-
-
--record(mrview, {
- id_num,
- update_seq=0,
- purge_seq=0,
- map_names=[],
- reduce_funs=[],
- def,
- btree=nil,
- options=[]
-}).
-
-
--record(mrheader, {
- seq=0,
- purge_seq=0,
- id_btree_state=nil,
- view_states=nil
-}).
-
--define(MAX_VIEW_LIMIT, 16#10000000).
-
--record(mrargs, {
- view_type,
- reduce,
-
- preflight_fun,
-
- start_key,
- start_key_docid,
- end_key,
- end_key_docid,
- keys,
-
- direction = fwd,
- limit = ?MAX_VIEW_LIMIT,
- skip = 0,
- group_level = 0,
- group = undefined,
- stable = false,
- update = true,
- multi_get = false,
- inclusive_end = true,
- include_docs = false,
- doc_options = [],
- update_seq=false,
- conflicts,
- callback,
- sorted = true,
- extra = [],
- page_size = undefined,
- bookmark=nil
-}).
-
--record(vacc, {
- db,
- req,
- resp,
- prepend,
- etag,
- should_close = false,
- buffer = [],
- bufsize = 0,
- threshold = 1490,
- row_sent = false,
- meta_sent = false,
- paginated = false,
- meta = #{}
-}).
-
--record(lacc, {
- db,
- req,
- resp,
- qserver,
- lname,
- etag,
- code,
- headers
-}).
diff --git a/src/couch_mrview/priv/stats_descriptions.cfg b/src/couch_mrview/priv/stats_descriptions.cfg
deleted file mode 100644
index 95634670d..000000000
--- a/src/couch_mrview/priv/stats_descriptions.cfg
+++ /dev/null
@@ -1,24 +0,0 @@
-%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-%% use this file except in compliance with the License. You may obtain a copy of
-%% the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-%% License for the specific language governing permissions and limitations under
-%% the License.
-
-% Style guide for descriptions: Start with a lowercase letter & do not add
-% a trailing full-stop / period
-% Please keep this in alphabetical order
-
-{[couchdb, mrview, map_doc], [
- {type, counter},
- {desc, <<"number of documents mapped in the view server">>}
-]}.
-{[couchdb, mrview, emits], [
- {type, counter},
- {desc, <<"number of invocations of `emit' in map functions in the view server">>}
-]}.
diff --git a/src/couch_mrview/rebar.config b/src/couch_mrview/rebar.config
deleted file mode 100644
index e0d18443b..000000000
--- a/src/couch_mrview/rebar.config
+++ /dev/null
@@ -1,2 +0,0 @@
-{cover_enabled, true}.
-{cover_print_enabled, true}.
diff --git a/src/couch_mrview/src/couch_mrview.erl b/src/couch_mrview/src/couch_mrview.erl
deleted file mode 100644
index 880dfa725..000000000
--- a/src/couch_mrview/src/couch_mrview.erl
+++ /dev/null
@@ -1,701 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview).
-
--export([validate/2]).
--export([query_all_docs/2, query_all_docs/4]).
--export([query_view/3, query_view/4, query_view/6, get_view_index_pid/4]).
--export([get_info/2]).
--export([trigger_update/2, trigger_update/3]).
--export([get_view_info/3]).
--export([refresh/2]).
--export([compact/2, compact/3, cancel_compaction/2]).
--export([cleanup/1]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--record(mracc, {
- db,
- meta_sent=false,
- total_rows,
- offset,
- limit,
- skip,
- group_level,
- doc_info,
- callback,
- user_acc,
- last_go=ok,
- reduce_fun,
- finalizer,
- update_seq,
- args
-}).
-
-
-
-validate_ddoc_fields(DDoc) ->
- MapFuncType = map_function_type(DDoc),
- lists:foreach(fun(Path) ->
- validate_ddoc_fields(DDoc, Path)
- end, [
- [{<<"filters">>, object}, {any, [object, string]}],
- [{<<"language">>, string}],
- [{<<"lists">>, object}, {any, [object, string]}],
- [{<<"options">>, object}],
- [{<<"options">>, object}, {<<"include_design">>, boolean}],
- [{<<"options">>, object}, {<<"local_seq">>, boolean}],
- [{<<"options">>, object}, {<<"partitioned">>, boolean}],
- [{<<"rewrites">>, [string, array]}],
- [{<<"shows">>, object}, {any, [object, string]}],
- [{<<"updates">>, object}, {any, [object, string]}],
- [{<<"validate_doc_update">>, string}],
- [{<<"views">>, object}, {<<"lib">>, object}],
- [{<<"views">>, object}, {any, object}, {<<"map">>, MapFuncType}],
- [{<<"views">>, object}, {any, object}, {<<"reduce">>, string}]
- ]),
- require_map_function_for_views(DDoc),
- ok.
-
-require_map_function_for_views({Props}) ->
- case couch_util:get_value(<<"views">>, Props) of
- undefined -> ok;
- {Views} ->
- lists:foreach(fun
- ({<<"lib">>, _}) -> ok;
- ({Key, {Value}}) ->
- case couch_util:get_value(<<"map">>, Value) of
- undefined -> throw({invalid_design_doc,
- <<"View `", Key/binary, "` must contain map function">>});
- _ -> ok
- end
- end, Views),
- ok
- end.
-
-validate_ddoc_fields(DDoc, Path) ->
- case validate_ddoc_fields(DDoc, Path, []) of
- ok -> ok;
- {error, {FailedPath0, Type0}} ->
- FailedPath = iolist_to_binary(join(FailedPath0, <<".">>)),
- Type = format_type(Type0),
- throw({invalid_design_doc,
- <<"`", FailedPath/binary, "` field must have ",
- Type/binary, " type">>})
- end.
-
-validate_ddoc_fields(undefined, _, _) ->
- ok;
-validate_ddoc_fields(_, [], _) ->
- ok;
-validate_ddoc_fields({KVS}=Props, [{any, Type} | Rest], Acc) ->
- lists:foldl(fun
- ({Key, _}, ok) ->
- validate_ddoc_fields(Props, [{Key, Type} | Rest], Acc);
- ({_, _}, {error, _}=Error) ->
- Error
- end, ok, KVS);
-validate_ddoc_fields({KVS}=Props, [{Key, Type} | Rest], Acc) ->
- case validate_ddoc_field(Props, {Key, Type}) of
- ok ->
- validate_ddoc_fields(couch_util:get_value(Key, KVS),
- Rest,
- [Key | Acc]);
- error ->
- {error, {[Key | Acc], Type}};
- {error, Key1} ->
- {error, {[Key1 | Acc], Type}}
- end.
-
-validate_ddoc_field(undefined, Type) when is_atom(Type) ->
- ok;
-validate_ddoc_field(_, any) ->
- ok;
-validate_ddoc_field(Value, Types) when is_list(Types) ->
- lists:foldl(fun
- (_, ok) -> ok;
- (Type, _) -> validate_ddoc_field(Value, Type)
- end, error, Types);
-validate_ddoc_field(Value, string) when is_binary(Value) ->
- ok;
-validate_ddoc_field(Value, array) when is_list(Value) ->
- ok;
-validate_ddoc_field({Value}, object) when is_list(Value) ->
- ok;
-validate_ddoc_field(Value, boolean) when is_boolean(Value) ->
- ok;
-validate_ddoc_field({Props}, {any, Type}) ->
- validate_ddoc_field1(Props, Type);
-validate_ddoc_field({Props}, {Key, Type}) ->
- validate_ddoc_field(couch_util:get_value(Key, Props), Type);
-validate_ddoc_field(_, _) ->
- error.
-
-validate_ddoc_field1([], _) ->
- ok;
-validate_ddoc_field1([{Key, Value} | Rest], Type) ->
- case validate_ddoc_field(Value, Type) of
- ok ->
- validate_ddoc_field1(Rest, Type);
- error ->
- {error, Key}
- end.
-
-map_function_type({Props}) ->
- case couch_util:get_value(<<"language">>, Props) of
- <<"query">> -> object;
- _ -> string
- end.
-
-format_type(Type) when is_atom(Type) ->
- ?l2b(atom_to_list(Type));
-format_type(Types) when is_list(Types) ->
- iolist_to_binary(join(lists:map(fun atom_to_list/1, Types), <<" or ">>)).
-
-join(L, Sep) ->
- join(L, Sep, []).
-join([H|[]], _, Acc) ->
- [H | Acc];
-join([H|T], Sep, Acc) ->
- join(T, Sep, [Sep, H | Acc]).
-
-validate(#{} = Db, DDoc) ->
- DbName = fabric2_db:name(Db),
- IsPartitioned = fabric2_db:is_partitioned(Db),
- validate(DbName, IsPartitioned, DDoc);
-
-validate(Db, DDoc) ->
- DbName = couch_db:name(Db),
- IsPartitioned = couch_db:is_partitioned(Db),
- validate(DbName, IsPartitioned, DDoc).
-
-
-validate(DbName, _IsDbPartitioned, DDoc) ->
- ok = validate_ddoc_fields(DDoc#doc.body),
- GetName = fun
- (#mrview{map_names = [Name | _]}) -> Name;
- (#mrview{reduce_funs = [{Name, _} | _]}) -> Name;
- (_) -> null
- end,
- ValidateView = fun(Proc, #mrview{def=MapSrc, reduce_funs=Reds}=View) ->
- couch_query_servers:try_compile(Proc, map, GetName(View), MapSrc),
- lists:foreach(fun
- ({_RedName, <<"_sum", _/binary>>}) ->
- ok;
- ({_RedName, <<"_count", _/binary>>}) ->
- ok;
- ({_RedName, <<"_stats", _/binary>>}) ->
- ok;
- ({_RedName, <<"_approx_count_distinct", _/binary>>}) ->
- ok;
- ({_RedName, <<"_", _/binary>> = Bad}) ->
- Msg = ["`", Bad, "` is not a supported reduce function."],
- throw({invalid_design_doc, Msg});
- ({RedName, RedSrc}) ->
- couch_query_servers:try_compile(Proc, reduce, RedName, RedSrc)
- end, Reds)
- end,
- {ok, #mrst{
- language = Lang,
- views = Views
- }} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
-
- try Views =/= [] andalso couch_query_servers:get_os_process(Lang) of
- false ->
- ok;
- Proc ->
- try
- lists:foreach(fun(V) -> ValidateView(Proc, V) end, Views)
- after
- couch_query_servers:ret_os_process(Proc)
- end
- catch {unknown_query_language, _Lang} ->
- %% Allow users to save ddocs written in unknown languages
- ok
- end.
-
-
-query_all_docs(Db, Args) ->
- query_all_docs(Db, Args, fun default_cb/2, []).
-
-
-query_all_docs(Db, Args, Callback, Acc) when is_list(Args) ->
- query_all_docs(Db, to_mrargs(Args), Callback, Acc);
-query_all_docs(Db, Args0, Callback, Acc) ->
- Sig = couch_util:with_db(Db, fun(WDb) ->
- {ok, Info} = couch_db:get_db_info(WDb),
- couch_index_util:hexsig(couch_hash:md5_hash(term_to_binary(Info)))
- end),
- Args1 = Args0#mrargs{view_type=map},
- Args2 = couch_mrview_util:validate_all_docs_args(Db, Args1),
- {ok, Acc1} = case Args2#mrargs.preflight_fun of
- PFFun when is_function(PFFun, 2) -> PFFun(Sig, Acc);
- _ -> {ok, Acc}
- end,
- all_docs_fold(Db, Args2, Callback, Acc1).
-
-
-query_view(Db, DDoc, VName) ->
- query_view(Db, DDoc, VName, #mrargs{}).
-
-
-query_view(Db, DDoc, VName, Args) when is_list(Args) ->
- query_view(Db, DDoc, VName, to_mrargs(Args), fun default_cb/2, []);
-query_view(Db, DDoc, VName, Args) ->
- query_view(Db, DDoc, VName, Args, fun default_cb/2, []).
-
-
-query_view(Db, DDoc, VName, Args, Callback, Acc) when is_list(Args) ->
- query_view(Db, DDoc, VName, to_mrargs(Args), Callback, Acc);
-query_view(Db, DDoc, VName, Args0, Callback, Acc0) ->
- case couch_mrview_util:get_view(Db, DDoc, VName, Args0) of
- {ok, VInfo, Sig, Args} ->
- {ok, Acc1} = case Args#mrargs.preflight_fun of
- PFFun when is_function(PFFun, 2) -> PFFun(Sig, Acc0);
- _ -> {ok, Acc0}
- end,
- query_view(Db, VInfo, Args, Callback, Acc1);
- ddoc_updated ->
- Callback(ok, ddoc_updated)
- end.
-
-
-get_view_index_pid(Db, DDoc, ViewName, Args0) ->
- couch_mrview_util:get_view_index_pid(Db, DDoc, ViewName, Args0).
-
-
-query_view(Db, {Type, View, Ref}, Args, Callback, Acc) ->
- try
- case Type of
- map -> map_fold(Db, View, Args, Callback, Acc);
- red -> red_fold(Db, View, Args, Callback, Acc)
- end
- after
- erlang:demonitor(Ref, [flush])
- end.
-
-
-get_info(Db, DDoc) ->
- {ok, Pid} = couch_index_server:get_index(couch_mrview_index, Db, DDoc),
- couch_index:get_info(Pid).
-
-
-trigger_update(Db, DDoc) ->
- trigger_update(Db, DDoc, couch_db:get_update_seq(Db)).
-
-trigger_update(Db, DDoc, UpdateSeq) ->
- {ok, Pid} = couch_index_server:get_index(couch_mrview_index, Db, DDoc),
- couch_index:trigger_update(Pid, UpdateSeq).
-
-%% get informations on a view
-get_view_info(Db, DDoc, VName) ->
- {ok, {_, View, _}, _, _Args} = couch_mrview_util:get_view(Db, DDoc, VName,
- #mrargs{}),
-
- %% get the total number of rows
- {ok, TotalRows} = couch_mrview_util:get_row_count(View),
-
- {ok, [{update_seq, View#mrview.update_seq},
- {purge_seq, View#mrview.purge_seq},
- {total_rows, TotalRows}]}.
-
-
-%% @doc refresh a view index
-refresh(DbName, DDoc) when is_binary(DbName)->
- UpdateSeq = couch_util:with_db(DbName, fun(WDb) ->
- couch_db:get_update_seq(WDb)
- end),
-
- case couch_index_server:get_index(couch_mrview_index, DbName, DDoc) of
- {ok, Pid} ->
- case catch couch_index:get_state(Pid, UpdateSeq) of
- {ok, _} -> ok;
- Error -> {error, Error}
- end;
- Error ->
- {error, Error}
- end;
-
-refresh(Db, DDoc) ->
- refresh(couch_db:name(Db), DDoc).
-
-compact(Db, DDoc) ->
- compact(Db, DDoc, []).
-
-
-compact(Db, DDoc, Opts) ->
- {ok, Pid} = couch_index_server:get_index(couch_mrview_index, Db, DDoc),
- couch_index:compact(Pid, Opts).
-
-
-cancel_compaction(Db, DDoc) ->
- {ok, IPid} = couch_index_server:get_index(couch_mrview_index, Db, DDoc),
- {ok, CPid} = couch_index:get_compactor_pid(IPid),
- ok = couch_index_compactor:cancel(CPid),
-
- % Cleanup the compaction file if it exists
- {ok, #mrst{sig=Sig, db_name=DbName}} = couch_index:get_state(IPid, 0),
- couch_mrview_util:delete_compaction_file(DbName, Sig),
- ok.
-
-
-cleanup(Db) ->
- couch_mrview_cleanup:run(Db).
-
-
-all_docs_fold(Db, #mrargs{keys=undefined}=Args, Callback, UAcc) ->
- ReduceFun = get_reduce_fun(Args),
- Total = get_total_rows(Db, Args),
- UpdateSeq = get_update_seq(Db, Args),
- Acc = #mracc{
- db=Db,
- total_rows=Total,
- limit=Args#mrargs.limit,
- skip=Args#mrargs.skip,
- callback=Callback,
- user_acc=UAcc,
- reduce_fun=ReduceFun,
- update_seq=UpdateSeq,
- args=Args
- },
- [Opts1] = couch_mrview_util:all_docs_key_opts(Args),
- % TODO: This is a terrible hack for now. We'll probably have
- % to rewrite _all_docs to not be part of mrview and not expect
- % a btree. For now non-btree's will just have to pass 0 or
- % some fake reductions to get an offset.
- Opts2 = [include_reductions | Opts1],
- FunName = case couch_util:get_value(namespace, Args#mrargs.extra) of
- <<"_design">> -> fold_design_docs;
- <<"_local">> -> fold_local_docs;
- _ -> fold_docs
- end,
- {ok, Offset, FinalAcc} = couch_db:FunName(Db, fun map_fold/3, Acc, Opts2),
- finish_fold(FinalAcc, [{total, Total}, {offset, Offset}]);
-all_docs_fold(Db, #mrargs{direction=Dir, keys=Keys0}=Args, Callback, UAcc) ->
- ReduceFun = get_reduce_fun(Args),
- Total = get_total_rows(Db, Args),
- UpdateSeq = get_update_seq(Db, Args),
- Acc = #mracc{
- db=Db,
- total_rows=Total,
- limit=Args#mrargs.limit,
- skip=Args#mrargs.skip,
- callback=Callback,
- user_acc=UAcc,
- reduce_fun=ReduceFun,
- update_seq=UpdateSeq,
- args=Args
- },
- % Backwards compatibility hack. The old _all_docs iterates keys
- % in reverse if descending=true was passed. Here we'll just
- % reverse the list instead.
- Keys = if Dir =:= fwd -> Keys0; true -> lists:reverse(Keys0) end,
-
- FoldFun = fun(Key, Acc0) ->
- DocInfo = (catch couch_db:get_doc_info(Db, Key)),
- {Doc, Acc1} = case DocInfo of
- {ok, #doc_info{id=Id, revs=[RevInfo | _RestRevs]}=DI} ->
- Rev = couch_doc:rev_to_str(RevInfo#rev_info.rev),
- Props = [{rev, Rev}] ++ case RevInfo#rev_info.deleted of
- true -> [{deleted, true}];
- false -> []
- end,
- {{{Id, Id}, {Props}}, Acc0#mracc{doc_info=DI}};
- not_found ->
- {{{Key, error}, not_found}, Acc0}
- end,
- {_, Acc2} = map_fold(Doc, {[], [{0, 0, 0}]}, Acc1),
- Acc2
- end,
- FinalAcc = lists:foldl(FoldFun, Acc, Keys),
- finish_fold(FinalAcc, [{total, Total}]).
-
-
-map_fold(Db, View, Args, Callback, UAcc) ->
- {ok, Total} = couch_mrview_util:get_row_count(View),
- Acc = #mracc{
- db=Db,
- total_rows=Total,
- limit=Args#mrargs.limit,
- skip=Args#mrargs.skip,
- callback=Callback,
- user_acc=UAcc,
- reduce_fun=fun couch_mrview_util:reduce_to_count/1,
- update_seq=View#mrview.update_seq,
- args=Args
- },
- OptList = couch_mrview_util:key_opts(Args),
- {Reds, Acc2} = lists:foldl(fun(Opts, {_, Acc0}) ->
- {ok, R, A} = couch_mrview_util:fold(View, fun map_fold/3, Acc0, Opts),
- {R, A}
- end, {nil, Acc}, OptList),
- Offset = couch_mrview_util:reduce_to_count(Reds),
- finish_fold(Acc2, [{total, Total}, {offset, Offset}]).
-
-
-map_fold(#full_doc_info{} = FullDocInfo, OffsetReds, Acc) ->
- % matches for _all_docs and translates #full_doc_info{} -> KV pair
- case couch_doc:to_doc_info(FullDocInfo) of
- #doc_info{id=Id, revs=[#rev_info{deleted=false, rev=Rev}|_]} = DI ->
- Value = {[{rev, couch_doc:rev_to_str(Rev)}]},
- map_fold({{Id, Id}, Value}, OffsetReds, Acc#mracc{doc_info=DI});
- #doc_info{revs=[#rev_info{deleted=true}|_]} ->
- {ok, Acc}
- end;
-map_fold(_KV, _Offset, #mracc{skip=N}=Acc) when N > 0 ->
- {ok, Acc#mracc{skip=N-1, last_go=ok}};
-map_fold(KV, OffsetReds, #mracc{offset=undefined}=Acc) ->
- #mracc{
- total_rows=Total,
- callback=Callback,
- user_acc=UAcc0,
- reduce_fun=Reduce,
- update_seq=UpdateSeq,
- args=Args
- } = Acc,
- Offset = Reduce(OffsetReds),
- Meta = make_meta(Args, UpdateSeq, [{total, Total}, {offset, Offset}]),
- {Go, UAcc1} = Callback(Meta, UAcc0),
- Acc1 = Acc#mracc{meta_sent=true, offset=Offset, user_acc=UAcc1, last_go=Go},
- case Go of
- ok -> map_fold(KV, OffsetReds, Acc1);
- stop -> {stop, Acc1}
- end;
-map_fold(_KV, _Offset, #mracc{limit=0}=Acc) ->
- {stop, Acc};
-map_fold({{Key, Id}, Val}, _Offset, Acc) ->
- #mracc{
- db=Db,
- limit=Limit,
- doc_info=DI,
- callback=Callback,
- user_acc=UAcc0,
- args=Args
- } = Acc,
- Doc = case DI of
- #doc_info{} -> couch_mrview_util:maybe_load_doc(Db, DI, Args);
- _ -> couch_mrview_util:maybe_load_doc(Db, Id, Val, Args)
- end,
- Row = [{id, Id}, {key, Key}, {value, Val}] ++ Doc,
- {Go, UAcc1} = Callback({row, Row}, UAcc0),
- {Go, Acc#mracc{
- limit=Limit-1,
- doc_info=undefined,
- user_acc=UAcc1,
- last_go=Go
- }};
-map_fold(#doc{id = <<"_local/", _/binary>>} = Doc, _Offset, #mracc{} = Acc) ->
- #mracc{
- limit=Limit,
- callback=Callback,
- user_acc=UAcc0,
- args=Args
- } = Acc,
- #doc{
- id = DocId,
- revs = {Pos, [RevId | _]}
- } = Doc,
- Rev = {Pos, RevId},
- Row = [
- {id, DocId},
- {key, DocId},
- {value, {[{rev, couch_doc:rev_to_str(Rev)}]}}
- ] ++ if not Args#mrargs.include_docs -> []; true ->
- [{doc, couch_doc:to_json_obj(Doc, Args#mrargs.doc_options)}]
- end,
- {Go, UAcc1} = Callback({row, Row}, UAcc0),
- {Go, Acc#mracc{
- limit=Limit-1,
- reduce_fun=undefined,
- doc_info=undefined,
- user_acc=UAcc1,
- last_go=Go
- }}.
-
-red_fold(Db, {NthRed, _Lang, View}=RedView, Args, Callback, UAcc) ->
- Finalizer = case couch_util:get_value(finalizer, Args#mrargs.extra) of
- undefined ->
- {_, FunSrc} = lists:nth(NthRed, View#mrview.reduce_funs),
- FunSrc;
- CustomFun->
- CustomFun
- end,
- Acc = #mracc{
- db=Db,
- total_rows=null,
- limit=Args#mrargs.limit,
- skip=Args#mrargs.skip,
- group_level=Args#mrargs.group_level,
- callback=Callback,
- user_acc=UAcc,
- update_seq=View#mrview.update_seq,
- finalizer=Finalizer,
- args=Args
- },
- Grouping = {key_group_level, Args#mrargs.group_level},
- OptList = couch_mrview_util:key_opts(Args, [Grouping]),
- Acc2 = lists:foldl(fun(Opts, Acc0) ->
- {ok, Acc1} =
- couch_mrview_util:fold_reduce(RedView, fun red_fold/3, Acc0, Opts),
- Acc1
- end, Acc, OptList),
- finish_fold(Acc2, []).
-
-red_fold({p, _Partition, Key}, Red, Acc) ->
- red_fold(Key, Red, Acc);
-red_fold(_Key, _Red, #mracc{skip=N}=Acc) when N > 0 ->
- {ok, Acc#mracc{skip=N-1, last_go=ok}};
-red_fold(Key, Red, #mracc{meta_sent=false}=Acc) ->
- #mracc{
- args=Args,
- callback=Callback,
- user_acc=UAcc0,
- update_seq=UpdateSeq
- } = Acc,
- Meta = make_meta(Args, UpdateSeq, []),
- {Go, UAcc1} = Callback(Meta, UAcc0),
- Acc1 = Acc#mracc{user_acc=UAcc1, meta_sent=true, last_go=Go},
- case Go of
- ok -> red_fold(Key, Red, Acc1);
- _ -> {Go, Acc1}
- end;
-red_fold(_Key, _Red, #mracc{limit=0} = Acc) ->
- {stop, Acc};
-red_fold(_Key, Red, #mracc{group_level=0} = Acc) ->
- #mracc{
- finalizer=Finalizer,
- limit=Limit,
- callback=Callback,
- user_acc=UAcc0
- } = Acc,
- Row = [{key, null}, {value, maybe_finalize(Red, Finalizer)}],
- {Go, UAcc1} = Callback({row, Row}, UAcc0),
- {Go, Acc#mracc{user_acc=UAcc1, limit=Limit-1, last_go=Go}};
-red_fold(Key, Red, #mracc{group_level=exact} = Acc) ->
- #mracc{
- finalizer=Finalizer,
- limit=Limit,
- callback=Callback,
- user_acc=UAcc0
- } = Acc,
- Row = [{key, Key}, {value, maybe_finalize(Red, Finalizer)}],
- {Go, UAcc1} = Callback({row, Row}, UAcc0),
- {Go, Acc#mracc{user_acc=UAcc1, limit=Limit-1, last_go=Go}};
-red_fold(K, Red, #mracc{group_level=I} = Acc) when I > 0, is_list(K) ->
- #mracc{
- finalizer=Finalizer,
- limit=Limit,
- callback=Callback,
- user_acc=UAcc0
- } = Acc,
- Row = [{key, lists:sublist(K, I)}, {value, maybe_finalize(Red, Finalizer)}],
- {Go, UAcc1} = Callback({row, Row}, UAcc0),
- {Go, Acc#mracc{user_acc=UAcc1, limit=Limit-1, last_go=Go}};
-red_fold(K, Red, #mracc{group_level=I} = Acc) when I > 0 ->
- #mracc{
- finalizer=Finalizer,
- limit=Limit,
- callback=Callback,
- user_acc=UAcc0
- } = Acc,
- Row = [{key, K}, {value, maybe_finalize(Red, Finalizer)}],
- {Go, UAcc1} = Callback({row, Row}, UAcc0),
- {Go, Acc#mracc{user_acc=UAcc1, limit=Limit-1, last_go=Go}}.
-
-maybe_finalize(Red, null) ->
- Red;
-maybe_finalize(Red, RedSrc) ->
- {ok, Finalized} = couch_query_servers:finalize(RedSrc, Red),
- Finalized.
-
-finish_fold(#mracc{last_go=ok, update_seq=UpdateSeq}=Acc, ExtraMeta) ->
- #mracc{callback=Callback, user_acc=UAcc, args=Args}=Acc,
- % Possible send meta info
- Meta = make_meta(Args, UpdateSeq, ExtraMeta),
- {Go, UAcc1} = case Acc#mracc.meta_sent of
- false -> Callback(Meta, UAcc);
- _ -> {ok, Acc#mracc.user_acc}
- end,
- % Notify callback that the fold is complete.
- {_, UAcc2} = case Go of
- ok -> Callback(complete, UAcc1);
- _ -> {ok, UAcc1}
- end,
- {ok, UAcc2};
-finish_fold(#mracc{user_acc=UAcc}, _ExtraMeta) ->
- {ok, UAcc}.
-
-
-make_meta(Args, UpdateSeq, Base) ->
- case Args#mrargs.update_seq of
- true -> {meta, Base ++ [{update_seq, UpdateSeq}]};
- _ -> {meta, Base}
- end.
-
-
-get_reduce_fun(#mrargs{extra = Extra}) ->
- case couch_util:get_value(namespace, Extra) of
- <<"_local">> ->
- fun(_) -> null end;
- _ ->
- fun couch_mrview_util:all_docs_reduce_to_count/1
- end.
-
-
-get_total_rows(Db, #mrargs{extra = Extra}) ->
- case couch_util:get_value(namespace, Extra) of
- <<"_local">> ->
- null;
- <<"_design">> ->
- {ok, N} = couch_db:get_design_doc_count(Db),
- N;
- _ ->
- {ok, Info} = couch_db:get_db_info(Db),
- couch_util:get_value(doc_count, Info)
- end.
-
-
-get_update_seq(Db, #mrargs{extra = Extra}) ->
- case couch_util:get_value(namespace, Extra) of
- <<"_local">> ->
- null;
- _ ->
- couch_db:get_update_seq(Db)
- end.
-
-
-default_cb(complete, Acc) ->
- {ok, lists:reverse(Acc)};
-default_cb({final, Info}, []) ->
- {ok, [Info]};
-default_cb({final, _}, Acc) ->
- {ok, Acc};
-default_cb(ok, ddoc_updated) ->
- {ok, ddoc_updated};
-default_cb(Row, Acc) ->
- {ok, [Row | Acc]}.
-
-
-to_mrargs(KeyList) ->
- lists:foldl(fun({Key, Value}, Acc) ->
- Index = lookup_index(couch_util:to_existing_atom(Key)),
- setelement(Index, Acc, Value)
- end, #mrargs{}, KeyList).
-
-
-lookup_index(Key) ->
- Index = lists:zip(
- record_info(fields, mrargs), lists:seq(2, record_info(size, mrargs))
- ),
- couch_util:get_value(Key, Index).
diff --git a/src/couch_mrview/src/couch_mrview_cleanup.erl b/src/couch_mrview/src/couch_mrview_cleanup.erl
deleted file mode 100644
index e0cb1c64f..000000000
--- a/src/couch_mrview/src/couch_mrview_cleanup.erl
+++ /dev/null
@@ -1,59 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_cleanup).
-
--export([run/1]).
-
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-
-run(Db) ->
- RootDir = couch_index_util:root_dir(),
- DbName = couch_db:name(Db),
-
- {ok, DesignDocs} = couch_db:get_design_docs(Db),
- SigFiles = lists:foldl(fun(DDocInfo, SFAcc) ->
- {ok, DDoc} = couch_db:open_doc_int(Db, DDocInfo, [ejson_body]),
- {ok, InitState} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
- Sig = InitState#mrst.sig,
- IFName = couch_mrview_util:index_file(DbName, Sig),
- CFName = couch_mrview_util:compaction_file(DbName, Sig),
- [IFName, CFName | SFAcc]
- end, [], [DD || DD <- DesignDocs, DD#full_doc_info.deleted == false]),
-
- IdxDir = couch_index_util:index_dir(mrview, DbName),
- DiskFiles = filelib:wildcard(filename:join(IdxDir, "*")),
-
- % We need to delete files that have no ddoc.
- ToDelete = DiskFiles -- SigFiles,
-
- lists:foreach(fun(FN) ->
- couch_log:debug("Deleting stale view file: ~s", [FN]),
- couch_file:delete(RootDir, FN, [sync]),
- case couch_mrview_util:verify_view_filename(FN) of
- true ->
- Sig = couch_mrview_util:get_signature_from_filename(FN),
- DocId = couch_mrview_util:get_local_purge_doc_id(Sig),
- case couch_db:open_doc(Db, DocId, []) of
- {ok, LocalPurgeDoc} ->
- couch_db:update_doc(Db,
- LocalPurgeDoc#doc{deleted=true}, [?ADMIN_CTX]);
- {not_found, _} ->
- ok
- end;
- false -> ok
- end
- end, ToDelete),
- ok.
diff --git a/src/couch_mrview/src/couch_mrview_compactor.erl b/src/couch_mrview/src/couch_mrview_compactor.erl
deleted file mode 100644
index d42edc054..000000000
--- a/src/couch_mrview/src/couch_mrview_compactor.erl
+++ /dev/null
@@ -1,294 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_compactor).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--export([compact/3, swap_compacted/2, remove_compacted/1]).
-
--record(acc, {
- btree = nil,
- last_id = nil,
- kvs = [],
- kvs_size = 0,
- changes = 0,
- total_changes
-}).
-
--define(DEFAULT_RECOMPACT_RETRY_COUNT, 3).
-
-compact(_Db, State, Opts) ->
- case lists:member(recompact, Opts) of
- false -> compact(State);
- true -> recompact(State)
- end.
-
-compact(State) ->
- #mrst{
- db_name=DbName,
- idx_name=IdxName,
- sig=Sig,
- update_seq=Seq,
- id_btree=IdBtree,
- views=Views
- } = State,
- erlang:put(io_priority, {view_compact, DbName, IdxName}),
-
- {EmptyState, NumDocIds} = couch_util:with_db(DbName, fun(Db) ->
- CompactFName = couch_mrview_util:compaction_file(DbName, Sig),
- {ok, Fd} = couch_mrview_util:open_file(CompactFName),
- ESt = couch_mrview_util:reset_index(Db, Fd, State),
-
- {ok, Count} = couch_db:get_doc_count(Db),
-
- {ESt, Count}
- end),
-
- #mrst{
- id_btree = EmptyIdBtree,
- views = EmptyViews
- } = EmptyState,
-
- TotalChanges = lists:foldl(
- fun(View, Acc) ->
- {ok, Kvs} = couch_mrview_util:get_row_count(View),
- Acc + Kvs
- end,
- NumDocIds, Views),
-
- couch_task_status:add_task([
- {type, view_compaction},
- {database, DbName},
- {design_document, IdxName},
- {progress, 0},
- {changes_done, 0},
- {total_changes, TotalChanges}
- ]),
-
- BufferSize0 = config:get(
- "view_compaction", "keyvalue_buffer_size", "2097152"
- ),
- BufferSize = list_to_integer(BufferSize0),
-
- FoldFun = fun({DocId, ViewIdKeys} = KV, Acc) ->
- #acc{btree = Bt, kvs = Kvs, kvs_size = KvsSize} = Acc,
- NewKvs = case Kvs of
- [{DocId, OldViewIdKeys} | Rest] ->
- couch_log:error("Dupes of ~s in ~s ~s",
- [DocId, DbName, IdxName]),
- [{DocId, ViewIdKeys ++ OldViewIdKeys} | Rest];
- _ ->
- [KV | Kvs]
- end,
- KvsSize2 = KvsSize + ?term_size(KV),
- case KvsSize2 >= BufferSize of
- true ->
- {ok, Bt2} = couch_btree:add(Bt, lists:reverse(NewKvs)),
- Acc2 = update_task(Acc, length(NewKvs)),
- {ok, Acc2#acc{
- btree = Bt2, kvs = [], kvs_size = 0, last_id = DocId}};
- _ ->
- {ok, Acc#acc{
- kvs = NewKvs, kvs_size = KvsSize2, last_id = DocId}}
- end
- end,
-
- InitAcc = #acc{total_changes = TotalChanges, btree = EmptyIdBtree},
- {ok, _, FinalAcc} = couch_btree:foldl(IdBtree, FoldFun, InitAcc),
- #acc{btree = Bt3, kvs = Uncopied} = FinalAcc,
- {ok, NewIdBtree} = couch_btree:add(Bt3, lists:reverse(Uncopied)),
- FinalAcc2 = update_task(FinalAcc, length(Uncopied)),
-
-
- {NewViews, _} = lists:mapfoldl(fun({View, EmptyView}, Acc) ->
- compact_view(View, EmptyView, BufferSize, Acc)
- end, FinalAcc2, lists:zip(Views, EmptyViews)),
-
- unlink(EmptyState#mrst.fd),
- {ok, EmptyState#mrst{
- id_btree=NewIdBtree,
- views=NewViews,
- update_seq=Seq
- }}.
-
-
-recompact(State) ->
- recompact(State, recompact_retry_count()).
-
-recompact(#mrst{db_name=DbName, idx_name=IdxName}, 0) ->
- erlang:error({exceeded_recompact_retry_count,
- [{db_name, DbName}, {idx_name, IdxName}]});
-
-recompact(State, RetryCount) ->
- Self = self(),
- link(State#mrst.fd),
- {Pid, Ref} = erlang:spawn_monitor(fun() ->
- couch_index_updater:update(Self, couch_mrview_index, State)
- end),
- recompact_loop(Pid, Ref, State, RetryCount).
-
-recompact_loop(Pid, Ref, State, RetryCount) ->
- receive
- {'$gen_cast', {new_state, State2}} ->
- % We've made progress so reset RetryCount
- recompact_loop(Pid, Ref, State2, recompact_retry_count());
- {'DOWN', Ref, _, _, {updated, Pid, State2}} ->
- unlink(State#mrst.fd),
- {ok, State2};
- {'DOWN', Ref, _, _, Reason} ->
- unlink(State#mrst.fd),
- couch_log:warning("Error during recompaction: ~r", [Reason]),
- recompact(State, RetryCount - 1)
- end.
-
-recompact_retry_count() ->
- config:get_integer(
- "view_compaction",
- "recompact_retry_count",
- ?DEFAULT_RECOMPACT_RETRY_COUNT
- ).
-
-
-%% @spec compact_view(View, EmptyView, Retry, Acc) -> {CompactView, NewAcc}
-compact_view(#mrview{id_num=VID}=View, EmptyView, BufferSize, Acc0) ->
-
- {NewBt, FinalAcc} = compact_view_btree(View#mrview.btree,
- EmptyView#mrview.btree,
- VID, BufferSize, Acc0),
-
- {EmptyView#mrview{btree=NewBt,
- update_seq=View#mrview.update_seq,
- purge_seq=View#mrview.purge_seq}, FinalAcc}.
-
-compact_view_btree(Btree, EmptyBtree, VID, BufferSize, Acc0) ->
- Fun = fun(KV, #acc{btree = Bt, kvs = Kvs, kvs_size = KvsSize} = Acc) ->
- KvsSize2 = KvsSize + ?term_size(KV),
- if KvsSize2 >= BufferSize ->
- {ok, Bt2} = couch_btree:add(Bt, lists:reverse([KV | Kvs])),
- Acc2 = update_task(VID, Acc, 1 + length(Kvs)),
- {ok, Acc2#acc{btree = Bt2, kvs = [], kvs_size = 0}};
- true ->
- {ok, Acc#acc{kvs = [KV | Kvs], kvs_size = KvsSize2}}
- end
- end,
-
- InitAcc = Acc0#acc{kvs = [], kvs_size = 0, btree = EmptyBtree},
- {ok, _, FinalAcc} = couch_btree:foldl(Btree, Fun, InitAcc),
- #acc{btree = Bt3, kvs = Uncopied} = FinalAcc,
- {ok, NewBt} = couch_btree:add(Bt3, lists:reverse(Uncopied)),
- FinalAcc2 = update_task(VID, FinalAcc, length(Uncopied)),
- {NewBt, FinalAcc2}.
-
-update_task(Acc, ChangesInc) ->
- update_task(null, Acc, ChangesInc).
-
-
-update_task(VID, #acc{changes=Changes, total_changes=Total}=Acc, ChangesInc) ->
- Phase = if is_integer(VID) -> view; true -> ids end,
- Changes2 = Changes + ChangesInc,
- Progress = if Total == 0 -> 0; true -> (Changes2 * 100) div Total end,
- couch_task_status:update([
- {phase, Phase},
- {view, VID},
- {changes_done, Changes2},
- {total_changes, Total},
- {progress, Progress}
- ]),
- Acc#acc{changes = Changes2}.
-
-
-swap_compacted(OldState, NewState) ->
- #mrst{
- fd = Fd
- } = OldState,
- #mrst{
- sig=Sig,
- db_name=DbName,
- fd=NewFd
- } = NewState,
-
- link(NewState#mrst.fd),
- Ref = erlang:monitor(process, NewState#mrst.fd),
-
- RootDir = couch_index_util:root_dir(),
- IndexFName = couch_mrview_util:index_file(DbName, Sig),
- CompactFName = couch_mrview_util:compaction_file(DbName, Sig),
-
- {ok, Pre} = couch_file:bytes(Fd),
- {ok, Post} = couch_file:bytes(NewFd),
- couch_log:notice("Compaction swap for view ~s ~p ~p", [IndexFName,
- Pre, Post]),
- ok = couch_file:delete(RootDir, IndexFName),
- ok = file:rename(CompactFName, IndexFName),
-
- unlink(OldState#mrst.fd),
- erlang:demonitor(OldState#mrst.fd_monitor, [flush]),
-
- {ok, NewState#mrst{fd_monitor=Ref}}.
-
-
-remove_compacted(#mrst{sig = Sig, db_name = DbName} = State) ->
- RootDir = couch_index_util:root_dir(),
- CompactFName = couch_mrview_util:compaction_file(DbName, Sig),
- ok = couch_file:delete(RootDir, CompactFName),
- {ok, State}.
-
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-setup_all() ->
- meck:new(couch_index_updater),
- meck:new(couch_log).
-
-teardown_all(_) ->
- meck:unload().
-
-recompact_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- [
- recompact_success_after_progress(),
- recompact_exceeded_retry_count()
- ]
- }.
-
-recompact_success_after_progress() ->
- ?_test(begin
- ok = meck:expect(couch_index_updater, update, fun
- (Pid, _, #mrst{update_seq=0} = State) ->
- Pid ! {'$gen_cast', {new_state, State#mrst{update_seq = 1}}},
- timer:sleep(100),
- exit({updated, self(), State#mrst{update_seq = 2}})
- end),
- State = #mrst{fd=self(), update_seq=0},
- ?assertEqual({ok, State#mrst{update_seq = 2}}, recompact(State))
- end).
-
-recompact_exceeded_retry_count() ->
- ?_test(begin
- ok = meck:expect(couch_index_updater, update,
- fun(_, _, _) ->
- exit(error)
- end),
- ok = meck:expect(couch_log, warning, fun(_, _) -> ok end),
- State = #mrst{fd=self(), db_name=foo, idx_name=bar},
- ExpectedError = {exceeded_recompact_retry_count,
- [{db_name, foo}, {idx_name, bar}]},
- ?assertError(ExpectedError, recompact(State))
- end).
-
--endif.
diff --git a/src/couch_mrview/src/couch_mrview_http.erl b/src/couch_mrview/src/couch_mrview_http.erl
deleted file mode 100644
index e1ba9d656..000000000
--- a/src/couch_mrview/src/couch_mrview_http.erl
+++ /dev/null
@@ -1,650 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_http).
-
--export([
- handle_all_docs_req/2,
- handle_local_docs_req/2,
- handle_design_docs_req/2,
- handle_reindex_req/3,
- handle_view_req/3,
- handle_temp_view_req/2,
- handle_info_req/3,
- handle_compact_req/3,
- handle_cleanup_req/2
-]).
-
--export([
- parse_boolean/1,
- parse_int/1,
- parse_pos_int/1,
- prepend_val/1,
- parse_body_and_query/2,
- parse_body_and_query/3,
- parse_params/2,
- parse_params/3,
- parse_params/4,
- view_cb/2,
- row_to_obj/1,
- row_to_obj/2,
- row_to_json/1,
- row_to_json/2,
- check_view_etag/3
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-
-handle_all_docs_req(#httpd{method='GET'}=Req, Db) ->
- all_docs_req(Req, Db, undefined);
-handle_all_docs_req(#httpd{method='POST'}=Req, Db) ->
- chttpd:validate_ctype(Req, "application/json"),
- Keys = couch_mrview_util:get_view_keys(chttpd:json_body_obj(Req)),
- all_docs_req(Req, Db, Keys);
-handle_all_docs_req(Req, _Db) ->
- chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-
-handle_local_docs_req(#httpd{method='GET'}=Req, Db) ->
- all_docs_req(Req, Db, undefined, <<"_local">>);
-handle_local_docs_req(#httpd{method='POST'}=Req, Db) ->
- chttpd:validate_ctype(Req, "application/json"),
- Keys = couch_mrview_util:get_view_keys(chttpd:json_body_obj(Req)),
- all_docs_req(Req, Db, Keys, <<"_local">>);
-handle_local_docs_req(Req, _Db) ->
- chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-
-handle_design_docs_req(#httpd{method='GET'}=Req, Db) ->
- all_docs_req(Req, Db, undefined, <<"_design">>);
-handle_design_docs_req(#httpd{method='POST'}=Req, Db) ->
- chttpd:validate_ctype(Req, "application/json"),
- Keys = couch_mrview_util:get_view_keys(chttpd:json_body_obj(Req)),
- all_docs_req(Req, Db, Keys, <<"_design">>);
-handle_design_docs_req(Req, _Db) ->
- chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-
-handle_reindex_req(#httpd{method='POST',
- path_parts=[_, _, DName,<<"_reindex">>]}=Req,
- Db, _DDoc) ->
- chttpd:validate_ctype(Req, "application/json"),
- ok = couch_db:check_is_admin(Db),
- couch_mrview:trigger_update(Db, <<"_design/", DName/binary>>),
- chttpd:send_json(Req, 201, {[{<<"ok">>, true}]});
-handle_reindex_req(Req, _Db, _DDoc) ->
- chttpd:send_method_not_allowed(Req, "POST").
-
-
-handle_view_req(#httpd{method='GET',
- path_parts=[_, _, DDocName, _, VName, <<"_info">>]}=Req,
- Db, _DDoc) ->
- DbName = couch_db:name(Db),
- DDocId = <<"_design/", DDocName/binary >>,
- {ok, Info} = couch_mrview:get_view_info(DbName, DDocId, VName),
-
- FinalInfo = [{db_name, DbName},
- {ddoc, DDocId},
- {view, VName}] ++ Info,
- chttpd:send_json(Req, 200, {FinalInfo});
-handle_view_req(#httpd{method='GET'}=Req, Db, DDoc) ->
- [_, _, _, _, ViewName] = Req#httpd.path_parts,
- couch_stats:increment_counter([couchdb, httpd, view_reads]),
- design_doc_view(Req, Db, DDoc, ViewName, undefined);
-handle_view_req(#httpd{method='POST'}=Req, Db, DDoc) ->
- chttpd:validate_ctype(Req, "application/json"),
- [_, _, _, _, ViewName] = Req#httpd.path_parts,
- Props = chttpd:json_body_obj(Req),
- Keys = couch_mrview_util:get_view_keys(Props),
- Queries = couch_mrview_util:get_view_queries(Props),
- case {Queries, Keys} of
- {Queries, undefined} when is_list(Queries) ->
- IncrBy = length(Queries),
- couch_stats:increment_counter([couchdb, httpd, view_reads], IncrBy),
- multi_query_view(Req, Db, DDoc, ViewName, Queries);
- {undefined, Keys} when is_list(Keys) ->
- couch_stats:increment_counter([couchdb, httpd, view_reads]),
- design_doc_view(Req, Db, DDoc, ViewName, Keys);
- {undefined, undefined} ->
- throw({
- bad_request,
- "POST body must contain `keys` or `queries` field"
- });
- {_, _} ->
- throw({bad_request, "`keys` and `queries` are mutually exclusive"})
- end;
-handle_view_req(Req, _Db, _DDoc) ->
- chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-
-
-handle_temp_view_req(#httpd{method='POST'}=Req, Db) ->
- chttpd:validate_ctype(Req, "application/json"),
- ok = couch_db:check_is_admin(Db),
- {Body} = chttpd:json_body_obj(Req),
- DDoc = couch_mrview_util:temp_view_to_ddoc({Body}),
- Keys = couch_mrview_util:get_view_keys({Body}),
- couch_stats:increment_counter([couchdb, httpd, temporary_view_reads]),
- design_doc_view(Req, Db, DDoc, <<"temp">>, Keys);
-handle_temp_view_req(Req, _Db) ->
- chttpd:send_method_not_allowed(Req, "POST").
-
-
-handle_info_req(#httpd{method='GET'}=Req, Db, DDoc) ->
- [_, _, Name, _] = Req#httpd.path_parts,
- {ok, Info} = couch_mrview:get_info(Db, DDoc),
- chttpd:send_json(Req, 200, {[
- {name, Name},
- {view_index, {Info}}
- ]});
-handle_info_req(Req, _Db, _DDoc) ->
- chttpd:send_method_not_allowed(Req, "GET").
-
-
-handle_compact_req(#httpd{method='POST'}=Req, Db, DDoc) ->
- chttpd:validate_ctype(Req, "application/json"),
- ok = couch_db:check_is_admin(Db),
- ok = couch_mrview:compact(Db, DDoc),
- chttpd:send_json(Req, 202, {[{ok, true}]});
-handle_compact_req(Req, _Db, _DDoc) ->
- chttpd:send_method_not_allowed(Req, "POST").
-
-
-handle_cleanup_req(#httpd{method='POST'}=Req, Db) ->
- chttpd:validate_ctype(Req, "application/json"),
- ok = couch_db:check_is_admin(Db),
- ok = couch_mrview:cleanup(Db),
- chttpd:send_json(Req, 202, {[{ok, true}]});
-handle_cleanup_req(Req, _Db) ->
- chttpd:send_method_not_allowed(Req, "POST").
-
-
-all_docs_req(Req, Db, Keys) ->
- all_docs_req(Req, Db, Keys, undefined).
-
-all_docs_req(Req, Db, Keys, NS) ->
- case is_restricted(Db, NS) of
- true ->
- case (catch couch_db:check_is_admin(Db)) of
- ok ->
- do_all_docs_req(Req, Db, Keys, NS);
- _ when NS == <<"_local">> ->
- throw({forbidden, <<"Only admins can access _local_docs">>});
- _ ->
- case is_public_fields_configured(Db) of
- true ->
- do_all_docs_req(Req, Db, Keys, NS);
- false ->
- throw({forbidden, <<"Only admins can access _all_docs",
- " of system databases.">>})
- end
- end;
- false ->
- do_all_docs_req(Req, Db, Keys, NS)
- end.
-
-is_restricted(_Db, <<"_local">>) ->
- true;
-is_restricted(Db, _) ->
- couch_db:is_system_db(Db).
-
-is_public_fields_configured(Db) ->
- DbName = ?b2l(couch_db:name(Db)),
- case config:get("couch_httpd_auth", "authentication_db", "_users") of
- DbName ->
- UsersDbPublic = config:get("couch_httpd_auth", "users_db_public", "false"),
- PublicFields = config:get("couch_httpd_auth", "public_fields"),
- case {UsersDbPublic, PublicFields} of
- {"true", PublicFields} when PublicFields =/= undefined ->
- true;
- {_, _} ->
- false
- end;
- _ ->
- false
- end.
-
-do_all_docs_req(Req, Db, Keys, NS) ->
- Args0 = couch_mrview_http:parse_body_and_query(Req, Keys),
- Args1 = set_namespace(NS, Args0),
- ETagFun = fun(Sig, Acc0) ->
- check_view_etag(Sig, Acc0, Req)
- end,
- Args = Args1#mrargs{preflight_fun=ETagFun},
- {ok, Resp} = couch_httpd:etag_maybe(Req, fun() ->
- Max = chttpd:chunked_response_buffer_size(),
- VAcc0 = #vacc{db=Db, req=Req, threshold=Max},
- DbName = ?b2l(couch_db:name(Db)),
- UsersDbName = config:get("couch_httpd_auth",
- "authentication_db",
- "_users"),
- IsAdmin = is_admin(Db),
- Callback = get_view_callback(DbName, UsersDbName, IsAdmin),
- couch_mrview:query_all_docs(Db, Args, Callback, VAcc0)
- end),
- case is_record(Resp, vacc) of
- true -> {ok, Resp#vacc.resp};
- _ -> {ok, Resp}
- end.
-
-set_namespace(NS, #mrargs{extra = Extra} = Args) ->
- Args#mrargs{extra = [{namespace, NS} | Extra]}.
-
-is_admin(Db) ->
- case catch couch_db:check_is_admin(Db) of
- {unauthorized, _} ->
- false;
- ok ->
- true
- end.
-
-
-% admin users always get all fields
-get_view_callback(_, _, true) ->
- fun view_cb/2;
-% if we are operating on the users db and we aren't
-% admin, filter the view
-get_view_callback(_DbName, _DbName, false) ->
- fun filtered_view_cb/2;
-% non _users databases get all fields
-get_view_callback(_, _, _) ->
- fun view_cb/2.
-
-
-design_doc_view(Req, Db, DDoc, ViewName, Keys) ->
- Args0 = parse_params(Req, Keys),
- ETagFun = fun(Sig, Acc0) ->
- check_view_etag(Sig, Acc0, Req)
- end,
- Args = Args0#mrargs{preflight_fun=ETagFun},
- {ok, Resp} = couch_httpd:etag_maybe(Req, fun() ->
- Max = chttpd:chunked_response_buffer_size(),
- VAcc0 = #vacc{db=Db, req=Req, threshold=Max},
- couch_mrview:query_view(Db, DDoc, ViewName, Args, fun view_cb/2, VAcc0)
- end),
- case is_record(Resp, vacc) of
- true -> {ok, Resp#vacc.resp};
- _ -> {ok, Resp}
- end.
-
-
-multi_query_view(Req, Db, DDoc, ViewName, Queries) ->
- Args0 = parse_params(Req, undefined),
- {ok, _, _, Args1} = couch_mrview_util:get_view(Db, DDoc, ViewName, Args0),
- ArgQueries = lists:map(fun({Query}) ->
- QueryArg = parse_params(Query, undefined, Args1),
- couch_mrview_util:validate_args(Db, DDoc, QueryArg)
- end, Queries),
- {ok, Resp2} = couch_httpd:etag_maybe(Req, fun() ->
- Max = chttpd:chunked_response_buffer_size(),
- VAcc0 = #vacc{db=Db, req=Req, prepend="\r\n", threshold=Max},
- %% TODO: proper calculation of etag
- Etag = [$", couch_uuids:new(), $"],
- Headers = [{"ETag", Etag}],
- FirstChunk = "{\"results\":[",
- {ok, Resp0} = chttpd:start_delayed_json_response(VAcc0#vacc.req, 200, Headers, FirstChunk),
- VAcc1 = VAcc0#vacc{resp=Resp0},
- VAcc2 = lists:foldl(fun(Args, Acc0) ->
- {ok, Acc1} = couch_mrview:query_view(Db, DDoc, ViewName, Args, fun view_cb/2, Acc0),
- Acc1
- end, VAcc1, ArgQueries),
- {ok, Resp1} = chttpd:send_delayed_chunk(VAcc2#vacc.resp, "\r\n]}"),
- {ok, Resp2} = chttpd:end_delayed_json_response(Resp1),
- {ok, VAcc2#vacc{resp=Resp2}}
- end),
- case is_record(Resp2, vacc) of
- true -> {ok, Resp2#vacc.resp};
- _ -> {ok, Resp2}
- end.
-
-filtered_view_cb({row, Row0}, Acc) ->
- Row1 = lists:map(fun({doc, null}) ->
- {doc, null};
- ({doc, Body}) ->
- Doc = couch_users_db:strip_non_public_fields(#doc{body=Body}),
- {doc, Doc#doc.body};
- (KV) ->
- KV
- end, Row0),
- view_cb({row, Row1}, Acc);
-filtered_view_cb(Obj, Acc) ->
- view_cb(Obj, Acc).
-
-
-%% these clauses start (and possibly end) the response
-view_cb({error, Reason}, #vacc{resp=undefined}=Acc) ->
- {ok, Resp} = chttpd:send_error(Acc#vacc.req, Reason),
- {ok, Acc#vacc{resp=Resp}};
-
-view_cb(complete, #vacc{resp=undefined}=Acc) ->
- % Nothing in view
- {ok, Resp} = chttpd:send_json(Acc#vacc.req, 200, {[{rows, []}]}),
- {ok, Acc#vacc{resp=Resp}};
-
-view_cb(Msg, #vacc{resp=undefined}=Acc) ->
- %% Start response
- Headers = [],
- {ok, Resp} = chttpd:start_delayed_json_response(Acc#vacc.req, 200, Headers),
- view_cb(Msg, Acc#vacc{resp=Resp, should_close=true});
-
-%% ---------------------------------------------------
-
-%% From here on down, the response has been started.
-
-view_cb({error, Reason}, #vacc{resp=Resp}=Acc) ->
- {ok, Resp1} = chttpd:send_delayed_error(Resp, Reason),
- {ok, Acc#vacc{resp=Resp1}};
-
-view_cb(complete, #vacc{resp=Resp, buffer=Buf, threshold=Max}=Acc) ->
- % Finish view output and possibly end the response
- {ok, Resp1} = chttpd:close_delayed_json_object(Resp, Buf, "\r\n]}", Max),
- case Acc#vacc.should_close of
- true ->
- {ok, Resp2} = chttpd:end_delayed_json_response(Resp1),
- {ok, Acc#vacc{resp=Resp2}};
- _ ->
- {ok, Acc#vacc{resp=Resp1, meta_sent=false, row_sent=false,
- prepend=",\r\n", buffer=[], bufsize=0}}
- end;
-
-view_cb({meta, Meta}, #vacc{meta_sent=false, row_sent=false}=Acc) ->
- % Sending metadata as we've not sent it or any row yet
- Parts = case couch_util:get_value(total, Meta) of
- undefined -> [];
- Total -> [io_lib:format("\"total_rows\":~p", [Total])]
- end ++ case couch_util:get_value(offset, Meta) of
- undefined -> [];
- Offset -> [io_lib:format("\"offset\":~p", [Offset])]
- end ++ case couch_util:get_value(update_seq, Meta) of
- undefined -> [];
- null ->
- ["\"update_seq\":null"];
- UpdateSeq when is_integer(UpdateSeq) ->
- [io_lib:format("\"update_seq\":~B", [UpdateSeq])];
- UpdateSeq when is_binary(UpdateSeq) ->
- [io_lib:format("\"update_seq\":\"~s\"", [UpdateSeq])]
- end ++ ["\"rows\":["],
- Chunk = [prepend_val(Acc), "{", string:join(Parts, ","), "\r\n"],
- {ok, AccOut} = maybe_flush_response(Acc, Chunk, iolist_size(Chunk)),
- {ok, AccOut#vacc{prepend="", meta_sent=true}};
-
-view_cb({meta, _Meta}, #vacc{}=Acc) ->
- %% ignore metadata
- {ok, Acc};
-
-view_cb({row, Row}, #vacc{meta_sent=false}=Acc) ->
- %% sorted=false and row arrived before meta
- % Adding another row
- Chunk = [prepend_val(Acc), "{\"rows\":[\r\n", row_to_json(Row)],
- maybe_flush_response(Acc#vacc{meta_sent=true, row_sent=true}, Chunk, iolist_size(Chunk));
-
-view_cb({row, Row}, #vacc{meta_sent=true}=Acc) ->
- % Adding another row
- Chunk = [prepend_val(Acc), row_to_json(Row)],
- maybe_flush_response(Acc#vacc{row_sent=true}, Chunk, iolist_size(Chunk)).
-
-
-maybe_flush_response(#vacc{bufsize=Size, threshold=Max} = Acc, Data, Len)
- when Size > 0 andalso (Size + Len) > Max ->
- #vacc{buffer = Buffer, resp = Resp} = Acc,
- {ok, R1} = chttpd:send_delayed_chunk(Resp, Buffer),
- {ok, Acc#vacc{prepend = ",\r\n", buffer = Data, bufsize = Len, resp = R1}};
-maybe_flush_response(Acc0, Data, Len) ->
- #vacc{buffer = Buf, bufsize = Size} = Acc0,
- Acc = Acc0#vacc{
- prepend = ",\r\n",
- buffer = [Buf | Data],
- bufsize = Size + Len
- },
- {ok, Acc}.
-
-prepend_val(#vacc{prepend=Prepend}) ->
- case Prepend of
- undefined ->
- "";
- _ ->
- Prepend
- end.
-
-
-row_to_json(Row) ->
- ?JSON_ENCODE(row_to_obj(Row)).
-
-
-row_to_json(Kind, Row) ->
- ?JSON_ENCODE(row_to_obj(Kind, Row)).
-
-
-row_to_obj(Row) ->
- Id = couch_util:get_value(id, Row),
- row_to_obj(Id, Row).
-
-
-row_to_obj(error, Row) ->
- % Special case for _all_docs request with KEYS to
- % match prior behavior.
- Key = couch_util:get_value(key, Row),
- Val = couch_util:get_value(value, Row),
- Reason = couch_util:get_value(reason, Row),
- ReasonProp = if Reason == undefined -> []; true ->
- [{reason, Reason}]
- end,
- {[{key, Key}, {error, Val}] ++ ReasonProp};
-row_to_obj(Id0, Row) ->
- Id = case Id0 of
- undefined -> [];
- Id0 -> [{id, Id0}]
- end,
- Key = couch_util:get_value(key, Row, null),
- Val = couch_util:get_value(value, Row),
- Doc = case couch_util:get_value(doc, Row) of
- undefined -> [];
- Doc0 -> [{doc, Doc0}]
- end,
- {Id ++ [{key, Key}, {value, Val}] ++ Doc}.
-
-
-parse_params(#httpd{}=Req, Keys) ->
- parse_params(chttpd:qs(Req), Keys);
-parse_params(Props, Keys) ->
- Args = #mrargs{},
- parse_params(Props, Keys, Args).
-
-
-parse_params(Props, Keys, Args) ->
- parse_params(Props, Keys, Args, []).
-
-parse_params(Props, Keys, #mrargs{}=Args0, Options) ->
- IsDecoded = lists:member(decoded, Options),
- Args1 = case lists:member(keep_group_level, Options) of
- true ->
- Args0;
- _ ->
- % group_level set to undefined to detect if explicitly set by user
- Args0#mrargs{keys=Keys, group=undefined, group_level=undefined}
- end,
- lists:foldl(fun({K, V}, Acc) ->
- parse_param(K, V, Acc, IsDecoded)
- end, Args1, Props).
-
-
-parse_body_and_query(#httpd{method='POST'} = Req, Keys) ->
- Props = chttpd:json_body_obj(Req),
- parse_body_and_query(Req, Props, Keys);
-
-parse_body_and_query(Req, Keys) ->
- parse_params(chttpd:qs(Req), Keys, #mrargs{keys=Keys, group=undefined,
- group_level=undefined}, [keep_group_level]).
-
-parse_body_and_query(Req, {Props}, Keys) ->
- Args = #mrargs{keys=Keys, group=undefined, group_level=undefined},
- BodyArgs = parse_params(Props, Keys, Args, [decoded]),
- parse_params(chttpd:qs(Req), Keys, BodyArgs, [keep_group_level]).
-
-parse_param(Key, Val, Args, IsDecoded) when is_binary(Key) ->
- parse_param(binary_to_list(Key), Val, Args, IsDecoded);
-parse_param(Key, Val, Args, IsDecoded) ->
- case Key of
- "" ->
- Args;
- "reduce" ->
- Args#mrargs{reduce=parse_boolean(Val)};
- "key" when IsDecoded ->
- Args#mrargs{start_key=Val, end_key=Val};
- "key" ->
- JsonKey = ?JSON_DECODE(Val),
- Args#mrargs{start_key=JsonKey, end_key=JsonKey};
- "keys" when IsDecoded ->
- Args#mrargs{keys=Val};
- "keys" ->
- Args#mrargs{keys=?JSON_DECODE(Val)};
- "startkey" when IsDecoded ->
- Args#mrargs{start_key=Val};
- "start_key" when IsDecoded ->
- Args#mrargs{start_key=Val};
- "startkey" ->
- Args#mrargs{start_key=?JSON_DECODE(Val)};
- "start_key" ->
- Args#mrargs{start_key=?JSON_DECODE(Val)};
- "startkey_docid" ->
- Args#mrargs{start_key_docid=couch_util:to_binary(Val)};
- "start_key_doc_id" ->
- Args#mrargs{start_key_docid=couch_util:to_binary(Val)};
- "endkey" when IsDecoded ->
- Args#mrargs{end_key=Val};
- "end_key" when IsDecoded ->
- Args#mrargs{end_key=Val};
- "endkey" ->
- Args#mrargs{end_key=?JSON_DECODE(Val)};
- "end_key" ->
- Args#mrargs{end_key=?JSON_DECODE(Val)};
- "endkey_docid" ->
- Args#mrargs{end_key_docid=couch_util:to_binary(Val)};
- "end_key_doc_id" ->
- Args#mrargs{end_key_docid=couch_util:to_binary(Val)};
- "limit" ->
- Args#mrargs{limit=parse_pos_int(Val)};
- "page_size" ->
- Args#mrargs{page_size=parse_pos_int(Val)};
- "stale" when Val == "ok" orelse Val == <<"ok">> ->
- Args#mrargs{stable=true, update=false};
- "stale" when Val == "update_after" orelse Val == <<"update_after">> ->
- Args#mrargs{stable=true, update=lazy};
- "stale" ->
- throw({query_parse_error, <<"Invalid value for `stale`.">>});
- "stable" when Val == "true" orelse Val == <<"true">> ->
- Args#mrargs{stable=true};
- "stable" when Val == "false" orelse Val == <<"false">> ->
- Args#mrargs{stable=false};
- "stable" ->
- throw({query_parse_error, <<"Invalid value for `stable`.">>});
- "update" when Val == "true" orelse Val == <<"true">> ->
- Args#mrargs{update=true};
- "update" when Val == "false" orelse Val == <<"false">> ->
- Args#mrargs{update=false};
- "update" when Val == "lazy" orelse Val == <<"lazy">> ->
- Args#mrargs{update=lazy};
- "update" ->
- throw({query_parse_error, <<"Invalid value for `update`.">>});
- "descending" ->
- case parse_boolean(Val) of
- true -> Args#mrargs{direction=rev};
- _ -> Args#mrargs{direction=fwd}
- end;
- "skip" ->
- Args#mrargs{skip=parse_pos_int(Val)};
- "group" ->
- Args#mrargs{group=parse_boolean(Val)};
- "group_level" ->
- Args#mrargs{group_level=parse_pos_int(Val)};
- "inclusive_end" ->
- Args#mrargs{inclusive_end=parse_boolean(Val)};
- "include_docs" ->
- Args#mrargs{include_docs=parse_boolean(Val)};
- "attachments" ->
- case parse_boolean(Val) of
- true ->
- Opts = Args#mrargs.doc_options,
- Args#mrargs{doc_options=[attachments|Opts]};
- false ->
- Args
- end;
- "att_encoding_info" ->
- case parse_boolean(Val) of
- true ->
- Opts = Args#mrargs.doc_options,
- Args#mrargs{doc_options=[att_encoding_info|Opts]};
- false ->
- Args
- end;
- "update_seq" ->
- Args#mrargs{update_seq=parse_boolean(Val)};
- "conflicts" ->
- Args#mrargs{conflicts=parse_boolean(Val)};
- "callback" ->
- Args#mrargs{callback=couch_util:to_binary(Val)};
- "sorted" ->
- Args#mrargs{sorted=parse_boolean(Val)};
- "partition" ->
- Partition = couch_util:to_binary(Val),
- couch_partition:validate_partition(Partition),
- couch_mrview_util:set_extra(Args, partition, Partition);
- _ ->
- BKey = couch_util:to_binary(Key),
- BVal = couch_util:to_binary(Val),
- Args#mrargs{extra=[{BKey, BVal} | Args#mrargs.extra]}
- end.
-
-
-parse_boolean(true) ->
- true;
-parse_boolean(false) ->
- false;
-
-parse_boolean(Val) when is_binary(Val) ->
- parse_boolean(?b2l(Val));
-
-parse_boolean(Val) ->
- case string:to_lower(Val) of
- "true" -> true;
- "false" -> false;
- _ ->
- Msg = io_lib:format("Invalid boolean parameter: ~p", [Val]),
- throw({query_parse_error, ?l2b(Msg)})
- end.
-
-parse_int(Val) when is_integer(Val) ->
- Val;
-parse_int(Val) ->
- case (catch list_to_integer(Val)) of
- IntVal when is_integer(IntVal) ->
- IntVal;
- _ ->
- Msg = io_lib:format("Invalid value for integer: ~p", [Val]),
- throw({query_parse_error, ?l2b(Msg)})
- end.
-
-parse_pos_int(Val) ->
- case parse_int(Val) of
- IntVal when IntVal >= 0 ->
- IntVal;
- _ ->
- Fmt = "Invalid value for positive integer: ~p",
- Msg = io_lib:format(Fmt, [Val]),
- throw({query_parse_error, ?l2b(Msg)})
- end.
-
-
-check_view_etag(Sig, Acc0, Req) ->
- ETag = chttpd:make_etag(Sig),
- case chttpd:etag_match(Req, ETag) of
- true -> throw({etag_match, ETag});
- false -> {ok, Acc0#vacc{etag=ETag}}
- end.
diff --git a/src/couch_mrview/src/couch_mrview_index.erl b/src/couch_mrview/src/couch_mrview_index.erl
deleted file mode 100644
index 8e844e80c..000000000
--- a/src/couch_mrview/src/couch_mrview_index.erl
+++ /dev/null
@@ -1,329 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_index).
-
-
--export([get/2]).
--export([init/2, open/2, close/1, reset/1, delete/1, shutdown/1]).
--export([start_update/4, purge/4, process_doc/3, finish_update/1, commit/1]).
--export([compact/3, swap_compacted/2, remove_compacted/1]).
--export([index_file_exists/1]).
--export([update_local_purge_doc/2, verify_index_exists/2]).
--export([ensure_local_purge_docs/2]).
--export([format_status/2]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-
-get(db_name, #mrst{db_name = DbName}) ->
- DbName;
-get(idx_name, #mrst{idx_name = IdxName}) ->
- IdxName;
-get(signature, #mrst{sig = Signature}) ->
- Signature;
-get(update_seq, #mrst{update_seq = UpdateSeq}) ->
- UpdateSeq;
-get(purge_seq, #mrst{purge_seq = PurgeSeq}) ->
- PurgeSeq;
-get(update_options, #mrst{design_opts = Opts}) ->
- IncDesign = couch_util:get_value(<<"include_design">>, Opts, false),
- LocalSeq = couch_util:get_value(<<"local_seq">>, Opts, false),
- Partitioned = couch_util:get_value(<<"partitioned">>, Opts, false),
- if IncDesign -> [include_design]; true -> [] end
- ++ if LocalSeq -> [local_seq]; true -> [] end
- ++ if Partitioned -> [partitioned]; true -> [] end;
-get(fd, #mrst{fd = Fd}) ->
- Fd;
-get(language, #mrst{language = Language}) ->
- Language;
-get(views, #mrst{views = Views}) ->
- Views;
-get(info, State) ->
- #mrst{
- fd = Fd,
- sig = Sig,
- id_btree = IdBtree,
- language = Lang,
- update_seq = UpdateSeq,
- purge_seq = PurgeSeq,
- views = Views
- } = State,
- {ok, FileSize} = couch_file:bytes(Fd),
- {ok, ExternalSize} = couch_mrview_util:calculate_external_size(Views),
- {ok, ActiveViewSize} = couch_mrview_util:calculate_active_size(Views),
- ActiveSize = couch_btree:size(IdBtree) + ActiveViewSize,
-
- UpdateOptions0 = get(update_options, State),
- UpdateOptions = [atom_to_binary(O, latin1) || O <- UpdateOptions0],
-
- {ok, [
- {signature, list_to_binary(couch_index_util:hexsig(Sig))},
- {language, Lang},
- {sizes, {[
- {file, FileSize},
- {active, ActiveSize},
- {external, ExternalSize}
- ]}},
- {update_seq, UpdateSeq},
- {purge_seq, PurgeSeq},
- {update_options, UpdateOptions}
- ]};
-get(Other, _) ->
- throw({unknown_index_property, Other}).
-
-
-init(Db, DDoc) ->
- {ok, State} = couch_mrview_util:ddoc_to_mrst(couch_db:name(Db), DDoc),
- {ok, set_partitioned(Db, State)}.
-
-
-open(Db, State0) ->
- #mrst{
- db_name=DbName,
- sig=Sig
- } = State = set_partitioned(Db, State0),
- IndexFName = couch_mrview_util:index_file(DbName, Sig),
-
- % If we are upgrading from <= 2.x, we upgrade the view
- % index file on the fly, avoiding an index reset.
-    % We commit with the new state right after the
-    % upgrade to ensure that we have a proper sig in
-    % the header when we open the view next time.
- %
- % OldSig is `ok` if no upgrade happened.
- %
- % To remove support for 2.x auto-upgrades in the
- % future, just remove the next line and the code
- % between "upgrade code for <= 2.x" and
- % "end of upgrade code for <= 2.x" and the corresponding
- % code in couch_mrview_util
-
- OldSig = couch_mrview_util:maybe_update_index_file(State),
-
- case couch_mrview_util:open_file(IndexFName) of
- {ok, Fd} ->
- case couch_file:read_header(Fd) of
- % upgrade code for <= 2.x
- {ok, {OldSig, Header}} ->
- % Matching view signatures.
- NewSt = couch_mrview_util:init_state(Db, Fd, State, Header),
- ok = commit(NewSt),
- ensure_local_purge_doc(Db, NewSt),
- {ok, NewSt};
- % end of upgrade code for <= 2.x
- {ok, {Sig, Header}} ->
- % Matching view signatures.
- NewSt = couch_mrview_util:init_state(Db, Fd, State, Header),
- ensure_local_purge_doc(Db, NewSt),
- {ok, NewSt};
- {ok, {WrongSig, _}} ->
- couch_log:error("~s has the wrong signature: expected: ~p but got ~p",
- [IndexFName, Sig, WrongSig]),
- NewSt = couch_mrview_util:reset_index(Db, Fd, State),
- ensure_local_purge_doc(Db, NewSt),
- {ok, NewSt};
- {ok, Else} ->
- couch_log:error("~s has a bad header: got ~p",
- [IndexFName, Else]),
- NewSt = couch_mrview_util:reset_index(Db, Fd, State),
- ensure_local_purge_doc(Db, NewSt),
- {ok, NewSt};
- no_valid_header ->
- NewSt = couch_mrview_util:reset_index(Db, Fd, State),
- ensure_local_purge_doc(Db, NewSt),
- {ok, NewSt}
- end;
- {error, Reason} = Error ->
- couch_log:error("Failed to open view file '~s': ~s",
- [IndexFName, file:format_error(Reason)]),
- Error
- end.
-
-
-close(State) ->
- erlang:demonitor(State#mrst.fd_monitor, [flush]),
- couch_file:close(State#mrst.fd).
-
-
-% This is called after the ddoc_updated event occurs and
-% before we shut down the couch_index process.
-% We unlink couch_index from the corresponding couch_file and demonitor it.
-% This allows all outstanding queries that are currently streaming
-% data from couch_file to finish successfully.
-% couch_file will be closed automatically after all
-% outstanding queries are done.
-shutdown(State) ->
- erlang:demonitor(State#mrst.fd_monitor, [flush]),
- unlink(State#mrst.fd).
-
-
-delete(#mrst{db_name=DbName, sig=Sig}=State) ->
- couch_file:close(State#mrst.fd),
- catch couch_mrview_util:delete_files(DbName, Sig).
-
-
-reset(State) ->
- couch_util:with_db(State#mrst.db_name, fun(Db) ->
- NewState = couch_mrview_util:reset_index(Db, State#mrst.fd, State),
- {ok, NewState}
- end).
-
-
-start_update(PartialDest, State, NumChanges, NumChangesDone) ->
- couch_mrview_updater:start_update(
- PartialDest,
- State,
- NumChanges,
- NumChangesDone
- ).
-
-
-purge(Db, PurgeSeq, PurgedIdRevs, State) ->
- couch_mrview_updater:purge(Db, PurgeSeq, PurgedIdRevs, State).
-
-
-process_doc(Doc, Seq, State) ->
- couch_mrview_updater:process_doc(Doc, Seq, State).
-
-
-finish_update(State) ->
- couch_mrview_updater:finish_update(State).
-
-
-commit(State) ->
- Header = {State#mrst.sig, couch_mrview_util:make_header(State)},
- couch_file:write_header(State#mrst.fd, Header).
-
-
-compact(Db, State, Opts) ->
- couch_mrview_compactor:compact(Db, State, Opts).
-
-
-swap_compacted(OldState, NewState) ->
- couch_mrview_compactor:swap_compacted(OldState, NewState).
-
-
-remove_compacted(State) ->
- couch_mrview_compactor:remove_compacted(State).
-
-
-index_file_exists(State) ->
- #mrst{
- db_name=DbName,
- sig=Sig
- } = State,
- IndexFName = couch_mrview_util:index_file(DbName, Sig),
- filelib:is_file(IndexFName).
-
-
-verify_index_exists(DbName, Props) ->
- try
- Type = couch_util:get_value(<<"type">>, Props),
- if Type =/= <<"mrview">> -> false; true ->
- DDocId = couch_util:get_value(<<"ddoc_id">>, Props),
- couch_util:with_db(DbName, fun(Db) ->
- case couch_db:get_design_doc(Db, DDocId) of
- {ok, #doc{} = DDoc} ->
- {ok, IdxState} = couch_mrview_util:ddoc_to_mrst(
- DbName, DDoc),
- IdxSig = IdxState#mrst.sig,
- SigInLocal = couch_util:get_value(
- <<"signature">>, Props),
- couch_index_util:hexsig(IdxSig) == SigInLocal;
- {not_found, _} ->
- false
- end
- end)
- end
- catch _:_ ->
- false
- end.
-
-
-set_partitioned(Db, State) ->
- #mrst{
- design_opts = DesignOpts
- } = State,
- DbPartitioned = couch_db:is_partitioned(Db),
- ViewPartitioned = couch_util:get_value(
- <<"partitioned">>, DesignOpts, DbPartitioned),
- IsPartitioned = DbPartitioned andalso ViewPartitioned,
- State#mrst{partitioned = IsPartitioned}.
-
-
-ensure_local_purge_docs(DbName, DDocs) ->
- couch_util:with_db(DbName, fun(Db) ->
- lists:foreach(fun(DDoc) ->
- try couch_mrview_util:ddoc_to_mrst(DbName, DDoc) of
- {ok, MRSt} ->
- ensure_local_purge_doc(Db, MRSt)
- catch _:_ ->
- ok
- end
- end, DDocs)
- end).
-
-
-ensure_local_purge_doc(Db, #mrst{}=State) ->
- Sig = couch_index_util:hexsig(get(signature, State)),
- DocId = couch_mrview_util:get_local_purge_doc_id(Sig),
- case couch_db:open_doc(Db, DocId, []) of
- {not_found, _} ->
- create_local_purge_doc(Db, State);
- {ok, _} ->
- ok
- end.
-
-
-create_local_purge_doc(Db, State) ->
- PurgeSeq = couch_db:get_purge_seq(Db),
- update_local_purge_doc(Db, State, PurgeSeq).
-
-
-update_local_purge_doc(Db, State) ->
- update_local_purge_doc(Db, State, get(purge_seq, State)).
-
-
-update_local_purge_doc(Db, State, PSeq) ->
- Sig = couch_index_util:hexsig(State#mrst.sig),
- DocId = couch_mrview_util:get_local_purge_doc_id(Sig),
- {Mega, Secs, _} = os:timestamp(),
- NowSecs = Mega * 1000000 + Secs,
- BaseDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DocId},
- {<<"type">>, <<"mrview">>},
- {<<"purge_seq">>, PSeq},
- {<<"updated_on">>, NowSecs},
- {<<"ddoc_id">>, get(idx_name, State)},
- {<<"signature">>, Sig}
- ]}),
- Doc = case couch_db:open_doc(Db, DocId, []) of
- {ok, #doc{revs = Revs}} ->
- BaseDoc#doc{revs = Revs};
- {not_found, _} ->
- BaseDoc
- end,
- couch_db:update_doc(Db, Doc, []).
-
-format_status(_Opt, [_PDict, State]) ->
- Scrubbed = State#mrst{
- lib = nil,
- views = nil,
- id_btree = nil,
- doc_acc = nil,
- doc_queue = nil,
- write_queue = nil
- },
- ?record_to_keyval(mrst, Scrubbed).
diff --git a/src/couch_mrview/src/couch_mrview_show.erl b/src/couch_mrview/src/couch_mrview_show.erl
deleted file mode 100644
index 9056907fa..000000000
--- a/src/couch_mrview/src/couch_mrview_show.erl
+++ /dev/null
@@ -1,468 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_show).
-
--export([
- handle_doc_show_req/3,
- handle_doc_update_req/3,
- handle_view_list_req/3,
- list_cb/2
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-% /db/_design/foo/_show/bar/docid
-% show converts a json doc to a response of any content-type.
-% it looks up the doc and then passes it to the query server.
-% then it sends the response from the query server to the http client.
-
-maybe_open_doc(Db, DocId) ->
- case catch couch_httpd_db:couch_doc_open(Db, DocId, nil, [conflicts]) of
- #doc{} = Doc -> Doc;
- {not_found, _} -> nil
- end.
-
-handle_doc_show_req(#httpd{
- path_parts=[_, _, _, _, ShowName, DocId]
- }=Req, Db, DDoc) ->
-
- % open the doc
- Doc = maybe_open_doc(Db, DocId),
-
- % we don't handle revs here b/c they are an internal api
- % returns 404 if there is no doc with DocId
- handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId);
-
-handle_doc_show_req(#httpd{
- path_parts=[_, _, _, _, ShowName, DocId|Rest]
- }=Req, Db, DDoc) ->
-
- DocParts = [DocId|Rest],
- DocId1 = ?l2b(string:join([?b2l(P)|| P <- DocParts], "/")),
-
- % open the doc
- Doc = maybe_open_doc(Db, DocId1),
-
- % we don't handle revs here b/c they are an internal api
- % pass 404 docs to the show function
- handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId1);
-
-handle_doc_show_req(#httpd{
- path_parts=[_, _, _, _, ShowName]
- }=Req, Db, DDoc) ->
- % with no docid the doc is nil
- handle_doc_show(Req, Db, DDoc, ShowName, nil);
-
-handle_doc_show_req(Req, _Db, _DDoc) ->
- chttpd:send_error(Req, 404, <<"show_error">>, <<"Invalid path.">>).
-
-handle_doc_show(Req, Db, DDoc, ShowName, Doc) ->
- handle_doc_show(Req, Db, DDoc, ShowName, Doc, null).
-
-handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId) ->
- % get responder for ddoc/showname
- CurrentEtag = show_etag(Req, Doc, DDoc, []),
- chttpd:etag_respond(Req, CurrentEtag, fun() ->
- JsonReq = chttpd_external:json_req_obj(Req, Db, DocId),
- JsonDoc = couch_query_servers:json_doc(Doc),
- [<<"resp">>, ExternalResp] =
- couch_query_servers:ddoc_prompt(DDoc, [<<"shows">>, ShowName],
- [JsonDoc, JsonReq]),
- JsonResp = apply_etag(ExternalResp, CurrentEtag),
- chttpd_external:send_external_response(Req, JsonResp)
- end).
-
-
-show_etag(#httpd{user_ctx=UserCtx}=Req, Doc, DDoc, More) ->
- Accept = chttpd:header_value(Req, "Accept"),
- DocPart = case Doc of
- nil -> nil;
- Doc -> chttpd:doc_etag(Doc)
- end,
- chttpd:make_etag({chttpd:doc_etag(DDoc), DocPart, Accept,
- {UserCtx#user_ctx.name, UserCtx#user_ctx.roles}, More}).
-
-% updates a doc based on a request
-% handle_doc_update_req(#httpd{method = 'GET'}=Req, _Db, _DDoc) ->
-% % anything but GET
-% send_method_not_allowed(Req, "POST,PUT,DELETE,ETC");
-
-% This call is creating a new doc using an _update function to
-% modify the provided request body.
-% /db/_design/foo/_update/bar
-handle_doc_update_req(#httpd{
- path_parts=[_, _, _, _, UpdateName]
- }=Req, Db, DDoc) ->
- send_doc_update_response(Req, Db, DDoc, UpdateName, nil, null);
-
-% /db/_design/foo/_update/bar/docid
-handle_doc_update_req(#httpd{
- path_parts=[_, _, _, _, UpdateName | DocIdParts]
- }=Req, Db, DDoc) ->
- DocId = ?l2b(string:join([?b2l(P) || P <- DocIdParts], "/")),
- Doc = maybe_open_doc(Db, DocId),
- send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId);
-
-
-handle_doc_update_req(Req, _Db, _DDoc) ->
- chttpd:send_error(Req, 404, <<"update_error">>, <<"Invalid path.">>).
-
-send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId) ->
- JsonReq = chttpd_external:json_req_obj(Req, Db, DocId),
- JsonDoc = couch_query_servers:json_doc(Doc),
- Cmd = [<<"updates">>, UpdateName],
- UpdateResp = couch_query_servers:ddoc_prompt(DDoc, Cmd, [JsonDoc, JsonReq]),
- JsonResp = case UpdateResp of
- [<<"up">>, {NewJsonDoc}, {JsonResp0}] ->
- case chttpd:header_value(
- Req, "X-Couch-Full-Commit", "false") of
- "true" ->
- Options = [full_commit, {user_ctx, Req#httpd.user_ctx}];
- _ ->
- Options = [{user_ctx, Req#httpd.user_ctx}]
- end,
- NewDoc = couch_db:doc_from_json_obj_validate(Db, {NewJsonDoc}),
- {ok, NewRev} = couch_db:update_doc(Db, NewDoc, Options),
- NewRevStr = couch_doc:rev_to_str(NewRev),
- {JsonResp1} = apply_headers(JsonResp0, [
- {<<"X-Couch-Update-NewRev">>, NewRevStr},
- {<<"X-Couch-Id">>, couch_util:url_encode(NewDoc#doc.id)}
- ]),
- {[{<<"code">>, 201} | JsonResp1]};
- [<<"up">>, _Other, {JsonResp0}] ->
- {[{<<"code">>, 200} | JsonResp0]}
- end,
- % todo set location field
- chttpd_external:send_external_response(Req, JsonResp).
-
-
-handle_view_list_req(#httpd{method=Method}=Req, Db, DDoc)
- when Method =:= 'GET' orelse Method =:= 'OPTIONS' ->
- case Req#httpd.path_parts of
- [_, _, _DName, _, LName, VName] ->
- % Same design doc for view and list
- handle_view_list(Req, Db, DDoc, LName, DDoc, VName, undefined);
- [_, _, _, _, LName, DName, VName] ->
- % Different design docs for view and list
- VDocId = <<"_design/", DName/binary>>,
- {ok, VDDoc} = couch_db:open_doc(Db, VDocId, [ejson_body]),
- handle_view_list(Req, Db, DDoc, LName, VDDoc, VName, undefined);
- _ ->
- chttpd:send_error(Req, 404, <<"list_error">>, <<"Bad path.">>)
- end;
-handle_view_list_req(#httpd{method='POST'}=Req, Db, DDoc) ->
- chttpd:validate_ctype(Req, "application/json"),
- {Props} = chttpd:json_body_obj(Req),
- Keys = proplists:get_value(<<"keys">>, Props),
- case Req#httpd.path_parts of
- [_, _, _DName, _, LName, VName] ->
- handle_view_list(Req, Db, DDoc, LName, DDoc, VName, Keys);
- [_, _, _, _, LName, DName, VName] ->
- % Different design docs for view and list
- VDocId = <<"_design/", DName/binary>>,
- {ok, VDDoc} = couch_db:open_doc(Db, VDocId, [ejson_body]),
- handle_view_list(Req, Db, DDoc, LName, VDDoc, VName, Keys);
- _ ->
- chttpd:send_error(Req, 404, <<"list_error">>, <<"Bad path.">>)
- end;
-handle_view_list_req(Req, _Db, _DDoc) ->
- chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-
-
-handle_view_list(Req, Db, DDoc, LName, VDDoc, VName, Keys) ->
- Args0 = couch_mrview_http:parse_body_and_query(Req, Keys),
- ETagFun = fun(BaseSig, Acc0) ->
- UserCtx = Req#httpd.user_ctx,
- Name = UserCtx#user_ctx.name,
- Roles = UserCtx#user_ctx.roles,
- Accept = chttpd:header_value(Req, "Accept"),
- Parts = {chttpd:doc_etag(DDoc), Accept, {Name, Roles}},
- ETag = chttpd:make_etag({BaseSig, Parts}),
- case chttpd:etag_match(Req, ETag) of
- true -> throw({etag_match, ETag});
- false -> {ok, Acc0#lacc{etag=ETag}}
- end
- end,
- Args = Args0#mrargs{preflight_fun=ETagFun},
- couch_httpd:etag_maybe(Req, fun() ->
- couch_query_servers:with_ddoc_proc(DDoc, fun(QServer) ->
- Acc = #lacc{db=Db, req=Req, qserver=QServer, lname=LName},
- case VName of
- <<"_all_docs">> ->
- couch_mrview:query_all_docs(Db, Args, fun list_cb/2, Acc);
- _ ->
- couch_mrview:query_view(Db, VDDoc, VName, Args, fun list_cb/2, Acc)
- end
- end)
- end).
-
-
-list_cb({meta, Meta}, #lacc{code=undefined} = Acc) ->
- MetaProps = case couch_util:get_value(total, Meta) of
- undefined -> [];
- Total -> [{total_rows, Total}]
- end ++ case couch_util:get_value(offset, Meta) of
- undefined -> [];
- Offset -> [{offset, Offset}]
- end ++ case couch_util:get_value(update_seq, Meta) of
- undefined -> [];
- UpdateSeq -> [{update_seq, UpdateSeq}]
- end,
- start_list_resp({MetaProps}, Acc);
-list_cb({row, Row}, #lacc{code=undefined} = Acc) ->
- {ok, NewAcc} = start_list_resp({[]}, Acc),
- send_list_row(Row, NewAcc);
-list_cb({row, Row}, Acc) ->
- send_list_row(Row, Acc);
-list_cb(complete, Acc) ->
- #lacc{qserver = {Proc, _}, req = Req, resp = Resp0} = Acc,
- if Resp0 =:= nil ->
- {ok, #lacc{resp = Resp}} = start_list_resp({[]}, Acc);
- true ->
- Resp = Resp0
- end,
- case couch_query_servers:proc_prompt(Proc, [<<"list_end">>]) of
- [<<"end">>, Data, Headers] ->
- Acc2 = fixup_headers(Headers, Acc#lacc{resp=Resp}),
- #lacc{resp = Resp2} = send_non_empty_chunk(Acc2, Data);
- [<<"end">>, Data] ->
- #lacc{resp = Resp2} = send_non_empty_chunk(Acc#lacc{resp=Resp}, Data)
- end,
- last_chunk(Req, Resp2),
- {ok, Resp2}.
-
-start_list_resp(Head, Acc) ->
- #lacc{db=Db, req=Req, qserver=QServer, lname=LName} = Acc,
- JsonReq = json_req_obj(Req, Db),
-
- [<<"start">>,Chunk,JsonResp] = couch_query_servers:ddoc_proc_prompt(QServer,
- [<<"lists">>, LName], [Head, JsonReq]),
- Acc2 = send_non_empty_chunk(fixup_headers(JsonResp, Acc), Chunk),
- {ok, Acc2}.
-
-fixup_headers(Headers, #lacc{etag=ETag} = Acc) ->
- Headers2 = apply_etag(Headers, ETag),
- #extern_resp_args{
- code = Code,
- ctype = CType,
- headers = ExtHeaders
- } = chttpd_external:parse_external_response(Headers2),
- Headers3 = chttpd_external:default_or_content_type(CType, ExtHeaders),
- Acc#lacc{code=Code, headers=Headers3}.
-
-send_list_row(Row, #lacc{qserver = {Proc, _}, req = Req, resp = Resp} = Acc) ->
- RowObj = case couch_util:get_value(id, Row) of
- undefined -> [];
- Id -> [{id, Id}]
- end ++ case couch_util:get_value(key, Row) of
- undefined -> [];
- Key -> [{key, Key}]
- end ++ case couch_util:get_value(value, Row) of
- undefined -> [];
- Val -> [{value, Val}]
- end ++ case couch_util:get_value(doc, Row) of
- undefined -> [];
- Doc -> [{doc, Doc}]
- end,
- try couch_query_servers:proc_prompt(Proc, [<<"list_row">>, {RowObj}]) of
- [<<"chunks">>, Chunk, Headers] ->
- Acc2 = send_non_empty_chunk(fixup_headers(Headers, Acc), Chunk),
- {ok, Acc2};
- [<<"chunks">>, Chunk] ->
- Acc2 = send_non_empty_chunk(Acc, Chunk),
- {ok, Acc2};
- [<<"end">>, Chunk, Headers] ->
- #lacc{resp = Resp2} = send_non_empty_chunk(fixup_headers(Headers, Acc), Chunk),
- {ok, Resp3} = last_chunk(Req, Resp2),
- {stop, Resp3};
- [<<"end">>, Chunk] ->
- #lacc{resp = Resp2} = send_non_empty_chunk(Acc, Chunk),
- {ok, Resp3} = last_chunk(Req, Resp2),
- {stop, Resp3}
- catch Error ->
- {ok, Resp2} = case Resp of
- undefined ->
- {Code, _, _} = chttpd:error_info(Error),
- #lacc{req=Req, headers=Headers} = Acc,
- chttpd:start_chunked_response(Req, Code, Headers);
- _ ->
- {ok, Resp}
- end,
- {ok, Resp3} = chttpd:send_chunked_error(Resp2, Error),
- {stop, Resp3}
- end.
-
-send_non_empty_chunk(Acc, []) ->
- Acc;
-send_non_empty_chunk(#lacc{resp=undefined} = Acc, Chunk) ->
- #lacc{req=Req, code=Code, headers=Headers} = Acc,
- {ok, Resp} = chttpd:start_chunked_response(Req, Code, Headers),
- send_non_empty_chunk(Acc#lacc{resp = Resp}, Chunk);
-send_non_empty_chunk(#lacc{resp=Resp} = Acc, Chunk) ->
- chttpd:send_chunk(Resp, Chunk),
- Acc.
-
-
-apply_etag(JsonResp, undefined) ->
- JsonResp;
-apply_etag({ExternalResponse}, CurrentEtag) ->
- % Here we embark on the delicate task of replacing or creating the
- % headers on the JsonResponse object. We need to control the Etag and
- % Vary headers. If the external function controls the Etag, we'd have to
- % run it to check for a match, which sort of defeats the purpose.
- apply_headers(ExternalResponse, [
- {<<"ETag">>, CurrentEtag},
- {<<"Vary">>, <<"Accept">>}
- ]).
-
-apply_headers(JsonResp, []) ->
- JsonResp;
-apply_headers(JsonResp, NewHeaders) ->
- case couch_util:get_value(<<"headers">>, JsonResp) of
- undefined ->
- {[{<<"headers">>, {NewHeaders}}| JsonResp]};
- JsonHeaders ->
- Headers = apply_headers1(JsonHeaders, NewHeaders),
- NewKV = {<<"headers">>, Headers},
- {lists:keyreplace(<<"headers">>, 1, JsonResp, NewKV)}
- end.
-apply_headers1(JsonHeaders, [{Key, Value} | Rest]) ->
- NewJsonHeaders = json_apply_field({Key, Value}, JsonHeaders),
- apply_headers1(NewJsonHeaders, Rest);
-apply_headers1(JsonHeaders, []) ->
- JsonHeaders.
-
-
-% Maybe this is in the proplists API
-% todo move to couch_util
-json_apply_field(H, {L}) ->
- json_apply_field(H, L, []).
-
-
-json_apply_field({Key, NewValue}, [{Key, _OldVal} | Headers], Acc) ->
- % drop matching keys
- json_apply_field({Key, NewValue}, Headers, Acc);
-json_apply_field({Key, NewValue}, [{OtherKey, OtherVal} | Headers], Acc) ->
- % something else is next, leave it alone.
- json_apply_field({Key, NewValue}, Headers, [{OtherKey, OtherVal} | Acc]);
-json_apply_field({Key, NewValue}, [], Acc) ->
- % end of list, add ours
- {[{Key, NewValue}|Acc]}.
-
-
-% This loads the db info if we have a fully loaded db record, but we might not
-% have the db locally on this node, in which case we load the info through fabric.
-json_req_obj(Req, Db) ->
- case couch_db:is_clustered(Db) of
- true ->
- % use a separate process because we're already in a receive loop,
- % and json_req_obj calls fabric:get_db_info()
- JRO = fun() -> exit(chttpd_external:json_req_obj(Req, Db)) end,
- {Pid, Ref} = spawn_monitor(JRO),
- receive {'DOWN', Ref, process, Pid, JsonReq} -> JsonReq end;
- false ->
- chttpd_external:json_req_obj(Req, Db)
- end.
-
-last_chunk(Req, undefined) ->
- chttpd:send_response(Req, 200, [], <<"">>);
-last_chunk(_Req, Resp) ->
- chttpd:send_chunk(Resp, []).
-
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-apply_headers_test_() ->
- [
- should_apply_headers(),
- should_apply_headers_with_merge(),
- should_apply_headers_with_merge_overwrite()
- ].
-
-should_apply_headers() ->
- ?_test(begin
- JsonResp = [{<<"code">>, 201}],
- Headers = [{<<"foo">>, <<"bar">>}],
- {Props} = apply_headers(JsonResp, Headers),
- JsonHeaders = couch_util:get_value(<<"headers">>, Props),
- ?assertEqual({Headers}, JsonHeaders)
- end).
-
-should_apply_headers_with_merge() ->
- ?_test(begin
- BaseHeaders = [{<<"bar">>, <<"baz">>}],
- NewHeaders = [{<<"foo">>, <<"bar">>}],
- JsonResp = [
- {<<"code">>, 201},
- {<<"headers">>, {BaseHeaders}}
- ],
- {Props} = apply_headers(JsonResp, NewHeaders),
- JsonHeaders = couch_util:get_value(<<"headers">>, Props),
- ExpectedHeaders = {NewHeaders ++ BaseHeaders},
- ?assertEqual(ExpectedHeaders, JsonHeaders)
- end).
-
-should_apply_headers_with_merge_overwrite() ->
- ?_test(begin
- BaseHeaders = [{<<"foo">>, <<"bar">>}],
- NewHeaders = [{<<"foo">>, <<"baz">>}],
- JsonResp = [
- {<<"code">>, 201},
- {<<"headers">>, {BaseHeaders}}
- ],
- {Props} = apply_headers(JsonResp, NewHeaders),
- JsonHeaders = couch_util:get_value(<<"headers">>, Props),
- ?assertEqual({NewHeaders}, JsonHeaders)
- end).
-
-
-send_list_row_test_() ->
- Cases = couch_tests_combinatorics:product([
- [
- {"[<<\"end\">>, [], []]", fun(_, _) -> [<<"end">>, [], []] end},
- {"[<<\"end\">>, []]", fun(_, _) -> [<<"end">>, []] end},
- {"throw(timeout)", fun(_, _) -> throw(timeout) end}
- ],
- [
- req,
- undefined
- ]]),
- {"Ensure send_list_row returns a valid response on end or error",
- {setup, fun setup/0, fun(_) -> meck:unload() end, [
- {
- lists:flatten(io_lib:format("~s -- ~p", [N, R])),
- should_return_valid_response(F, R)
- } || [{N, F}, R] <- Cases
- ]}
- }.
-
-setup() ->
- ok = meck:expect(chttpd, send_chunk,
- fun(Resp, _) -> {ok, Resp} end),
- ok = meck:expect(chttpd, send_chunked_error,
- fun(Resp, _) -> {ok, Resp} end),
- ok = meck:expect(chttpd, start_chunked_response,
- fun(_, _, _) -> {ok, resp} end),
- ok = meck:expect(chttpd_external, parse_external_response, 1,
- #extern_resp_args{headers = []}).
-
-should_return_valid_response(Spec, Req) ->
- ?_test(begin
- ok = meck:expect(couch_query_servers, proc_prompt, Spec),
- Acc = #lacc{qserver = {proc, undefined}, req = Req, resp = resp},
- ?assertEqual({stop, resp}, send_list_row([], Acc))
- end).
-
--endif.
diff --git a/src/couch_mrview/src/couch_mrview_test_util.erl b/src/couch_mrview/src/couch_mrview_test_util.erl
deleted file mode 100644
index 2dfa64e61..000000000
--- a/src/couch_mrview/src/couch_mrview_test_util.erl
+++ /dev/null
@@ -1,123 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_test_util).
-
--compile(export_all).
--compile(nowarn_export_all).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch/include/couch_eunit.hrl").
-
-
-init_db(Name, Type) ->
- init_db(Name, Type, 10).
-
-
-init_db(Name, Type, Count) ->
- {ok, Db} = new_db(Name, Type),
- Docs = make_docs(Type, Count),
- save_docs(Db, Docs).
-
-
-new_db(Name, Type) when Type == local; Type == design ->
- couch_server:delete(Name, [?ADMIN_CTX]),
- couch_db:create(Name, [?ADMIN_CTX]);
-new_db(Name, Type) ->
- couch_server:delete(Name, [?ADMIN_CTX]),
- {ok, Db} = couch_db:create(Name, [?ADMIN_CTX]),
- save_docs(Db, [ddoc(Type)]).
-
-delete_db(Name) ->
- couch_server:delete(Name, [?ADMIN_CTX]).
-
-save_docs(Db, Docs) ->
- {ok, _} = couch_db:update_docs(Db, Docs, []),
- couch_db:reopen(Db).
-
-
-make_docs(local, Count) ->
- [local_doc(I) || I <- lists:seq(1, Count)];
-make_docs(design, Count) ->
- lists:foldl(fun(I, Acc) ->
- [doc(I), ddoc(I) | Acc]
- end, [], lists:seq(1, Count));
-make_docs(_, Count) ->
- [doc(I) || I <- lists:seq(1, Count)].
-
-
-make_docs(_, Since, Count) ->
- [doc(I) || I <- lists:seq(Since, Count)].
-
-
-ddoc(map) ->
- couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/bar">>},
- {<<"views">>, {[
- {<<"baz">>, {[
- {<<"map">>, <<"function(doc) {emit(doc.val, doc.val);}">>}
- ]}},
- {<<"bing">>, {[
- {<<"map">>, <<"function(doc) {}">>}
- ]}},
- {<<"zing">>, {[
- {<<"map">>, <<
- "function(doc) {\n"
- " if(doc.foo !== undefined)\n"
- " emit(doc.foo, 0);\n"
- "}"
- >>}
- ]}}
- ]}}
- ]});
-ddoc(red) ->
- couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/red">>},
- {<<"views">>, {[
- {<<"baz">>, {[
- {<<"map">>, <<
- "function(doc) {\n"
- " emit([doc.val % 2, doc.val], doc.val);\n"
- "}\n"
- >>},
- {<<"reduce">>, <<"function(keys, vals) {return sum(vals);}">>}
- ]}},
- {<<"zing">>, {[
- {<<"map">>, <<
- "function(doc) {\n"
- " if(doc.foo !== undefined)\n"
- " emit(doc.foo, null);\n"
- "}"
- >>},
- {<<"reduce">>, <<"_count">>}
- ]}}
- ]}}
- ]});
-ddoc(Id) ->
- couch_doc:from_json_obj({[
- {<<"_id">>, list_to_binary(io_lib:format("_design/bar~2..0b", [Id]))},
- {<<"views">>, {[]}}
- ]}).
-
-
-doc(Id) ->
- couch_doc:from_json_obj({[
- {<<"_id">>, list_to_binary(integer_to_list(Id))},
- {<<"val">>, Id}
- ]}).
-
-
-local_doc(Id) ->
- couch_doc:from_json_obj({[
- {<<"_id">>, list_to_binary(io_lib:format("_local/~b", [Id]))},
- {<<"val">>, Id}
- ]}).
diff --git a/src/couch_mrview/src/couch_mrview_update_notifier.erl b/src/couch_mrview/src/couch_mrview_update_notifier.erl
deleted file mode 100644
index 803d39747..000000000
--- a/src/couch_mrview/src/couch_mrview_update_notifier.erl
+++ /dev/null
@@ -1,49 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_update_notifier).
-
--behaviour(gen_event).
-
--export([start_link/1, notify/1]).
--export([init/1, terminate/2, handle_event/2, handle_call/2, handle_info/2, code_change/3, stop/1]).
-
--include_lib("couch/include/couch_db.hrl").
-
-start_link(Exec) ->
- couch_event_sup:start_link(couch_mrview_update, {couch_mrview_update_notifier, make_ref()}, Exec).
-
-notify(Event) ->
- gen_event:notify(couch_mrview_update, Event).
-
-stop(Pid) ->
- couch_event_sup:stop(Pid).
-
-init(Fun) ->
- {ok, Fun}.
-
-terminate(_Reason, _State) ->
- ok.
-
-handle_event(Event, Fun) ->
- Fun(Event),
- {ok, Fun}.
-
-handle_call(_Request, State) ->
- {ok, ok, State}.
-
-handle_info({'EXIT', Pid, Reason}, Pid) ->
- couch_log:error("View update notification process ~p died: ~p", [Pid, Reason]),
- remove_handler.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
diff --git a/src/couch_mrview/src/couch_mrview_updater.erl b/src/couch_mrview/src/couch_mrview_updater.erl
deleted file mode 100644
index 522367c1d..000000000
--- a/src/couch_mrview/src/couch_mrview_updater.erl
+++ /dev/null
@@ -1,373 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_updater).
-
--export([start_update/4, purge/4, process_doc/3, finish_update/1]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--define(REM_VAL, removed).
-
-start_update(Partial, State, NumChanges, NumChangesDone) ->
- MaxSize = config:get_integer("view_updater", "queue_memory_cap", 100000),
- MaxItems = config:get_integer("view_updater", "queue_item_cap", 500),
- QueueOpts = [{max_size, MaxSize}, {max_items, MaxItems}],
- {ok, DocQueue} = couch_work_queue:new(QueueOpts),
- {ok, WriteQueue} = couch_work_queue:new(QueueOpts),
- InitState = State#mrst{
- first_build=State#mrst.update_seq==0,
- partial_resp_pid=Partial,
- doc_acc=[],
- doc_queue=DocQueue,
- write_queue=WriteQueue
- },
-
- Self = self(),
-
- MapFun = fun() ->
- erlang:put(io_priority,
- {view_update, State#mrst.db_name, State#mrst.idx_name}),
- Progress = case NumChanges of
- 0 -> 0;
- _ -> (NumChangesDone * 100) div NumChanges
- end,
- couch_task_status:add_task([
- {indexer_pid, ?l2b(pid_to_list(Partial))},
- {type, indexer},
- {database, State#mrst.db_name},
- {design_document, State#mrst.idx_name},
- {progress, Progress},
- {changes_done, NumChangesDone},
- {total_changes, NumChanges}
- ]),
- couch_task_status:set_update_frequency(500),
- map_docs(Self, InitState)
- end,
- WriteFun = fun() ->
- erlang:put(io_priority,
- {view_update, State#mrst.db_name, State#mrst.idx_name}),
- write_results(Self, InitState)
- end,
- spawn_link(MapFun),
- spawn_link(WriteFun),
-
- {ok, InitState}.
-
-
-purge(_Db, PurgeSeq, PurgedIdRevs, State) ->
- #mrst{
- id_btree=IdBtree,
- views=Views,
- partitioned=Partitioned
- } = State,
-
- Ids = [Id || {Id, _Revs} <- PurgedIdRevs],
- {ok, Lookups, IdBtree2} = couch_btree:query_modify(IdBtree, Ids, [], Ids),
-
- MakeDictFun = fun
- ({ok, {DocId, ViewNumRowKeys}}, DictAcc) ->
- FoldFun = fun
- ({ViewNum, {Key, Seq, _Op}}, DictAcc2) ->
- dict:append(ViewNum, {Key, Seq, DocId}, DictAcc2);
- ({ViewNum, RowKey0}, DictAcc2) ->
- RowKey = if not Partitioned -> RowKey0; true ->
- [{RK, _}] = inject_partition([{RowKey0, DocId}]),
- RK
- end,
- dict:append(ViewNum, {RowKey, DocId}, DictAcc2)
- end,
- lists:foldl(FoldFun, DictAcc, ViewNumRowKeys);
- ({not_found, _}, DictAcc) ->
- DictAcc
- end,
- KeysToRemove = lists:foldl(MakeDictFun, dict:new(), Lookups),
-
- RemKeysFun = fun(#mrview{id_num=ViewId}=View) ->
- ToRem = couch_util:dict_find(ViewId, KeysToRemove, []),
- {ok, VBtree2} = couch_btree:add_remove(View#mrview.btree, [], ToRem),
- NewPurgeSeq = case VBtree2 =/= View#mrview.btree of
- true -> PurgeSeq;
- _ -> View#mrview.purge_seq
- end,
- View#mrview{btree=VBtree2, purge_seq=NewPurgeSeq}
- end,
-
- Views2 = lists:map(RemKeysFun, Views),
- {ok, State#mrst{
- id_btree=IdBtree2,
- views=Views2,
- purge_seq=PurgeSeq
- }}.
-
-
-process_doc(Doc, Seq, #mrst{doc_acc=Acc}=State) when length(Acc) > 100 ->
- couch_work_queue:queue(State#mrst.doc_queue, lists:reverse(Acc)),
- process_doc(Doc, Seq, State#mrst{doc_acc=[]});
-process_doc(nil, Seq, #mrst{doc_acc=Acc}=State) ->
- {ok, State#mrst{doc_acc=[{nil, Seq, nil} | Acc]}};
-process_doc(#doc{id=Id, deleted=true}, Seq, #mrst{doc_acc=Acc}=State) ->
- {ok, State#mrst{doc_acc=[{Id, Seq, deleted} | Acc]}};
-process_doc(#doc{id=Id}=Doc, Seq, #mrst{doc_acc=Acc}=State) ->
- {ok, State#mrst{doc_acc=[{Id, Seq, Doc} | Acc]}}.
-
-
-finish_update(#mrst{doc_acc=Acc}=State) ->
- if Acc /= [] ->
- couch_work_queue:queue(State#mrst.doc_queue, Acc);
- true -> ok
- end,
- couch_work_queue:close(State#mrst.doc_queue),
- receive
- {new_state, NewState} ->
- {ok, NewState#mrst{
- first_build=undefined,
- partial_resp_pid=undefined,
- doc_acc=undefined,
- doc_queue=undefined,
- write_queue=undefined,
- qserver=nil
- }}
- end.
-
-
-map_docs(Parent, #mrst{db_name = DbName, idx_name = IdxName} = State0) ->
- erlang:put(io_priority, {view_update, DbName, IdxName}),
- case couch_work_queue:dequeue(State0#mrst.doc_queue) of
- closed ->
- couch_query_servers:stop_doc_map(State0#mrst.qserver),
- couch_work_queue:close(State0#mrst.write_queue);
- {ok, Dequeued} ->
-            % Run all the non-deleted docs through the view engine and
-            % then pass the results on to the writer process.
- State1 = case State0#mrst.qserver of
- nil -> start_query_server(State0);
- _ -> State0
- end,
- QServer = State1#mrst.qserver,
- DocFun = fun
- ({nil, Seq, _}, {SeqAcc, Results}) ->
- {erlang:max(Seq, SeqAcc), Results};
- ({Id, Seq, deleted}, {SeqAcc, Results}) ->
- {erlang:max(Seq, SeqAcc), [{Id, []} | Results]};
- ({Id, Seq, Doc}, {SeqAcc, Results}) ->
- couch_stats:increment_counter([couchdb, mrview, map_doc]),
- {ok, Res} = couch_query_servers:map_doc_raw(QServer, Doc),
- {erlang:max(Seq, SeqAcc), [{Id, Res} | Results]}
- end,
- FoldFun = fun(Docs, Acc) ->
- update_task(length(Docs)),
- lists:foldl(DocFun, Acc, Docs)
- end,
- Results = lists:foldl(FoldFun, {0, []}, Dequeued),
- couch_work_queue:queue(State1#mrst.write_queue, Results),
- map_docs(Parent, State1)
- end.
-
-
-write_results(Parent, #mrst{} = State) ->
- case accumulate_writes(State, State#mrst.write_queue, nil) of
- stop ->
- Parent ! {new_state, State};
- {Go, {Seq, ViewKVs, DocIdKeys}} ->
- NewState = write_kvs(State, Seq, ViewKVs, DocIdKeys),
- if Go == stop ->
- Parent ! {new_state, NewState};
- true ->
- send_partial(NewState#mrst.partial_resp_pid, NewState),
- write_results(Parent, NewState)
- end
- end.
-
-
-start_query_server(State) ->
- #mrst{
- language=Language,
- lib=Lib,
- views=Views
- } = State,
- Defs = [View#mrview.def || View <- Views],
- {ok, QServer} = couch_query_servers:start_doc_map(Language, Defs, Lib),
- State#mrst{qserver=QServer}.
-
-
-accumulate_writes(State, W, Acc0) ->
- {Seq, ViewKVs, DocIdKVs} = case Acc0 of
- nil -> {0, [{V#mrview.id_num, []} || V <- State#mrst.views], []};
- _ -> Acc0
- end,
- case couch_work_queue:dequeue(W) of
- closed when Seq == 0 ->
- stop;
- closed ->
- {stop, {Seq, ViewKVs, DocIdKVs}};
- {ok, Info} ->
- {_, _, NewIds} = Acc = merge_results(Info, Seq, ViewKVs, DocIdKVs),
- case accumulate_more(length(NewIds), Acc) of
- true -> accumulate_writes(State, W, Acc);
- false -> {ok, Acc}
- end
- end.
-
-
-accumulate_more(NumDocIds, Acc) ->
- % check if we have enough items now
- MinItems = config:get("view_updater", "min_writer_items", "100"),
- MinSize = config:get("view_updater", "min_writer_size", "16777216"),
- CurrMem = ?term_size(Acc),
- NumDocIds < list_to_integer(MinItems)
- andalso CurrMem < list_to_integer(MinSize).
-
-
-merge_results([], SeqAcc, ViewKVs, DocIdKeys) ->
- {SeqAcc, ViewKVs, DocIdKeys};
-merge_results([{Seq, Results} | Rest], SeqAcc, ViewKVs, DocIdKeys) ->
- Fun = fun(RawResults, {VKV, DIK}) ->
- merge_results(RawResults, VKV, DIK)
- end,
- {ViewKVs1, DocIdKeys1} = lists:foldl(Fun, {ViewKVs, DocIdKeys}, Results),
- merge_results(Rest, erlang:max(Seq, SeqAcc), ViewKVs1, DocIdKeys1).
-
-
-merge_results({DocId, []}, ViewKVs, DocIdKeys) ->
- {ViewKVs, [{DocId, []} | DocIdKeys]};
-merge_results({DocId, RawResults}, ViewKVs, DocIdKeys) ->
- JsonResults = couch_query_servers:raw_to_ejson(RawResults),
- Results = [[list_to_tuple(Res) || Res <- FunRs] || FunRs <- JsonResults],
- case lists:flatten(Results) of
- [] ->
- {ViewKVs, [{DocId, []} | DocIdKeys]};
- _ ->
- {ViewKVs1, ViewIdKeys} = insert_results(DocId, Results, ViewKVs, [], []),
- {ViewKVs1, [ViewIdKeys | DocIdKeys]}
- end.
-
-
-insert_results(DocId, [], [], ViewKVs, ViewIdKeys) ->
- {lists:reverse(ViewKVs), {DocId, ViewIdKeys}};
-insert_results(DocId, [KVs | RKVs], [{Id, VKVs} | RVKVs], VKVAcc, VIdKeys) ->
- CombineDupesFun = fun
- ({Key, Val}, {[{Key, {dups, Vals}} | Rest], IdKeys}) ->
- {[{Key, {dups, [Val | Vals]}} | Rest], IdKeys};
- ({Key, Val1}, {[{Key, Val2} | Rest], IdKeys}) ->
- {[{Key, {dups, [Val1, Val2]}} | Rest], IdKeys};
- ({Key, Value}, {Rest, IdKeys}) ->
- {[{Key, Value} | Rest], [{Id, Key} | IdKeys]}
- end,
- InitAcc = {[], VIdKeys},
- couch_stats:increment_counter([couchdb, mrview, emits], length(KVs)),
- {Duped, VIdKeys0} = lists:foldl(CombineDupesFun, InitAcc,
- lists:sort(KVs)),
- FinalKVs = [{{Key, DocId}, Val} || {Key, Val} <- Duped] ++ VKVs,
- insert_results(DocId, RKVs, RVKVs, [{Id, FinalKVs} | VKVAcc], VIdKeys0).
-
-
-write_kvs(State, UpdateSeq, ViewKVs, DocIdKeys) ->
- #mrst{
- id_btree=IdBtree,
- first_build=FirstBuild,
- partitioned=Partitioned
- } = State,
-
- {ok, ToRemove, IdBtree2} = update_id_btree(IdBtree, DocIdKeys, FirstBuild),
- ToRemByView = collapse_rem_keys(ToRemove, dict:new()),
-
- UpdateView = fun(#mrview{id_num=ViewId}=View, {ViewId, KVs0}) ->
- ToRem0 = couch_util:dict_find(ViewId, ToRemByView, []),
- {KVs, ToRem} = case Partitioned of
- true ->
- KVs1 = inject_partition(KVs0),
- ToRem1 = inject_partition(ToRem0),
- {KVs1, ToRem1};
- false ->
- {KVs0, ToRem0}
- end,
- {ok, VBtree2} = couch_btree:add_remove(View#mrview.btree, KVs, ToRem),
- NewUpdateSeq = case VBtree2 =/= View#mrview.btree of
- true -> UpdateSeq;
- _ -> View#mrview.update_seq
- end,
-
- View2 = View#mrview{btree=VBtree2, update_seq=NewUpdateSeq},
- maybe_notify(State, View2, KVs, ToRem),
- View2
- end,
-
- State#mrst{
- views=lists:zipwith(UpdateView, State#mrst.views, ViewKVs),
- update_seq=UpdateSeq,
- id_btree=IdBtree2
- }.
-
-
-inject_partition(Rows) ->
- lists:map(fun
- ({{Key, DocId}, Value}) ->
- % Adding a row to the view
- {Partition, _} = couch_partition:extract(DocId),
- {{{p, Partition, Key}, DocId}, Value};
- ({Key, DocId}) ->
- % Removing a row based on values in id_tree
- {Partition, _} = couch_partition:extract(DocId),
- {{p, Partition, Key}, DocId}
- end, Rows).
-
-
-update_id_btree(Btree, DocIdKeys, true) ->
- ToAdd = [{Id, DIKeys} || {Id, DIKeys} <- DocIdKeys, DIKeys /= []],
- couch_btree:query_modify(Btree, [], ToAdd, []);
-update_id_btree(Btree, DocIdKeys, _) ->
- ToFind = [Id || {Id, _} <- DocIdKeys],
- ToAdd = [{Id, DIKeys} || {Id, DIKeys} <- DocIdKeys, DIKeys /= []],
- ToRem = [Id || {Id, DIKeys} <- DocIdKeys, DIKeys == []],
- couch_btree:query_modify(Btree, ToFind, ToAdd, ToRem).
-
-
-collapse_rem_keys([], Acc) ->
- Acc;
-collapse_rem_keys([{ok, {DocId, ViewIdKeys}} | Rest], Acc) ->
- NewAcc = lists:foldl(fun({ViewId, Key}, Acc2) ->
- dict:append(ViewId, {Key, DocId}, Acc2)
- end, Acc, ViewIdKeys),
- collapse_rem_keys(Rest, NewAcc);
-collapse_rem_keys([{not_found, _} | Rest], Acc) ->
- collapse_rem_keys(Rest, Acc).
-
-
-send_partial(Pid, State) when is_pid(Pid) ->
- gen_server:cast(Pid, {new_state, State});
-send_partial(_, _) ->
- ok.
-
-
-update_task(NumChanges) ->
- [Changes, Total] = couch_task_status:get([changes_done, total_changes]),
- Changes2 = Changes + NumChanges,
- Progress = case Total of
- 0 ->
- % updater restart after compaction finishes
- 0;
- _ ->
- (Changes2 * 100) div Total
- end,
- couch_task_status:update([{progress, Progress}, {changes_done, Changes2}]).
-
-
-maybe_notify(State, View, KVs, ToRem) ->
- Updated = fun() ->
- [Key || {{Key, _}, _} <- KVs]
- end,
- Removed = fun() ->
- [Key || {Key, _DocId} <- ToRem]
- end,
- couch_index_plugin:index_update(State, View, Updated, Removed).
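Two details of the updater deleted above are easy to miss. Batching between the map and write stages is config-driven: the "view_updater" settings queue_memory_cap and queue_item_cap bound the work queues, while min_writer_items and min_writer_size (read in accumulate_more/2) decide when the writer flushes a batch. And for partitioned views, inject_partition/1 prefixes each key with the document's partition before it reaches the view btree. A sketch of that transformation, assuming couch_partition:extract(<<"sensors:temp1">>) returns {<<"sensors">>, <<"temp1">>}:

    %% Row being added to a view (an emitted {Key, DocId}/Value pair):
    %%   {{<<"avg">>, <<"sensors:temp1">>}, 42}
    %% becomes
    %%   {{{p, <<"sensors">>, <<"avg">>}, <<"sensors:temp1">>}, 42}
    %%
    %% Row being removed (a {Key, DocId} pair read back from the id btree):
    %%   {<<"avg">>, <<"sensors:temp1">>}
    %% becomes
    %%   {{p, <<"sensors">>, <<"avg">>}, <<"sensors:temp1">>}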
diff --git a/src/couch_mrview/src/couch_mrview_util.erl b/src/couch_mrview/src/couch_mrview_util.erl
deleted file mode 100644
index fe6e6bd60..000000000
--- a/src/couch_mrview/src/couch_mrview_util.erl
+++ /dev/null
@@ -1,1180 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_util).
-
--export([get_view/4, get_view_index_pid/4]).
--export([get_local_purge_doc_id/1, get_value_from_options/2]).
--export([verify_view_filename/1, get_signature_from_filename/1]).
--export([ddoc_to_mrst/2, init_state/4, reset_index/3]).
--export([make_header/1]).
--export([index_file/2, compaction_file/2, open_file/1]).
--export([delete_files/2, delete_index_file/2, delete_compaction_file/2]).
--export([get_row_count/1, all_docs_reduce_to_count/1, reduce_to_count/1]).
--export([all_docs_key_opts/1, all_docs_key_opts/2, key_opts/1, key_opts/2]).
--export([fold/4, fold_reduce/4]).
--export([temp_view_to_ddoc/1]).
--export([calculate_external_size/1]).
--export([calculate_active_size/1]).
--export([validate_all_docs_args/2, validate_args/1, validate_args/3]).
--export([maybe_load_doc/3, maybe_load_doc/4]).
--export([maybe_update_index_file/1]).
--export([extract_view/4, extract_view_reduce/1]).
--export([get_view_keys/1, get_view_queries/1]).
--export([set_view_type/3]).
--export([set_extra/3, get_extra/2, get_extra/3]).
-
--define(MOD, couch_mrview_index).
--define(GET_VIEW_RETRY_COUNT, 1).
--define(GET_VIEW_RETRY_DELAY, 50).
--define(LOWEST_KEY, null).
--define(HIGHEST_KEY, {<<255, 255, 255, 255>>}).
--define(LOWEST(A, B), (if A < B -> A; true -> B end)).
--define(HIGHEST(A, B), (if A > B -> A; true -> B end)).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-
-get_local_purge_doc_id(Sig) ->
- ?l2b(?LOCAL_DOC_PREFIX ++ "purge-mrview-" ++ Sig).
-
-
-get_value_from_options(Key, Options) ->
- case couch_util:get_value(Key, Options) of
- undefined ->
-            Reason = <<"'", Key/binary, "' must exist in options.">>,
- throw({bad_request, Reason});
- Value -> Value
- end.
-
-
-verify_view_filename(FileName) ->
- FilePathList = filename:split(FileName),
- PureFN = lists:last(FilePathList),
- case filename:extension(PureFN) of
- ".view" ->
- Sig = filename:basename(PureFN),
- case [Ch || Ch <- Sig, not (((Ch >= $0) and (Ch =< $9))
- orelse ((Ch >= $a) and (Ch =< $f))
- orelse ((Ch >= $A) and (Ch =< $F)))] == [] of
- true -> true;
- false -> false
- end;
- _ -> false
- end.
-
-get_signature_from_filename(FileName) ->
- FilePathList = filename:split(FileName),
- PureFN = lists:last(FilePathList),
- filename:basename(PureFN, ".view").
-
-get_view(Db, DDoc, ViewName, Args0) ->
- case get_view_index_state(Db, DDoc, ViewName, Args0) of
- {ok, State, Args2} ->
- Ref = erlang:monitor(process, State#mrst.fd),
- #mrst{language=Lang, views=Views} = State,
- {Type, View, Args3} = extract_view(Lang, Args2, ViewName, Views),
- check_range(Args3, view_cmp(View)),
- Sig = view_sig(Db, State, View, Args3),
- {ok, {Type, View, Ref}, Sig, Args3};
- ddoc_updated ->
- ddoc_updated
- end.
-
-
-get_view_index_pid(Db, DDoc, ViewName, Args0) ->
- ArgCheck = fun(InitState) ->
- Args1 = set_view_type(Args0, ViewName, InitState#mrst.views),
- {ok, validate_args(InitState, Args1)}
- end,
- couch_index_server:get_index(?MOD, Db, DDoc, ArgCheck).
-
-
-get_view_index_state(Db, DDoc, ViewName, Args0) ->
- get_view_index_state(Db, DDoc, ViewName, Args0, ?GET_VIEW_RETRY_COUNT).
-
-get_view_index_state(_, DDoc, _, _, RetryCount) when RetryCount < 0 ->
- couch_log:warning("DDoc '~s' recreated too frequently", [DDoc#doc.id]),
- throw({get_view_state, exceeded_retry_count});
-get_view_index_state(Db, DDoc, ViewName, Args0, RetryCount) ->
- try
- {ok, Pid, Args} = get_view_index_pid(Db, DDoc, ViewName, Args0),
- UpdateSeq = couch_util:with_db(Db, fun(WDb) ->
- couch_db:get_update_seq(WDb)
- end),
- State = case Args#mrargs.update of
- lazy ->
- spawn(fun() ->
- catch couch_index:get_state(Pid, UpdateSeq)
- end),
- couch_index:get_state(Pid, 0);
- false ->
- couch_index:get_state(Pid, 0);
- _ ->
- couch_index:get_state(Pid, UpdateSeq)
- end,
- case State of
- {ok, State0} -> {ok, State0, Args};
- ddoc_updated -> ddoc_updated;
- Else -> throw(Else)
- end
- catch
- exit:{Reason, _} when Reason == noproc; Reason == normal ->
- timer:sleep(?GET_VIEW_RETRY_DELAY),
- get_view_index_state(Db, DDoc, ViewName, Args0, RetryCount - 1);
- error:{badmatch, Error} ->
- throw(Error);
- Error ->
- throw(Error)
- end.
-
-
-ddoc_to_mrst(DbName, #doc{id=Id, body={Fields}}) ->
- MakeDict = fun({Name, {MRFuns}}, DictBySrcAcc) ->
- case couch_util:get_value(<<"map">>, MRFuns) of
- MapSrc when MapSrc /= undefined ->
- RedSrc = couch_util:get_value(<<"reduce">>, MRFuns, null),
- {ViewOpts} = couch_util:get_value(<<"options">>, MRFuns, {[]}),
- View = case dict:find({MapSrc, ViewOpts}, DictBySrcAcc) of
- {ok, View0} -> View0;
- error -> #mrview{def=MapSrc, options=ViewOpts}
- end,
- {MapNames, RedSrcs} = case RedSrc of
- null ->
- MNames = [Name | View#mrview.map_names],
- {MNames, View#mrview.reduce_funs};
- _ ->
- RedFuns = [{Name, RedSrc} | View#mrview.reduce_funs],
- {View#mrview.map_names, RedFuns}
- end,
- View2 = View#mrview{map_names=MapNames, reduce_funs=RedSrcs},
- dict:store({MapSrc, ViewOpts}, View2, DictBySrcAcc);
- undefined ->
- DictBySrcAcc
- end;
- ({Name, Else}, DictBySrcAcc) ->
- couch_log:error("design_doc_to_view_group ~s views ~p",
- [Name, Else]),
- DictBySrcAcc
- end,
- {DesignOpts} = proplists:get_value(<<"options">>, Fields, {[]}),
- Partitioned = proplists:get_value(<<"partitioned">>, DesignOpts, false),
-
- {RawViews} = couch_util:get_value(<<"views">>, Fields, {[]}),
- BySrc = lists:foldl(MakeDict, dict:new(), RawViews),
-
- NumViews = fun({_, View}, N) ->
- {View#mrview{id_num=N}, N+1}
- end,
- {Views, _} = lists:mapfoldl(NumViews, 0, lists:sort(dict:to_list(BySrc))),
-
- Language = couch_util:get_value(<<"language">>, Fields, <<"javascript">>),
- Lib = couch_util:get_value(<<"lib">>, RawViews, {[]}),
-
- IdxState = #mrst{
- db_name=DbName,
- idx_name=Id,
- lib=Lib,
- views=Views,
- language=Language,
- design_opts=DesignOpts,
- partitioned=Partitioned
- },
- SigInfo = {Views, Language, DesignOpts, couch_index_util:sort_lib(Lib)},
- {ok, IdxState#mrst{sig=couch_hash:md5_hash(term_to_binary(SigInfo))}}.
-
-
-set_view_type(_Args, _ViewName, []) ->
- throw({not_found, missing_named_view});
-set_view_type(Args, ViewName, [View | Rest]) ->
- RedNames = [N || {N, _} <- View#mrview.reduce_funs],
- case lists:member(ViewName, RedNames) of
- true ->
- case Args#mrargs.reduce of
- false -> Args#mrargs{view_type=map};
- _ -> Args#mrargs{view_type=red}
- end;
- false ->
- case lists:member(ViewName, View#mrview.map_names) of
- true -> Args#mrargs{view_type=map};
- false -> set_view_type(Args, ViewName, Rest)
- end
- end.
-
-
-set_extra(#mrargs{} = Args, Key, Value) ->
- Extra0 = Args#mrargs.extra,
- Extra1 = lists:ukeysort(1, [{Key, Value} | Extra0]),
- Args#mrargs{extra = Extra1}.
-
-
-get_extra(#mrargs{} = Args, Key) ->
- couch_util:get_value(Key, Args#mrargs.extra).
-
-get_extra(#mrargs{} = Args, Key, Default) ->
- couch_util:get_value(Key, Args#mrargs.extra, Default).
-
-
-extract_view(_Lang, _Args, _ViewName, []) ->
- throw({not_found, missing_named_view});
-extract_view(Lang, #mrargs{view_type=map}=Args, Name, [View | Rest]) ->
- Names = View#mrview.map_names ++ [N || {N, _} <- View#mrview.reduce_funs],
- case lists:member(Name, Names) of
- true -> {map, View, Args};
- _ -> extract_view(Lang, Args, Name, Rest)
- end;
-extract_view(Lang, #mrargs{view_type=red}=Args, Name, [View | Rest]) ->
- RedNames = [N || {N, _} <- View#mrview.reduce_funs],
- case lists:member(Name, RedNames) of
- true -> {red, {index_of(Name, RedNames), Lang, View}, Args};
- false -> extract_view(Lang, Args, Name, Rest)
- end.
-
-
-view_sig(Db, State, View, #mrargs{include_docs=true}=Args) ->
- BaseSig = view_sig(Db, State, View, Args#mrargs{include_docs=false}),
- UpdateSeq = couch_db:get_update_seq(Db),
- PurgeSeq = couch_db:get_purge_seq(Db),
- Term = view_sig_term(BaseSig, UpdateSeq, PurgeSeq),
- couch_index_util:hexsig(couch_hash:md5_hash(term_to_binary(Term)));
-view_sig(Db, State, {_Nth, _Lang, View}, Args) ->
- view_sig(Db, State, View, Args);
-view_sig(_Db, State, View, Args0) ->
- Sig = State#mrst.sig,
- UpdateSeq = View#mrview.update_seq,
- PurgeSeq = View#mrview.purge_seq,
- Args = Args0#mrargs{
- preflight_fun=undefined,
- extra=[]
- },
- Term = view_sig_term(Sig, UpdateSeq, PurgeSeq, Args),
- couch_index_util:hexsig(couch_hash:md5_hash(term_to_binary(Term))).
-
-view_sig_term(BaseSig, UpdateSeq, PurgeSeq) ->
- {BaseSig, UpdateSeq, PurgeSeq}.
-
-view_sig_term(BaseSig, UpdateSeq, PurgeSeq, Args) ->
- {BaseSig, UpdateSeq, PurgeSeq, Args}.
-
-
-init_state(Db, Fd, #mrst{views=Views}=State, nil) ->
- PurgeSeq = couch_db:get_purge_seq(Db),
- Header = #mrheader{
- seq=0,
- purge_seq=PurgeSeq,
- id_btree_state=nil,
- view_states=[make_view_state(#mrview{}) || _ <- Views]
- },
- init_state(Db, Fd, State, Header);
-init_state(Db, Fd, State, Header) ->
- #mrst{
- language=Lang,
- views=Views
- } = State,
- #mrheader{
- seq=Seq,
- purge_seq=PurgeSeq,
- id_btree_state=IdBtreeState,
- view_states=ViewStates
- } = maybe_update_header(Header),
-
- IdBtOpts = [
- {compression, couch_compress:get_compression_method()}
- ],
- {ok, IdBtree} = couch_btree:open(IdBtreeState, Fd, IdBtOpts),
-
- OpenViewFun = fun(St, View) -> open_view(Db, Fd, Lang, St, View) end,
- Views2 = lists:zipwith(OpenViewFun, ViewStates, Views),
-
- State#mrst{
- fd=Fd,
- fd_monitor=erlang:monitor(process, Fd),
- update_seq=Seq,
- purge_seq=PurgeSeq,
- id_btree=IdBtree,
- views=Views2
- }.
-
-open_view(_Db, Fd, Lang, ViewState, View) ->
- ReduceFun = make_reduce_fun(Lang, View#mrview.reduce_funs),
- LessFun = maybe_define_less_fun(View),
- Compression = couch_compress:get_compression_method(),
- BTState = get_key_btree_state(ViewState),
- ViewBtOpts = [
- {less, LessFun},
- {reduce, ReduceFun},
- {compression, Compression}
- ],
- {ok, Btree} = couch_btree:open(BTState, Fd, ViewBtOpts),
-
- View#mrview{btree=Btree,
- update_seq=get_update_seq(ViewState),
- purge_seq=get_purge_seq(ViewState)}.
-
-
-temp_view_to_ddoc({Props}) ->
- Language = couch_util:get_value(<<"language">>, Props, <<"javascript">>),
- Options = couch_util:get_value(<<"options">>, Props, {[]}),
- View0 = [{<<"map">>, couch_util:get_value(<<"map">>, Props)}],
- View1 = View0 ++ case couch_util:get_value(<<"reduce">>, Props) of
- RedSrc when is_binary(RedSrc) -> [{<<"reduce">>, RedSrc}];
- _ -> []
- end,
- DDoc = {[
- {<<"_id">>, couch_uuids:random()},
- {<<"language">>, Language},
- {<<"options">>, Options},
- {<<"views">>, {[
- {<<"temp">>, {View1}}
- ]}}
- ]},
- couch_doc:from_json_obj(DDoc).
-
-
-get_row_count(#mrview{btree=Bt}) ->
- Count = case couch_btree:full_reduce(Bt) of
- {ok, {Count0, _Reds, _}} -> Count0;
- {ok, {Count0, _Reds}} -> Count0
- end,
- {ok, Count}.
-
-
-all_docs_reduce_to_count(Reductions) ->
- Reduce = fun couch_bt_engine:id_tree_reduce/2,
- {Count, _, _} = couch_btree:final_reduce(Reduce, Reductions),
- Count.
-
-reduce_to_count(nil) ->
- 0;
-reduce_to_count(Reductions) ->
- CountReduceFun = fun count_reduce/2,
- FinalReduction = couch_btree:final_reduce(CountReduceFun, Reductions),
- get_count(FinalReduction).
-
-
-fold(#mrview{btree=Bt}, Fun, Acc, Opts) ->
- WrapperFun = fun(KV, Reds, Acc2) ->
- fold_fun(Fun, expand_dups([KV], []), Reds, Acc2)
- end,
- {ok, _LastRed, _Acc} = couch_btree:fold(Bt, WrapperFun, Acc, Opts).
-
-fold_fun(_Fun, [], _, Acc) ->
- {ok, Acc};
-fold_fun(Fun, [KV|Rest], {KVReds, Reds}, Acc) ->
- case Fun(KV, {KVReds, Reds}, Acc) of
- {ok, Acc2} ->
- fold_fun(Fun, Rest, {[KV|KVReds], Reds}, Acc2);
- {stop, Acc2} ->
- {stop, Acc2}
- end.
-
-
-fold_reduce({NthRed, Lang, View}, Fun, Acc, Options) ->
- #mrview{
- btree=Bt,
- reduce_funs=RedFuns
- } = View,
-
- ReduceFun = make_user_reds_reduce_fun(Lang, RedFuns, NthRed),
-
- WrapperFun = fun({GroupedKey, _}, PartialReds, Acc0) ->
- FinalReduction = couch_btree:final_reduce(ReduceFun, PartialReds),
- UserReductions = get_user_reds(FinalReduction),
- Fun(GroupedKey, lists:nth(NthRed, UserReductions), Acc0)
- end,
-
- couch_btree:fold_reduce(Bt, WrapperFun, Acc, Options).
-
-
-validate_args(Db, DDoc, Args0) ->
- {ok, State} = couch_mrview_util:ddoc_to_mrst(fabric2_db:name(Db), DDoc),
- Args1 = apply_limit(State#mrst.partitioned, Args0),
- validate_args(State, Args1).
-
-
-validate_args(#mrst{} = State, Args0) ->
- Args = validate_args(Args0),
-
- ViewPartitioned = State#mrst.partitioned,
- Partition = get_extra(Args, partition),
-
- case {ViewPartitioned, Partition} of
- {true, undefined} ->
- Msg1 = <<"`partition` parameter is mandatory "
- "for queries to this view.">>,
- mrverror(Msg1);
- {true, _} ->
- apply_partition(Args, Partition);
- {false, undefined} ->
- Args;
- {false, Value} when is_binary(Value) ->
- Msg2 = <<"`partition` parameter is not "
- "supported in this design doc">>,
- mrverror(Msg2)
- end.
-
-
-apply_limit(ViewPartitioned, Args) ->
- Options = Args#mrargs.extra,
- IgnorePQLimit = lists:keyfind(ignore_partition_query_limit, 1, Options),
- LimitType = case {ViewPartitioned, IgnorePQLimit} of
- {true, false} -> "partition_query_limit";
- {true, _} -> "query_limit";
- {false, _} -> "query_limit"
- end,
-
- MaxLimit = config:get_integer("query_server_config",
- LimitType, ?MAX_VIEW_LIMIT),
-
- % Set the highest limit possible if a user has not
- % specified a limit
- Args1 = case Args#mrargs.limit == ?MAX_VIEW_LIMIT of
- true -> Args#mrargs{limit = MaxLimit};
- false -> Args
- end,
-
- if Args1#mrargs.limit =< MaxLimit -> Args1; true ->
- Fmt = "Limit is too large, must not exceed ~p",
- mrverror(io_lib:format(Fmt, [MaxLimit]))
- end.
-
-
-validate_all_docs_args(Db, Args0) ->
- Args = validate_args(Args0),
-
- DbPartitioned = couch_db:is_partitioned(Db),
- Partition = get_extra(Args, partition),
-
- case {DbPartitioned, Partition} of
- {false, <<_/binary>>} ->
- mrverror(<<"`partition` parameter is not supported on this db">>);
- {_, <<_/binary>>} ->
- Args1 = apply_limit(true, Args),
- apply_all_docs_partition(Args1, Partition);
- _ ->
- Args
- end.
-
-
-validate_args(Args) ->
- GroupLevel = determine_group_level(Args),
- Reduce = Args#mrargs.reduce,
- case Reduce == undefined orelse is_boolean(Reduce) of
- true -> ok;
- _ -> mrverror(<<"Invalid `reduce` value.">>)
- end,
-
- case {Args#mrargs.view_type, Reduce} of
- {map, true} -> mrverror(<<"Reduce is invalid for map-only views.">>);
- _ -> ok
- end,
-
- case {Args#mrargs.view_type, GroupLevel, Args#mrargs.keys} of
- {red, exact, _} -> ok;
- {red, _, KeyList} when is_list(KeyList) ->
-            Msg = <<"Multi-key fetches for reduce views must use `group=true`">>,
- mrverror(Msg);
- _ -> ok
- end,
-
- case Args#mrargs.keys of
- Keys when is_list(Keys) -> ok;
- undefined -> ok;
- _ -> mrverror(<<"`keys` must be an array of strings.">>)
- end,
-
- case {Args#mrargs.keys, Args#mrargs.start_key,
- Args#mrargs.end_key} of
- {undefined, _, _} -> ok;
- {[], _, _} -> ok;
- {[_|_], undefined, undefined} -> ok;
- _ -> mrverror(<<"`keys` is incompatible with `key`"
- ", `start_key` and `end_key`">>)
- end,
-
- case Args#mrargs.start_key_docid of
- undefined -> ok;
- SKDocId0 when is_binary(SKDocId0) -> ok;
- _ -> mrverror(<<"`start_key_docid` must be a string.">>)
- end,
-
- case Args#mrargs.end_key_docid of
- undefined -> ok;
- EKDocId0 when is_binary(EKDocId0) -> ok;
- _ -> mrverror(<<"`end_key_docid` must be a string.">>)
- end,
-
- case Args#mrargs.direction of
- fwd -> ok;
- rev -> ok;
- _ -> mrverror(<<"Invalid direction.">>)
- end,
-
- case {Args#mrargs.limit >= 0, Args#mrargs.limit == undefined} of
- {true, _} -> ok;
- {_, true} -> ok;
- _ -> mrverror(<<"`limit` must be a positive integer.">>)
- end,
-
- case Args#mrargs.skip < 0 of
- true -> mrverror(<<"`skip` must be >= 0">>);
- _ -> ok
- end,
-
- case {Args#mrargs.view_type, GroupLevel} of
- {red, exact} -> ok;
- {_, 0} -> ok;
- {red, Int} when is_integer(Int), Int >= 0 -> ok;
- {red, _} -> mrverror(<<"`group_level` must be >= 0">>);
- {map, _} -> mrverror(<<"Invalid use of grouping on a map view.">>)
- end,
-
- case Args#mrargs.stable of
- true -> ok;
- false -> ok;
- _ -> mrverror(<<"Invalid value for `stable`.">>)
- end,
-
- case Args#mrargs.update of
- true -> ok;
- false -> ok;
- lazy -> ok;
- _ -> mrverror(<<"Invalid value for `update`.">>)
- end,
-
- case is_boolean(Args#mrargs.inclusive_end) of
- true -> ok;
- _ -> mrverror(<<"Invalid value for `inclusive_end`.">>)
- end,
-
- case {Args#mrargs.view_type, Args#mrargs.include_docs} of
- {red, true} -> mrverror(<<"`include_docs` is invalid for reduce">>);
- {_, ID} when is_boolean(ID) -> ok;
- _ -> mrverror(<<"Invalid value for `include_docs`">>)
- end,
-
- case {Args#mrargs.view_type, Args#mrargs.conflicts} of
- {_, undefined} -> ok;
- {map, V} when is_boolean(V) -> ok;
- {red, undefined} -> ok;
- {map, _} -> mrverror(<<"Invalid value for `conflicts`.">>);
- {red, _} -> mrverror(<<"`conflicts` is invalid for reduce views.">>)
- end,
-
- SKDocId = case {Args#mrargs.direction, Args#mrargs.start_key_docid} of
- {fwd, undefined} -> <<>>;
- {rev, undefined} -> <<255>>;
- {_, SKDocId1} -> SKDocId1
- end,
-
- EKDocId = case {Args#mrargs.direction, Args#mrargs.end_key_docid} of
- {fwd, undefined} -> <<255>>;
- {rev, undefined} -> <<>>;
- {_, EKDocId1} -> EKDocId1
- end,
-
- case is_boolean(Args#mrargs.sorted) of
- true -> ok;
- _ -> mrverror(<<"Invalid value for `sorted`.">>)
- end,
-
- case get_extra(Args, partition) of
- undefined -> ok;
- Partition when is_binary(Partition), Partition /= <<>> -> ok;
- _ -> mrverror(<<"Invalid value for `partition`.">>)
- end,
-
- Args#mrargs{
- start_key_docid=SKDocId,
- end_key_docid=EKDocId,
- group_level=GroupLevel
- }.
-
-
-determine_group_level(#mrargs{group=undefined, group_level=undefined}) ->
- 0;
-determine_group_level(#mrargs{group=false, group_level=undefined}) ->
- 0;
-determine_group_level(#mrargs{group=false, group_level=Level}) when Level > 0 ->
- mrverror(<<"Can't specify group=false and group_level>0 at the same time">>);
-determine_group_level(#mrargs{group=true, group_level=undefined}) ->
- exact;
-determine_group_level(#mrargs{group_level=GroupLevel}) ->
- GroupLevel.
-
-apply_partition(#mrargs{keys=[{p, _, _} | _]} = Args, _Partition) ->
- Args; % already applied
-
-apply_partition(#mrargs{keys=Keys} = Args, Partition) when Keys /= undefined ->
- Args#mrargs{keys=[{p, Partition, K} || K <- Keys]};
-
-apply_partition(#mrargs{start_key={p, _, _}, end_key={p, _, _}} = Args, _Partition) ->
- Args; % already applied.
-
-apply_partition(Args, Partition) ->
- #mrargs{
- direction = Dir,
- start_key = StartKey,
- end_key = EndKey
- } = Args,
-
- {DefSK, DefEK} = case Dir of
- fwd -> {?LOWEST_KEY, ?HIGHEST_KEY};
- rev -> {?HIGHEST_KEY, ?LOWEST_KEY}
- end,
-
- SK0 = if StartKey /= undefined -> StartKey; true -> DefSK end,
- EK0 = if EndKey /= undefined -> EndKey; true -> DefEK end,
-
- Args#mrargs{
- start_key = {p, Partition, SK0},
- end_key = {p, Partition, EK0}
- }.
-
-%% all_docs is special as it's not really a view and is already
-%% effectively partitioned as the partition is a prefix of all keys.
-apply_all_docs_partition(#mrargs{} = Args, Partition) ->
- #mrargs{
- direction = Dir,
- start_key = StartKey,
- end_key = EndKey
- } = Args,
-
- {DefSK, DefEK} = case Dir of
- fwd ->
- {
- couch_partition:start_key(Partition),
- couch_partition:end_key(Partition)
- };
- rev ->
- {
- couch_partition:end_key(Partition),
- couch_partition:start_key(Partition)
- }
- end,
-
- SK0 = if StartKey == undefined -> DefSK; true -> StartKey end,
- EK0 = if EndKey == undefined -> DefEK; true -> EndKey end,
-
- {SK1, EK1} = case Dir of
- fwd -> {?HIGHEST(DefSK, SK0), ?LOWEST(DefEK, EK0)};
- rev -> {?LOWEST(DefSK, SK0), ?HIGHEST(DefEK, EK0)}
- end,
-
- Args#mrargs{
- start_key = SK1,
- end_key = EK1
- }.
-
-
-check_range(#mrargs{start_key=undefined}, _Cmp) ->
- ok;
-check_range(#mrargs{end_key=undefined}, _Cmp) ->
- ok;
-check_range(#mrargs{start_key=K, end_key=K}, _Cmp) ->
- ok;
-check_range(Args, Cmp) ->
- #mrargs{
- direction=Dir,
- start_key=SK,
- start_key_docid=SKD,
- end_key=EK,
- end_key_docid=EKD
- } = Args,
- case {Dir, Cmp({SK, SKD}, {EK, EKD})} of
- {fwd, false} ->
- throw({query_parse_error,
- <<"No rows can match your key range, reverse your ",
- "start_key and end_key or set descending=true">>});
- {rev, true} ->
- throw({query_parse_error,
- <<"No rows can match your key range, reverse your ",
- "start_key and end_key or set descending=false">>});
- _ -> ok
- end.
-
-
-view_cmp({_Nth, _Lang, View}) ->
- view_cmp(View);
-view_cmp(View) ->
- fun(A, B) -> couch_btree:less(View#mrview.btree, A, B) end.
-
-
-make_header(State) ->
- #mrst{
- update_seq=Seq,
- purge_seq=PurgeSeq,
- id_btree=IdBtree,
- views=Views
- } = State,
-
- #mrheader{
- seq=Seq,
- purge_seq=PurgeSeq,
- id_btree_state=get_btree_state(IdBtree),
- view_states=[make_view_state(V) || V <- Views]
- }.
-
-
-index_file(DbName, Sig) ->
- FileName = couch_index_util:hexsig(Sig) ++ ".view",
- couch_index_util:index_file(mrview, DbName, FileName).
-
-
-compaction_file(DbName, Sig) ->
- FileName = couch_index_util:hexsig(Sig) ++ ".compact.view",
- couch_index_util:index_file(mrview, DbName, FileName).
-
-
-open_file(FName) ->
- case couch_file:open(FName, [nologifmissing]) of
- {ok, Fd} -> {ok, Fd};
- {error, enoent} -> couch_file:open(FName, [create]);
- Error -> Error
- end.
-
-
-delete_files(DbName, Sig) ->
- delete_index_file(DbName, Sig),
- delete_compaction_file(DbName, Sig).
-
-
-delete_index_file(DbName, Sig) ->
- delete_file(index_file(DbName, Sig)).
-
-
-delete_compaction_file(DbName, Sig) ->
- delete_file(compaction_file(DbName, Sig)).
-
-
-delete_file(FName) ->
- case filelib:is_file(FName) of
- true ->
- RootDir = couch_index_util:root_dir(),
- couch_file:delete(RootDir, FName);
- _ ->
- ok
- end.
-
-
-reset_index(Db, Fd, #mrst{sig=Sig}=State) ->
- ok = couch_file:truncate(Fd, 0),
- ok = couch_file:write_header(Fd, {Sig, nil}),
- init_state(Db, Fd, reset_state(State), nil).
-
-
-reset_state(State) ->
- State#mrst{
- fd=nil,
- qserver=nil,
- update_seq=0,
- id_btree=nil,
- views=[View#mrview{btree=nil} || View <- State#mrst.views]
- }.
-
-
-all_docs_key_opts(#mrargs{extra = Extra} = Args) ->
- all_docs_key_opts(Args, Extra).
-
-all_docs_key_opts(#mrargs{keys=undefined}=Args, Extra) ->
- all_docs_key_opts(Args#mrargs{keys=[]}, Extra);
-all_docs_key_opts(#mrargs{keys=[], direction=Dir}=Args, Extra) ->
- [[{dir, Dir}] ++ ad_skey_opts(Args) ++ ad_ekey_opts(Args) ++ Extra];
-all_docs_key_opts(#mrargs{keys=Keys, direction=Dir}=Args, Extra) ->
- lists:map(fun(K) ->
- [{dir, Dir}]
- ++ ad_skey_opts(Args#mrargs{start_key=K})
- ++ ad_ekey_opts(Args#mrargs{end_key=K})
- ++ Extra
- end, Keys).
-
-
-ad_skey_opts(#mrargs{start_key=SKey}) when is_binary(SKey) ->
- [{start_key, SKey}];
-ad_skey_opts(#mrargs{start_key_docid=SKeyDocId}) ->
- [{start_key, SKeyDocId}].
-
-
-ad_ekey_opts(#mrargs{end_key=EKey}=Args) when is_binary(EKey) ->
- Type = if Args#mrargs.inclusive_end -> end_key; true -> end_key_gt end,
- [{Type, EKey}];
-ad_ekey_opts(#mrargs{end_key_docid=EKeyDocId}=Args) ->
- Type = if Args#mrargs.inclusive_end -> end_key; true -> end_key_gt end,
- [{Type, EKeyDocId}].
-
-
-key_opts(Args) ->
- key_opts(Args, []).
-
-key_opts(#mrargs{keys=undefined, direction=Dir}=Args, Extra) ->
- [[{dir, Dir}] ++ skey_opts(Args) ++ ekey_opts(Args) ++ Extra];
-key_opts(#mrargs{keys=Keys, direction=Dir}=Args, Extra) ->
- lists:map(fun(K) ->
- [{dir, Dir}]
- ++ skey_opts(Args#mrargs{start_key=K})
- ++ ekey_opts(Args#mrargs{end_key=K})
- ++ Extra
- end, Keys).
-
-
-skey_opts(#mrargs{start_key=undefined}) ->
- [];
-skey_opts(#mrargs{start_key=SKey, start_key_docid=SKeyDocId}) ->
- [{start_key, {SKey, SKeyDocId}}].
-
-
-ekey_opts(#mrargs{end_key=undefined}) ->
- [];
-ekey_opts(#mrargs{end_key=EKey, end_key_docid=EKeyDocId}=Args) ->
- case Args#mrargs.inclusive_end of
- true -> [{end_key, {EKey, EKeyDocId}}];
- false -> [{end_key_gt, {EKey, reverse_key_default(EKeyDocId)}}]
- end.
-
-
-reverse_key_default(<<>>) -> <<255>>;
-reverse_key_default(<<255>>) -> <<>>;
-reverse_key_default(Key) -> Key.
-
-
-reduced_external_size(Tree) ->
- case couch_btree:full_reduce(Tree) of
- {ok, {_, _, Size}} -> Size;
- % return 0 for versions of the reduce function without Size
- {ok, {_, _}} -> 0
- end.
-
-
-calculate_external_size(Views) ->
- SumFun = fun
- (#mrview{btree=nil}, Acc) ->
- Acc;
- (#mrview{btree=Bt}, Acc) ->
- Acc + reduced_external_size(Bt)
- end,
- {ok, lists:foldl(SumFun, 0, Views)}.
-
-
-calculate_active_size(Views) ->
- FoldFun = fun
- (#mrview{btree=nil}, Acc) ->
- Acc;
- (#mrview{btree=Bt}, Acc) ->
- Acc + couch_btree:size(Bt)
- end,
- {ok, lists:foldl(FoldFun, 0, Views)}.
-
-
-detuple_kvs([], Acc) ->
- lists:reverse(Acc);
-detuple_kvs([KV | Rest], Acc) ->
- {{Key,Id},Value} = KV,
- NKV = [[Key, Id], Value],
- detuple_kvs(Rest, [NKV | Acc]).
-
-
-expand_dups([], Acc) ->
- lists:reverse(Acc);
-expand_dups([{Key, {dups, Vals}} | Rest], Acc) ->
- Expanded = [{Key, Val} || Val <- Vals],
- expand_dups(Rest, Expanded ++ Acc);
-expand_dups([KV | Rest], Acc) ->
- expand_dups(Rest, [KV | Acc]).
-
-
-maybe_load_doc(_Db, _DI, #mrargs{include_docs=false}) ->
- [];
-maybe_load_doc(Db, #doc_info{}=DI, #mrargs{conflicts=true, doc_options=Opts}) ->
- doc_row(couch_index_util:load_doc(Db, DI, [conflicts]), Opts);
-maybe_load_doc(Db, #doc_info{}=DI, #mrargs{doc_options=Opts}) ->
- doc_row(couch_index_util:load_doc(Db, DI, []), Opts).
-
-
-maybe_load_doc(_Db, _Id, _Val, #mrargs{include_docs=false}) ->
- [];
-maybe_load_doc(Db, Id, Val, #mrargs{conflicts=true, doc_options=Opts}) ->
- doc_row(couch_index_util:load_doc(Db, docid_rev(Id, Val), [conflicts]), Opts);
-maybe_load_doc(Db, Id, Val, #mrargs{doc_options=Opts}) ->
- doc_row(couch_index_util:load_doc(Db, docid_rev(Id, Val), []), Opts).
-
-
-doc_row(null, _Opts) ->
- [{doc, null}];
-doc_row(Doc, Opts) ->
- [{doc, couch_doc:to_json_obj(Doc, Opts)}].
-
-
-docid_rev(Id, {Props}) ->
- DocId = couch_util:get_value(<<"_id">>, Props, Id),
- Rev = case couch_util:get_value(<<"_rev">>, Props, nil) of
- nil -> nil;
- Rev0 -> couch_doc:parse_rev(Rev0)
- end,
- {DocId, Rev};
-docid_rev(Id, _) ->
- {Id, nil}.
-
-
-index_of(Key, List) ->
- index_of(Key, List, 1).
-
-
-index_of(_, [], _) ->
- throw({error, missing_named_view});
-index_of(Key, [Key | _], Idx) ->
- Idx;
-index_of(Key, [_ | Rest], Idx) ->
- index_of(Key, Rest, Idx+1).
-
-
-mrverror(Mesg) ->
- throw({query_parse_error, Mesg}).
-
-
-%% Updates 2.x view files to 3.x or later view files
-%% transparently, the first time the 2.x view file is opened by
-%% 3.x or later.
-%%
-%% Here's how it works:
-%%
-%% Before opening a view index,
-%% If no matching index file is found in the new location:
-%% calculate the <= 2.x view signature
-%% if a file with that signature lives in the old location
-%% rename it to the new location with the new signature in the name.
-%% Then proceed to open the view index as usual.
-
-maybe_update_index_file(State) ->
- DbName = State#mrst.db_name,
- NewIndexFile = index_file(DbName, State#mrst.sig),
- % open in read-only mode so we don't create
- % the file if it doesn't exist.
- case file:open(NewIndexFile, [read, raw]) of
- {ok, Fd_Read} ->
- % the new index file exists, there is nothing to do here.
- file:close(Fd_Read);
- _Error ->
- update_index_file(State)
- end.
-
-update_index_file(State) ->
- Sig = sig_vsn_2x(State),
- DbName = State#mrst.db_name,
- FileName = couch_index_util:hexsig(Sig) ++ ".view",
- IndexFile = couch_index_util:index_file("mrview", DbName, FileName),
-
- % If we have an old index, rename it to the new position.
- case file:read_file_info(IndexFile) of
- {ok, _FileInfo} ->
- % Crash if the rename fails for any reason.
-            % If the target already exists, the next request will find the
-            % new file and we are good. We might need to catch this
- % further up to avoid a full server crash.
- NewIndexFile = index_file(DbName, State#mrst.sig),
- couch_log:notice("Attempting to update legacy view index file"
- " from ~p to ~s", [IndexFile, NewIndexFile]),
- ok = filelib:ensure_dir(NewIndexFile),
- ok = file:rename(IndexFile, NewIndexFile),
- couch_log:notice("Successfully updated legacy view index file"
- " ~s", [IndexFile]),
- Sig;
- {error, enoent} ->
- % Ignore missing index file
- ok;
- {error, Reason} ->
- couch_log:error("Failed to update legacy view index file"
- " ~s : ~s", [IndexFile, file:format_error(Reason)]),
- ok
- end.
-
-sig_vsn_2x(State) ->
- #mrst{
- lib = Lib,
- language = Language,
- design_opts = DesignOpts
- } = State,
- SI = proplists:get_value(<<"seq_indexed">>, DesignOpts, false),
- KSI = proplists:get_value(<<"keyseq_indexed">>, DesignOpts, false),
- Views = [old_view_format(V, SI, KSI) || V <- State#mrst.views],
- SigInfo = {Views, Language, DesignOpts, couch_index_util:sort_lib(Lib)},
- couch_hash:md5_hash(term_to_binary(SigInfo)).
-
-old_view_format(View, SI, KSI) ->
-{
- mrview,
- View#mrview.id_num,
- View#mrview.update_seq,
- View#mrview.purge_seq,
- View#mrview.map_names,
- View#mrview.reduce_funs,
- View#mrview.def,
- View#mrview.btree,
- nil,
- nil,
- SI,
- KSI,
- View#mrview.options
-}.
-
-maybe_update_header(#mrheader{} = Header) ->
- Header;
-maybe_update_header(Header) when tuple_size(Header) == 6 ->
- #mrheader{
- seq = element(2, Header),
- purge_seq = element(3, Header),
- id_btree_state = element(4, Header),
- view_states = [make_view_state(S) || S <- element(6, Header)]
- }.
-
-%% End of <= 2.x upgrade code.
-
-make_view_state(#mrview{} = View) ->
- BTState = get_btree_state(View#mrview.btree),
- {
- BTState,
- View#mrview.update_seq,
- View#mrview.purge_seq
- };
-make_view_state({BTState, _SeqBTState, _KSeqBTState, UpdateSeq, PurgeSeq}) ->
- {BTState, UpdateSeq, PurgeSeq};
-make_view_state(nil) ->
- {nil, 0, 0}.
-
-
-get_key_btree_state(ViewState) ->
- element(1, ViewState).
-
-get_update_seq(ViewState) ->
- element(2, ViewState).
-
-get_purge_seq(ViewState) ->
- element(3, ViewState).
-
-get_count(Reduction) ->
- element(1, Reduction).
-
-get_user_reds(Reduction) ->
- element(2, Reduction).
-
-
-% This is for backwards compatibility for seq btree reduces
-get_external_size_reds(Reduction) when is_integer(Reduction) ->
- 0;
-
-get_external_size_reds(Reduction) when tuple_size(Reduction) == 2 ->
- 0;
-
-get_external_size_reds(Reduction) when tuple_size(Reduction) == 3 ->
- element(3, Reduction).
-
-
-make_reduce_fun(Lang, ReduceFuns) ->
- FunSrcs = [FunSrc || {_, FunSrc} <- ReduceFuns],
- fun
- (reduce, KVs0) ->
- KVs = detuple_kvs(expand_dups(KVs0, []), []),
- {ok, Result} = couch_query_servers:reduce(Lang, FunSrcs, KVs),
- ExternalSize = kv_external_size(KVs, Result),
- {length(KVs), Result, ExternalSize};
- (rereduce, Reds) ->
- ExtractFun = fun(Red, {CountsAcc0, URedsAcc0, ExtAcc0}) ->
- CountsAcc = CountsAcc0 + get_count(Red),
- URedsAcc = lists:append(URedsAcc0, [get_user_reds(Red)]),
- ExtAcc = ExtAcc0 + get_external_size_reds(Red),
- {CountsAcc, URedsAcc, ExtAcc}
- end,
- {Counts, UReds, ExternalSize} = lists:foldl(ExtractFun,
- {0, [], 0}, Reds),
- {ok, Result} = couch_query_servers:rereduce(Lang, FunSrcs, UReds),
- {Counts, Result, ExternalSize}
- end.
-
-
-maybe_define_less_fun(#mrview{options = Options}) ->
- case couch_util:get_value(<<"collation">>, Options) of
- <<"raw">> -> undefined;
- _ -> fun couch_ejson_compare:less_json_ids/2
- end.
-
-
-count_reduce(reduce, KVs) ->
- CountFun = fun
- ({_, {dups, Vals}}, Acc) -> Acc + length(Vals);
- (_, Acc) -> Acc + 1
- end,
- Count = lists:foldl(CountFun, 0, KVs),
- {Count, []};
-count_reduce(rereduce, Reds) ->
- CountFun = fun(Red, Acc) ->
- Acc + get_count(Red)
- end,
- Count = lists:foldl(CountFun, 0, Reds),
- {Count, []}.
-
-
-make_user_reds_reduce_fun(Lang, ReduceFuns, NthRed) ->
- LPad = lists:duplicate(NthRed - 1, []),
- RPad = lists:duplicate(length(ReduceFuns) - NthRed, []),
- {_, FunSrc} = lists:nth(NthRed, ReduceFuns),
- fun
- (reduce, KVs0) ->
- KVs = detuple_kvs(expand_dups(KVs0, []), []),
- {ok, Result} = couch_query_servers:reduce(Lang, [FunSrc], KVs),
- {0, LPad ++ Result ++ RPad};
- (rereduce, Reds) ->
- ExtractFun = fun(Reds0) ->
- [lists:nth(NthRed, get_user_reds(Reds0))]
- end,
- UReds = lists:map(ExtractFun, Reds),
- {ok, Result} = couch_query_servers:rereduce(Lang, [FunSrc], UReds),
- {0, LPad ++ Result ++ RPad}
- end.
-
-
-get_btree_state(nil) ->
- nil;
-get_btree_state(#btree{} = Btree) ->
- couch_btree:get_state(Btree).
-
-
-extract_view_reduce({red, {N, _Lang, #mrview{reduce_funs=Reds}}, _Ref}) ->
- {_Name, FunSrc} = lists:nth(N, Reds),
- FunSrc.
-
-
-get_view_keys({Props}) ->
- case couch_util:get_value(<<"keys">>, Props) of
- undefined ->
- undefined;
- Keys when is_list(Keys) ->
- Keys;
- _ ->
- throw({bad_request, "`keys` member must be an array."})
- end.
-
-
-get_view_queries({Props}) ->
- case couch_util:get_value(<<"queries">>, Props) of
- undefined ->
- undefined;
- Queries when is_list(Queries) ->
- Queries;
- _ ->
- throw({bad_request, "`queries` member must be an array."})
- end.
-
-
-kv_external_size(KVList, Reduction) ->
- lists:foldl(fun([[Key, _], Value], Acc) ->
- ?term_size(Key) + ?term_size(Value) + Acc
- end, ?term_size(Reduction), KVList).
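The validation code deleted above confines a partitioned query to a single partition by rewriting its key range: apply_partition/2 wraps start_key and end_key as {p, Partition, Key}, falling back to ?LOWEST_KEY (null) and ?HIGHEST_KEY ({<<255,255,255,255>>}) when the caller gave no range. A sketch of the effect on the #mrargs record (field names as defined in couch_mrview.hrl; the partition value is illustrative):

    %% Forward query against partition <<"sensors">> with no explicit range:
    %%   #mrargs{direction = fwd, start_key = undefined, end_key = undefined}
    %% after apply_partition/2:
    %%   #mrargs{direction = fwd,
    %%           start_key = {p, <<"sensors">>, null},
    %%           end_key   = {p, <<"sensors">>, {<<255,255,255,255>>}}}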
diff --git a/src/couch_mrview/test/eunit/couch_mrview_all_docs_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_all_docs_tests.erl
deleted file mode 100644
index bf8eb7e5b..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_all_docs_tests.erl
+++ /dev/null
@@ -1,140 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_all_docs_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-
-
-setup() ->
- {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map),
- Db.
-
-teardown(Db) ->
- couch_db:close(Db),
- couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
- ok.
-
-
-all_docs_test_() ->
- {
- "_all_docs view tests",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_query/1,
- fun should_query_with_range/1,
- fun should_query_with_range_rev/1,
- fun should_query_with_limit_and_skip/1,
- fun should_query_with_include_docs/1,
- fun should_query_empty_views/1
- ]
- }
- }
- }.
-
-
-should_query(Db) ->
- Result = run_query(Db, []),
- Expect = {ok, [
- {meta, [{total, 11}, {offset, 0}]},
- mk_row(<<"1">>, <<"1-08d53a5760b95fce6df2e2c5b008be39">>),
- mk_row(<<"10">>, <<"1-a05b6ea2bc0243949f103d5b4f15f71e">>),
- mk_row(<<"2">>, <<"1-b57c77a9e6f7574ca6469f0d6dcd78bb">>),
- mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>),
- mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>),
- mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>),
- mk_row(<<"6">>, <<"1-aca21c2e7bc5f8951424fcfc5d1209d8">>),
- mk_row(<<"7">>, <<"1-4374aeec17590d82f16e70f318116ad9">>),
- mk_row(<<"8">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>),
- mk_row(<<"9">>, <<"1-558c8487d9aee25399a91b5d31d90fe2">>),
- mk_row(<<"_design/bar">>, <<"1-a44e1dd1994a7717bf89c894ebd1f081">>)
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_with_range(Db) ->
- Result = run_query(Db, [{start_key, <<"3">>}, {end_key, <<"5">>}]),
- Expect = {ok, [
- {meta, [{total, 11}, {offset, 3}]},
- mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>),
- mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>),
- mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>)
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_with_range_rev(Db) ->
- Result = run_query(Db, [
- {direction, rev},
- {start_key, <<"5">>}, {end_key, <<"3">>},
- {inclusive_end, true}
- ]),
- Expect = {ok, [
- {meta, [{total, 11}, {offset, 5}]},
- mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>),
- mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>),
- mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>)
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_with_limit_and_skip(Db) ->
- Result = run_query(Db, [
- {start_key, <<"2">>},
- {limit, 3},
- {skip, 3}
- ]),
- Expect = {ok, [
- {meta, [{total, 11}, {offset, 5}]},
- mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>),
- mk_row(<<"6">>, <<"1-aca21c2e7bc5f8951424fcfc5d1209d8">>),
- mk_row(<<"7">>, <<"1-4374aeec17590d82f16e70f318116ad9">>)
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_with_include_docs(Db) ->
- Result = run_query(Db, [
- {start_key, <<"8">>},
- {end_key, <<"8">>},
- {include_docs, true}
- ]),
- Doc = {[
- {<<"_id">>,<<"8">>},
- {<<"_rev">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>},
- {<<"val">>, 8}
- ]},
- Val = {[{rev, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>}]},
- Expect = {ok, [
- {meta, [{total, 11}, {offset, 8}]},
- {row, [{id, <<"8">>}, {key, <<"8">>}, {value, Val}, {doc, Doc}]}
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_empty_views(Db) ->
- Result = couch_mrview:query_view(Db, <<"_design/bar">>, <<"bing">>),
- Expect = {ok, [
- {meta, [{total, 0}, {offset, 0}]}
- ]},
- ?_assertEqual(Expect, Result).
-
-
-mk_row(Id, Rev) ->
- {row, [{id, Id}, {key, Id}, {value, {[{rev, Rev}]}}]}.
-
-run_query(Db, Opts) ->
- couch_mrview:query_all_docs(Db, Opts).
diff --git a/src/couch_mrview/test/eunit/couch_mrview_collation_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_collation_tests.erl
deleted file mode 100644
index 5c8cb54b1..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_collation_tests.erl
+++ /dev/null
@@ -1,207 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_collation_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
--define(VALUES, [
- null,
- false,
- true,
-
- 1,
- 2,
- 3.0,
- 4,
-
- <<"a">>,
- <<"A">>,
- <<"aa">>,
- <<"b">>,
- <<"B">>,
- <<"ba">>,
- <<"bb">>,
-
- % U+200B is a zero-width space, which will be ignored by ICU but will cause
- % the raw collator to treat these as three distinct keys
- <<"c">>,
- unicode:characters_to_binary([$c, 16#200B]),
- unicode:characters_to_binary([$c, 16#200B, 16#200B]),
-
- [<<"a">>],
- [<<"b">>],
- [<<"b">>, <<"c">>],
- [<<"b">>, <<"c">>, <<"a">>],
- [<<"b">>, <<"d">>],
- [<<"b">>, <<"d">>, <<"e">>],
-
- {[{<<"a">>, 1}]},
- {[{<<"a">>, 2}]},
- {[{<<"b">>, 1}]},
- {[{<<"b">>, 2}]},
- {[{<<"b">>, 2}, {<<"a">>, 1}]},
- {[{<<"b">>, 2}, {<<"c">>, 2}]}
-]).
-
-
-setup() ->
- {ok, Db1} = couch_mrview_test_util:new_db(?tempdb(), map),
- Docs = [couch_mrview_test_util:ddoc(red) | make_docs()],
- {ok, Db2} = couch_mrview_test_util:save_docs(Db1, Docs),
- Db2.
-
-teardown(Db) ->
- couch_db:close(Db),
- couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
- ok.
-
-
-collation_test_() ->
- {
- "Collation tests",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_collate_fwd/1,
- fun should_collate_rev/1,
- fun should_collate_range_/1,
- fun should_collate_with_inclusive_end_fwd/1,
- fun should_collate_with_inclusive_end_rev/1,
- fun should_collate_without_inclusive_end_fwd/1,
- fun should_collate_without_inclusive_end_rev/1,
- fun should_collate_with_endkey_docid/1,
- fun should_use_collator_for_reduce_grouping/1
- ]
- }
- }
- }.
-
-
-should_collate_fwd(Db) ->
- {ok, Results} = run_query(Db, []),
- Expect = [{meta, [{total, length(?VALUES)}, {offset, 0}]}] ++ rows(),
- ?_assertEquiv(Expect, Results).
-
-should_collate_rev(Db) ->
- {ok, Results} = run_query(Db, [{direction, rev}]),
- Expect = [{meta, [{total, length(?VALUES)}, {offset, 0}]}] ++ lists:reverse(rows()),
- ?_assertEquiv(Expect, Results).
-
-should_collate_range_(Db) ->
- Index = lists:zip(lists:seq(0, length(?VALUES)-1), ?VALUES),
- lists:map(fun(V) ->
- {ok, Results} = run_query(Db, [{start_key, V}, {end_key, V}]),
- Expect = [
- {meta, [{total, length(?VALUES)}, find_offset(Index, V)]} |
- find_matching_rows(Index, V)
- ],
- ?_assertEquiv(Expect, Results)
- end, ?VALUES).
-
-find_offset(Index, Value) ->
- [{Offset, _} | _] = lists:dropwhile(fun({_, V}) ->
- couch_ejson_compare:less(Value, V) =/= 0
- end, Index),
- {offset, Offset}.
-
-find_matching_rows(Index, Value) ->
- Matches = lists:filter(fun({_, V}) ->
- couch_ejson_compare:less(Value, V) =:= 0
- end, Index),
- lists:map(fun({Id, V}) ->
- {row, [{id, list_to_binary(integer_to_list(Id))}, {key, V}, {value, 0}]}
- end, Matches).
-
-should_collate_with_inclusive_end_fwd(Db) ->
- Opts = [{end_key, <<"b">>}, {inclusive_end, true}],
- {ok, Rows0} = run_query(Db, Opts),
- LastRow = lists:last(Rows0),
- Expect = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]},
- ?_assertEqual(Expect, LastRow).
-
-should_collate_with_inclusive_end_rev(Db) ->
- Opts = [{end_key, <<"b">>}, {inclusive_end, true}, {direction, rev}],
- {ok, Rows} = run_query(Db, Opts),
- LastRow = lists:last(Rows),
- Expect = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]},
- ?_assertEqual(Expect, LastRow).
-
-should_collate_without_inclusive_end_fwd(Db) ->
- Opts = [{end_key, <<"b">>}, {inclusive_end, false}],
- {ok, Rows0} = run_query(Db, Opts),
- LastRow = lists:last(Rows0),
- Expect = {row, [{id,<<"9">>}, {key,<<"aa">>}, {value,0}]},
- ?_assertEqual(Expect, LastRow).
-
-should_collate_without_inclusive_end_rev(Db) ->
- Opts = [{end_key, <<"b">>}, {inclusive_end, false}, {direction, rev}],
- {ok, Rows} = run_query(Db, Opts),
- LastRow = lists:last(Rows),
- Expect = {row, [{id,<<"11">>}, {key,<<"B">>}, {value,0}]},
- ?_assertEqual(Expect, LastRow).
-
-should_collate_with_endkey_docid(Db) ->
- ?_test(begin
- {ok, Rows0} = run_query(Db, [
- {end_key, <<"b">>}, {end_key_docid, <<"10">>},
- {inclusive_end, false}
- ]),
- Result0 = lists:last(Rows0),
- Expect0 = {row, [{id,<<"9">>}, {key,<<"aa">>}, {value,0}]},
- ?assertEqual(Expect0, Result0),
-
- {ok, Rows1} = run_query(Db, [
- {end_key, <<"b">>}, {end_key_docid, <<"11">>},
- {inclusive_end, false}
- ]),
- Result1 = lists:last(Rows1),
- Expect1 = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]},
- ?assertEqual(Expect1, Result1)
- end).
-
-should_use_collator_for_reduce_grouping(Db) ->
- UniqueKeys = lists:usort(fun(A, B) ->
- not couch_ejson_compare:less_json(B, A)
- end, ?VALUES),
- {ok, [{meta,_} | Rows]} = reduce_query(Db, [{group_level, exact}]),
- ?_assertEqual(length(UniqueKeys), length(Rows)).
-
-make_docs() ->
- {Docs, _} = lists:foldl(fun(V, {Docs0, Count}) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, list_to_binary(integer_to_list(Count))},
- {<<"foo">>, V}
- ]}),
- {[Doc | Docs0], Count+1}
- end, {[], 0}, ?VALUES),
- Docs.
-
-rows() ->
- {Rows, _} = lists:foldl(fun(V, {Rows0, Count}) ->
- Id = list_to_binary(integer_to_list(Count)),
- Row = {row, [{id, Id}, {key, V}, {value, 0}]},
- {[Row | Rows0], Count+1}
- end, {[], 0}, ?VALUES),
- lists:reverse(Rows).
-
-run_query(Db, Opts) ->
- couch_mrview:query_view(Db, <<"_design/bar">>, <<"zing">>, Opts).
-
-reduce_query(Db, Opts) ->
- couch_mrview:query_view(Db, <<"_design/red">>, <<"zing">>, Opts).
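The zero-width-space keys above exercise the difference between the default ICU collation and raw byte-wise collation. Per the ddoc_to_mrst/2 and maybe_define_less_fun/1 code deleted earlier, a view opts into raw ordering through its per-view options, in which case no custom less fun is installed and keys are compared as plain Erlang terms. An illustrative view definition in EJSON form (the view name mirrors the test's "zing" view; the map body is an assumption):

    {<<"views">>, {[
        {<<"zing">>, {[
            {<<"map">>, <<"function(doc) { emit(doc.foo, 0); }">>},
            {<<"options">>, {[{<<"collation">>, <<"raw">>}]}}
        ]}}
    ]}}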
diff --git a/src/couch_mrview/test/eunit/couch_mrview_compact_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_compact_tests.erl
deleted file mode 100644
index 7664becdc..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_compact_tests.erl
+++ /dev/null
@@ -1,115 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_compact_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-
-setup() ->
- {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map, 1000),
- ok = meck:new(couch_mrview_compactor, [passthrough]),
- Db.
-
-teardown(Db) ->
- meck:unload(),
- couch_db:close(Db),
- couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
- ok.
-
-
-compaction_test_() ->
- {
- "Compaction tests",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_swap/1,
- fun should_remove/1
- ]
- }
- }
- }.
-
-
-should_swap(Db) ->
- ?_test(begin
- couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>),
- {ok, QPid} = start_query(Db),
- {ok, MonRef} = couch_mrview:compact(Db, <<"_design/bar">>, [monitor]),
- receive
- {'DOWN', MonRef, process, _, _} -> ok
- after ?TIMEOUT ->
- erlang:error(
- {assertion_failed,
- [{module, ?MODULE}, {line, ?LINE},
- {reason, "compaction failed"}]})
- end,
- QPid ! {self(), continue},
- receive
- {QPid, Count} ->
- ?assertEqual(1000, Count)
- after ?TIMEOUT ->
- erlang:error(
- {assertion_failed,
- [{module, ?MODULE}, {line, ?LINE},
- {reason, "query failed"}]})
- end
- end).
-
-
-should_remove(Db) ->
- ?_test(begin
- DDoc = <<"_design/bar">>,
- {ok, _Results} = couch_mrview:query_view(Db, DDoc, <<"baz">>),
- {ok, IndexPid} = couch_index_server:get_index(couch_mrview_index, Db, DDoc),
- ok = couch_index:compact(IndexPid, []),
- {ok, CompactorPid} = couch_index:get_compactor_pid(IndexPid),
- {ok, CompactingPid} = couch_index_compactor:get_compacting_pid(CompactorPid),
- MonRef = erlang:monitor(process, CompactingPid),
- exit(CompactingPid, crash),
- receive
- {'DOWN', MonRef, process, _, crash} ->
- meck:wait(couch_mrview_compactor, remove_compacted, '_', 100),
- ?assertEqual(1, meck:num_calls(
- couch_mrview_compactor, remove_compacted, '_', IndexPid)),
- ?assert(is_process_alive(IndexPid)),
- ?assert(is_process_alive(CompactorPid))
- after ?TIMEOUT ->
- erlang:error(
- {assertion_failed, [
- {module, ?MODULE}, {line, ?LINE},
- {reason, "compaction didn't exit :/"}]})
- end
- end).
-
-
-start_query(Db) ->
- Self = self(),
- Pid = spawn(fun() ->
- CB = fun
- (_, wait) -> receive {Self, continue} -> {ok, 0} end;
- ({row, _}, Count) -> {ok, Count+1};
- (_, Count) -> {ok, Count}
- end,
- {ok, Result} =
- couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, [], CB, wait),
- Self ! {self(), Result}
- end),
- {ok, Pid}.
diff --git a/src/couch_mrview/test/eunit/couch_mrview_ddoc_updated_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_ddoc_updated_tests.erl
deleted file mode 100644
index 4310157eb..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_ddoc_updated_tests.erl
+++ /dev/null
@@ -1,145 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_ddoc_updated_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-
-setup() ->
- Name = ?tempdb(),
- couch_server:delete(Name, [?ADMIN_CTX]),
- {ok, Db} = couch_db:create(Name, [?ADMIN_CTX]),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/bar">>},
- {<<"views">>, {[
- {<<"baz">>, {[
- {<<"map">>, <<
- "function(doc) {\n"
- " emit(doc.val, doc.val);\n"
- "}"
- >>}
- ]}}
- ]}}
- ]}),
- [Doc1 | Docs999] = couch_mrview_test_util:make_docs(map, 100),
- {ok, _} = couch_db:update_docs(Db, [DDoc, Doc1], []),
- {ok, Db2} = couch_db:reopen(Db),
-
- % run a query with 1 doc to initialize couch_index process
- CB = fun
- ({row, _}, Count) -> {ok, Count+1};
- (_, Count) -> {ok, Count}
- end,
- {ok, _} =
- couch_mrview:query_view(Db2, <<"_design/bar">>, <<"baz">>, [], CB, 0),
-
- meck:new(couch_index_updater, [passthrough]),
- meck:expect(couch_index_updater, update, fun(Idx, Mod, IdxSt) ->
- timer:sleep(5000),
- meck:passthrough([Idx, Mod, IdxSt])
- end),
-
- % add more docs
- {ok, _} = couch_db:update_docs(Db2, Docs999, []),
- {ok, Db3} = couch_db:reopen(Db2),
- Db3.
-
-teardown(Db) ->
- meck:unload(couch_index_updater),
- couch_db:close(Db),
- couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
- ok.
-
-
-ddoc_update_test_() ->
- {
- "Check ddoc update actions",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun check_indexing_stops_on_ddoc_change/1
- ]
- }
- }
- }.
-
-
-check_indexing_stops_on_ddoc_change(Db) ->
- ?_test(begin
- DDocID = <<"_design/bar">>,
-
- IndexesBefore = get_indexes_by_ddoc(DDocID, 1),
- ?assertEqual(1, length(IndexesBefore)),
- AliveBefore = lists:filter(fun erlang:is_process_alive/1, IndexesBefore),
- ?assertEqual(1, length(AliveBefore)),
-
- {ok, DDoc} = couch_db:open_doc(Db, DDocID, [ejson_body, ?ADMIN_CTX]),
- DDocJson2 = couch_doc:from_json_obj({[
- {<<"_id">>, DDocID},
- {<<"_deleted">>, true},
- {<<"_rev">>, couch_doc:rev_to_str(DDoc#doc.revs)}
- ]}),
-
- % spawn a process for query
- Self = self(),
- QPid = spawn(fun() ->
- {ok, Result} = couch_mrview:query_view(
- Db, <<"_design/bar">>, <<"baz">>, []),
- Self ! {self(), Result}
- end),
-
- % while indexing for the query is in progress, delete DDoc
- {ok, _} = couch_db:update_doc(Db, DDocJson2, []),
- receive
- {QPid, Msg} ->
- ?assertEqual(Msg, ddoc_updated)
- after ?TIMEOUT ->
- erlang:error(
- {assertion_failed, [{module, ?MODULE}, {line, ?LINE},
- {reason, "test failed"}]})
- end,
-
- %% assert that previously running indexes are gone
- IndexesAfter = get_indexes_by_ddoc(DDocID, 0),
- ?assertEqual(0, length(IndexesAfter)),
- AliveAfter = lists:filter(fun erlang:is_process_alive/1, IndexesBefore),
- ?assertEqual(0, length(AliveAfter))
- end).
-
-
-get_indexes_by_ddoc(DDocID, N) ->
- Indexes = test_util:wait(fun() ->
- Indxs = ets:match_object(
- couchdb_indexes_by_db, {'$1', {DDocID, '$2'}}),
- case length(Indxs) == N of
- true ->
- Indxs;
- false ->
- wait
- end
- end),
- lists:foldl(fun({DbName, {_DDocID, Sig}}, Acc) ->
- case ets:lookup(couchdb_indexes_by_sig, {DbName, Sig}) of
- [{_, Pid}] -> [Pid|Acc];
- _ -> Acc
- end
- end, [], Indexes).
-
-
diff --git a/src/couch_mrview/test/eunit/couch_mrview_ddoc_validation_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_ddoc_validation_tests.erl
deleted file mode 100644
index ce2be8904..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_ddoc_validation_tests.erl
+++ /dev/null
@@ -1,422 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_ddoc_validation_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(LIB, {[{<<"mylib">>, {[{<<"lib1">>, <<"x=42">>}]}}]}).
-
-setup() ->
- {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map),
- Db.
-
-teardown(Db) ->
- couch_db:close(Db),
- couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
- ok.
-
-ddoc_validation_test_() ->
- {
- "ddoc validation tests",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_reject_invalid_js_map/1,
- fun should_reject_invalid_js_reduce/1,
- fun should_reject_invalid_builtin_reduce/1,
- fun should_reject_non_object_options/1,
- fun should_reject_non_object_filters/1,
- fun should_accept_obj_in_filters/1,
- fun should_reject_non_object_lists/1,
- fun should_accept_obj_in_lists/1,
- fun should_reject_non_object_shows/1,
- fun should_accept_obj_in_shows/1,
- fun should_reject_non_object_updates/1,
- fun should_accept_obj_in_updates/1,
- fun should_reject_non_object_views/1,
- fun should_reject_non_string_language/1,
- fun should_reject_non_string_validate_doc_update/1,
- fun should_accept_string_rewrites/1,
- fun should_reject_bad_rewrites/1,
- fun should_accept_option/1,
- fun should_accept_any_option/1,
- fun should_accept_filter/1,
- fun should_reject_non_string_or_obj_filter_function/1,
- fun should_accept_list/1,
- fun should_reject_non_string_or_obj_list_function/1,
- fun should_accept_show/1,
- fun should_reject_non_string_or_obj_show_function/1,
- fun should_accept_update/1,
- fun should_reject_non_string_or_obj_update_function/1,
- fun should_accept_view/1,
- fun should_accept_view_with_reduce/1,
- fun should_accept_view_with_lib/1,
- fun should_reject_view_that_is_not_an_object/1,
- fun should_reject_view_without_map_function/1,
- fun should_reject_view_with_non_string_map_function/1,
- fun should_reject_view_with_non_string_reduce_function/1,
- fun should_accept_any_in_lib/1,
- fun should_accept_map_object_for_queries/1,
- fun should_reject_map_non_objects_for_queries/1
- ]
- }
- }
- }.
-
-should_reject_invalid_js_map(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_invalid_js_map">>},
- {<<"views">>, {[
- {<<"foo">>, {[
- {<<"map">>, <<"function(doc) }{">>}
- ]}}
- ]}}
- ]}),
- ?_assertThrow(
- {bad_request, compilation_error, _},
- couch_db:update_doc(Db, Doc, [])).
-
-should_reject_invalid_js_reduce(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_invalid_js_reduce">>},
- {<<"views">>, {[
- {<<"foo">>, {[
- {<<"map">>, <<"function(doc) { emit(null); }">>},
- {<<"reduce">>, <<"function(k, v, r) }{}">>}
- ]}}
- ]}}
- ]}),
- ?_assertThrow(
- {bad_request, compilation_error, _},
- couch_db:update_doc(Db, Doc, [])).
-
-should_reject_invalid_builtin_reduce(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_invalid_builtin_reduce">>},
- {<<"views">>, {[
- {<<"foo">>, {[
- {<<"map">>, <<"function(doc) { emit(null); }">>},
- {<<"reduce">>, <<"_foobar">>}
- ]}}
- ]}}
- ]}),
- ?_assertThrow(
- {bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
-
-should_reject_non_object_options(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_object_options">>},
- {<<"options">>, <<"invalid">>}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
-
-should_reject_non_object_filters(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_object_filters">>},
- {<<"filters">>, <<"invalid">>}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
-
-should_accept_obj_in_filters(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_obj_in_filters">>},
- {<<"filters">>, ?LIB}
- ]}),
- ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
-
-should_reject_non_object_lists(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_object_lists">>},
- {<<"lists">>, <<"invalid">>}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
-
-should_reject_non_object_shows(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_object_shows">>},
- {<<"shows">>, <<"invalid">>}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
-
-should_accept_obj_in_shows(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_obj_in_shows">>},
- {<<"shows">>, ?LIB}
- ]}),
- ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
-
-should_reject_non_object_updates(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_object_updates">>},
- {<<"updates">>, <<"invalid">>}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
-
-should_accept_obj_in_updates(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_obj_in_updates">>},
- {<<"updates">>, ?LIB}
- ]}),
- ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
-
-should_reject_non_object_views(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_object_views">>},
- {<<"views">>, <<"invalid">>}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
-
-should_reject_non_string_language(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_string_language">>},
- {<<"language">>, 1}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
-
-should_reject_non_string_validate_doc_update(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_string_vdu">>},
- {<<"validate_doc_update">>, 1}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
-
-should_accept_string_rewrites(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_array_rewrites">>},
- {<<"rewrites">>, <<"function(req){}">>}
- ]}),
- ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])).
-
-should_reject_bad_rewrites(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_array_rewrites">>},
- {<<"rewrites">>, 42}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
-
-should_accept_option(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_options">>},
- {<<"options">>, {[ {<<"option1">>, <<"function(doc,req){}">>} ]}}
- ]}),
- ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])).
-
-should_accept_any_option(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_any_option">>},
- {<<"options">>, {[ {<<"option1">>, true} ]}}
- ]}),
- ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])).
-
-should_accept_filter(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_filters">>},
- {<<"filters">>, {[ {<<"filter1">>, <<"function(doc,req){}">>} ]}}
- ]}),
- ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])).
-
-should_reject_non_string_or_obj_filter_function(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_string_or_obj_filter_function">>},
- {<<"filters">>, {[ {<<"filter1">>, 1} ]}}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
-
-should_accept_list(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_lists">>},
- {<<"lists">>, {[ {<<"list1">>, <<"function(doc,req){}">>} ]}}
- ]}),
- ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])).
-
-should_reject_non_string_or_obj_list_function(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_string_or_obj_list_function">>},
- {<<"lists">>, {[ {<<"list1">>, 1} ]}}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
-
-should_accept_obj_in_lists(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_obj_in_lists">>},
- {<<"lists">>, ?LIB}
- ]}),
- ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
-
-
-should_accept_show(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_shows">>},
- {<<"shows">>, {[ {<<"show1">>, <<"function(doc,req){}">>} ]}}
- ]}),
- ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])).
-
-should_reject_non_string_or_obj_show_function(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_string_or_obj_show_function">>},
- {<<"shows">>, {[ {<<"show1">>, 1} ]}}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
-
-should_accept_update(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_updates">>},
- {<<"updates">>, {[ {<<"update1">>, <<"function(doc,req){}">>} ]}}
- ]}),
- ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])).
-
-should_reject_non_string_or_obj_update_function(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_string_or_obj_update_function">>},
- {<<"updates">>, {[ {<<"update1">>, 1} ]}}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
-
-should_accept_view(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_view">>},
- {<<"views">>, {[
- {<<"view1">>, {[{<<"map">>, <<"function(d){}">>}]}}
- ]}}
- ]}),
- ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])).
-
-should_accept_view_with_reduce(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_view_with_reduce">>},
- {<<"views">>, {[
- {<<"view1">>, {[
- {<<"map">>, <<"function(d){}">>},
- {<<"reduce">>,<<"function(d){}">>}
- ]}}
- ]}}
- ]}),
- ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])).
-
-should_accept_view_with_lib(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_view_with_lib">>},
- {<<"views">>, {[
- {<<"view1">>, {[
- {<<"map">>, <<"function(d){}">>}
- ]}},
- {<<"lib">>, {[
- {<<"lib1">>, <<"x=42">>}
- ]}}
- ]}}
- ]}),
- ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])).
-
-should_reject_view_that_is_not_an_object(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_object_view">>},
- {<<"views">>, {[{<<"view1">>, <<"thisisbad">>}]}}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
-
-should_reject_view_without_map_function(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_view_without_map">>},
- {<<"views">>, {[
- {<<"view1">>, {[]}}
- ]}}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
-
-
-should_reject_view_with_non_string_map_function(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_view_with_nonstr_map">>},
- {<<"views">>, {[
- {<<"view1">>, {[
- {<<"map">>,{[]}}
- ]}}
- ]}}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
-
-should_reject_view_with_non_string_reduce_function(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_view_with_nonstr_reduce">>},
- {<<"views">>, {[
- {<<"view1">>, {[
- {<<"map">>,<<"function(d){}">>},
- {<<"reduce">>,1}
- ]}}
- ]}}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
-
-should_accept_any_in_lib(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_any_in_lib">>},
- {<<"views">>, {[
- {<<"view1">>, {[
- {<<"map">>, <<"function(d){}">>}
- ]}},
- {<<"lib">>, {[{<<"lib1">>, {[]}}]}}
- ]}}
- ]}),
- ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])).
-
-
-should_accept_map_object_for_queries(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_map_objects_for_queries">>},
- {<<"language">>, <<"query">>},
- {<<"views">>, {[
- {<<"view1">>, {[
- {<<"map">>, {[
- {<<"x">>, <<"y">>}
- ]}}
- ]}}
- ]}}
- ]}),
- ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])).
-
-
-should_reject_map_non_objects_for_queries(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_map_non_objects__with_nonstr_reduce">>},
- {<<"language">>, <<"query">>},
- {<<"views">>, {[
- {<<"view1">>, {[
- {<<"map">>, <<"function(d){}">>}
- ]}}
- ]}}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
diff --git a/src/couch_mrview/test/eunit/couch_mrview_design_docs_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_design_docs_tests.erl
deleted file mode 100644
index aedd42865..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_design_docs_tests.erl
+++ /dev/null
@@ -1,136 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_design_docs_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-
-
-setup() ->
- {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), design),
- Db.
-
-teardown(Db) ->
- couch_db:close(Db),
- couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
- ok.
-
-
-design_docs_test_() ->
- {
- "_design_docs view tests",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_query/1,
- fun should_query_with_range/1,
- fun should_query_with_range_rev/1,
- fun should_query_with_limit_and_skip/1,
- fun should_query_with_include_docs/1
- ]
- }
- }
- }.
-
-
-should_query(Db) ->
- Result = run_query(Db, []),
- Expect = {ok, [
- {meta, [{total, 10}, {offset, 10}]},
- mk_row(<<"_design/bar01">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar02">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar03">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar04">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar05">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar06">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar07">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar08">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar09">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar10">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>)
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_with_range(Db) ->
- Result = run_query(Db, [
- {start_key, <<"_design/bar03">>},
- {end_key, <<"_design/bar05">>}
- ]),
- Expect = {ok, [
- {meta, [{total, 10}, {offset, 12}]},
- mk_row(<<"_design/bar03">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar04">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar05">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>)
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_with_range_rev(Db) ->
- Result = run_query(Db, [
- {direction, rev},
- {start_key, <<"_design/bar05">>}, {end_key, <<"_design/bar03">>},
- {inclusive_end, true}
- ]),
- Expect = {ok, [
- {meta, [{total, 10}, {offset, 5}]},
- mk_row(<<"_design/bar05">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar04">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar03">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>)
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_with_limit_and_skip(Db) ->
- Result = run_query(Db, [
- {start_key, <<"_design/bar02">>},
- {limit, 3},
- {skip, 3}
- ]),
- Expect = {ok, [
- {meta, [{total, 10}, {offset, 14}]},
- mk_row(<<"_design/bar05">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar06">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar07">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>)
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_with_include_docs(Db) ->
- Result = run_query(Db, [
- {start_key, <<"_design/bar08">>},
- {end_key, <<"_design/bar08">>},
- {include_docs, true}
- ]),
- Doc = {[
- {<<"_id">>,<<"_design/bar08">>},
- {<<"_rev">>,<<"1-0b24e44a44af45e51e562fd124ce3007">>},
- {<<"views">>,{[]}}
- ]},
- Val = {[{rev, <<"1-0b24e44a44af45e51e562fd124ce3007">>}]},
- Expect = {ok, [
- {meta, [{total, 10}, {offset, 17}]},
- {row, [{id, <<"_design/bar08">>}, {key, <<"_design/bar08">>},
- {value, Val}, {doc, Doc}]}
- ]},
- ?_assertEqual(Expect, Result).
-
-
-mk_row(Id, Rev) ->
- {row, [{id, Id}, {key, Id}, {value, {[{rev, Rev}]}}]}.
-
-run_query(Db, Opts0) ->
- Opts = [{extra, [{namespace, <<"_design">>}]} | Opts0],
- couch_mrview:query_all_docs(Db, Opts).
diff --git a/src/couch_mrview/test/eunit/couch_mrview_http_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_http_tests.erl
deleted file mode 100644
index bd11c7ad8..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_http_tests.erl
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_http_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-
-mrview_http_test_() ->
- [
- ?_assertEqual(#mrargs{group_level=undefined, group=true},
- couch_mrview_http:parse_params([{"group", "true"}],
- undefined, #mrargs{})),
-
- ?_assertEqual(#mrargs{group_level=1, group=undefined},
- couch_mrview_http:parse_params([{"group_level", "1"}],
- undefined, #mrargs{}))
- ].
diff --git a/src/couch_mrview/test/eunit/couch_mrview_index_info_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_index_info_tests.erl
deleted file mode 100644
index c4c765feb..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_index_info_tests.erl
+++ /dev/null
@@ -1,111 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_index_info_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-
-setup() ->
- {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map),
- couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>),
- {ok, Info} = couch_mrview:get_info(Db, <<"_design/bar">>),
- {Db, Info}.
-
-
-teardown({Db, _}) ->
- couch_db:close(Db),
- couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
- ok.
-
-
-view_info_test_() ->
- {
- "Views index tests",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun sig_is_binary/1,
- fun language_is_js/1,
- fun file_size_is_non_neg_int/1,
- fun active_size_is_non_neg_int/1,
- fun external_size_is_non_neg_int/1,
- fun active_size_less_than_file_size/1,
- fun update_seq_is_non_neg_int/1,
- fun purge_seq_is_non_neg_int/1,
- fun update_opts_is_bin_list/1
- ]
- }
- }
- }.
-
-
-sig_is_binary({_, Info}) ->
- ?_assert(is_binary(prop(signature, Info))).
-
-
-language_is_js({_, Info}) ->
- ?_assertEqual(<<"javascript">>, prop(language, Info)).
-
-
-file_size_is_non_neg_int({_, Info}) ->
- ?_assert(check_non_neg_int([sizes, file], Info)).
-
-
-active_size_is_non_neg_int({_, Info}) ->
- ?_assert(check_non_neg_int([sizes, active], Info)).
-
-
-external_size_is_non_neg_int({_, Info}) ->
- ?_assert(check_non_neg_int([sizes, external], Info)).
-
-
-active_size_less_than_file_size({_, Info}) ->
- ?_assert(prop([sizes, active], Info) < prop([sizes, file], Info)).
-
-
-update_seq_is_non_neg_int({_, Info}) ->
- ?_assert(check_non_neg_int(update_seq, Info)).
-
-
-purge_seq_is_non_neg_int({_, Info}) ->
- ?_assert(check_non_neg_int(purge_seq, Info)).
-
-
-update_opts_is_bin_list({_, Info}) ->
- Opts = prop(update_options, Info),
-    ?_assert(is_list(Opts) andalso
-             (Opts == [] orelse lists:all(fun erlang:is_binary/1, Opts))).
-
-
-check_non_neg_int(Key, Info) ->
- Size = prop(Key, Info),
- is_integer(Size) andalso Size >= 0.
-
-
-prop(Key, {Props}) when is_list(Props) ->
- prop(Key, Props);
-prop([Key], Info) ->
- prop(Key, Info);
-prop([Key | Rest], Info) ->
- prop(Rest, prop(Key, Info));
-prop(Key, Info) when is_atom(Key), is_list(Info) ->
- couch_util:get_value(Key, Info).
diff --git a/src/couch_mrview/test/eunit/couch_mrview_local_docs_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_local_docs_tests.erl
deleted file mode 100644
index b0d25469a..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_local_docs_tests.erl
+++ /dev/null
@@ -1,148 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_local_docs_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-
-
-setup() ->
- {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), local),
- Db.
-
-teardown(Db) ->
- couch_db:close(Db),
- couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
- ok.
-
-
-all_docs_test_() ->
- {
- "_local_docs view tests",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_query/1,
- fun should_query_with_range/1,
- fun should_query_with_range_rev/1,
- fun should_query_with_limit_and_skip/1,
- fun should_query_with_include_docs/1,
- fun should_query_with_update_seq/1
- ]
- }
- }
- }.
-
-
-should_query(Db) ->
- Result = run_query(Db, []),
- Expect = {ok, [
- {meta, [{total, null}, {offset, null}]},
- mk_row(1),
- mk_row(10),
- mk_row(2),
- mk_row(3),
- mk_row(4),
- mk_row(5),
- mk_row(6),
- mk_row(7),
- mk_row(8),
- mk_row(9)
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_with_range(Db) ->
- Result = run_query(Db, [
- {start_key, <<"_local/3">>},
- {end_key, <<"_local/5">>}
- ]),
- Expect = {ok, [
- {meta, [{total, null}, {offset, null}]},
- mk_row(3),
- mk_row(4),
- mk_row(5)
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_with_range_rev(Db) ->
- Result = run_query(Db, [
- {direction, rev},
- {start_key, <<"_local/5">>}, {end_key, <<"_local/3">>},
- {inclusive_end, true}
- ]),
- Expect = {ok, [
- {meta, [{total, null}, {offset, null}]},
- mk_row(5),
- mk_row(4),
- mk_row(3)
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_with_limit_and_skip(Db) ->
- Result = run_query(Db, [
- {start_key, <<"_local/2">>},
- {limit, 3},
- {skip, 3}
- ]),
- Expect = {ok, [
- {meta, [{total, null}, {offset, null}]},
- mk_row(5),
- mk_row(6),
- mk_row(7)
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_with_include_docs(Db) ->
- Result = run_query(Db, [
- {start_key, <<"_local/8">>},
- {end_key, <<"_local/8">>},
- {include_docs, true}
- ]),
- {row, Doc0} = mk_row(8),
- Doc = Doc0 ++ [{doc, {[
- {<<"_id">>, <<"_local/8">>},
- {<<"_rev">>, <<"0-1">>},
- {<<"val">>, 8}
- ]}}],
- Expect = {ok, [
- {meta, [{total, null}, {offset, null}]},
- {row, Doc}
- ]},
- ?_assertEqual(Expect, Result).
-
-should_query_with_update_seq(Db) ->
- Result = run_query(Db, [
- {start_key, <<"_local/2">>},
- {limit, 1},
- {update_seq, true}
- ]),
- Expect = {ok, [
- {meta, [{total, null}, {offset, null}, {update_seq, null}]},
- mk_row(2)
- ]},
- ?_assertEqual(Expect, Result).
-
-mk_row(IntId) ->
- Id = list_to_binary(io_lib:format("_local/~b", [IntId])),
- {row, [{id, Id}, {key, Id}, {value, {[{rev, <<"0-1">>}]}}]}.
-
-run_query(Db, Opts0) ->
- Opts = [{extra, [{namespace, <<"_local">>}]} | Opts0],
- couch_mrview:query_all_docs(Db, Opts).
diff --git a/src/couch_mrview/test/eunit/couch_mrview_map_views_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_map_views_tests.erl
deleted file mode 100644
index 805dc6c74..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_map_views_tests.erl
+++ /dev/null
@@ -1,144 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_map_views_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-
-setup() ->
- {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map),
- Db.
-
-teardown(Db) ->
- couch_db:close(Db),
- couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
- ok.
-
-
-map_views_test_() ->
- {
- "Map views",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_map/1,
- fun should_map_with_range/1,
- fun should_map_with_limit_and_skip/1,
- fun should_map_with_include_docs/1,
- fun should_map_empty_views/1,
- fun should_give_ext_size_seq_indexed_test/1
- ]
- }
- }
- }.
-
-
-should_map(Db) ->
- Result = run_query(Db, []),
- Expect = {ok, [
- {meta, [{total, 10}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
- {row, [{id, <<"6">>}, {key, 6}, {value, 6}]},
- {row, [{id, <<"7">>}, {key, 7}, {value, 7}]},
- {row, [{id, <<"8">>}, {key, 8}, {value, 8}]},
- {row, [{id, <<"9">>}, {key, 9}, {value, 9}]},
- {row, [{id, <<"10">>}, {key, 10}, {value, 10}]}
- ]},
- ?_assertEqual(Expect, Result).
-
-should_map_with_range(Db) ->
- Result = run_query(Db, [
- {direction, rev},
- {start_key, 5}, {end_key, 3},
- {inclusive_end, true}
- ]),
- Expect = {ok, [
- {meta, [{total, 10}, {offset, 5}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}
- ]},
- ?_assertEqual(Expect, Result).
-
-should_map_with_limit_and_skip(Db) ->
- Result = run_query(Db, [
- {start_key, 2},
- {limit, 3},
- {skip, 3}
- ]),
- Expect = {ok, [
- {meta, [{total, 10}, {offset, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
- {row, [{id, <<"6">>}, {key, 6}, {value, 6}]},
- {row, [{id, <<"7">>}, {key, 7}, {value, 7}]}
- ]},
- ?_assertEqual(Expect, Result).
-
-should_map_with_include_docs(Db) ->
- Result = run_query(Db, [
- {start_key, 8},
- {end_key, 8},
- {include_docs, true}
- ]),
- Doc = {[
- {<<"_id">>,<<"8">>},
- {<<"_rev">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>},
- {<<"val">>,8}
- ]},
- Expect = {ok, [
- {meta, [{total, 10}, {offset, 7}]},
- {row, [{id, <<"8">>}, {key, 8}, {value, 8}, {doc, Doc}]}
- ]},
- ?_assertEqual(Expect, Result).
-
-should_map_empty_views(Db) ->
- Result = couch_mrview:query_view(Db, <<"_design/bar">>, <<"bing">>),
- Expect = {ok, [
- {meta, [{total, 0}, {offset, 0}]}
- ]},
- ?_assertEqual(Expect, Result).
-
-should_give_ext_size_seq_indexed_test(Db) ->
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/seqdoc">>},
- {<<"options">>, {[{<<"seq_indexed">>, true}]}},
- {<<"views">>, {[
- {<<"view1">>, {[
- {<<"map">>, <<"function(doc){emit(doc._id, doc._id);}">>}
- ]}}
- ]}
- }
- ]}),
- {ok, _} = couch_db:update_doc(Db, DDoc, []),
- {ok, Db1} = couch_db:open_int(couch_db:name(Db), []),
- {ok, DDoc1} = couch_db:open_doc(Db1, <<"_design/seqdoc">>, [ejson_body]),
- couch_mrview:query_view(Db1, DDoc1, <<"view1">>, [{update, true}]),
- {ok, Info} = couch_mrview:get_info(Db1, DDoc),
- Size = couch_util:get_nested_json_value({Info}, [sizes, external]),
- ok = couch_db:close(Db1),
- ?_assert(is_number(Size)).
-
-
-run_query(Db, Opts) ->
- couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, Opts).
diff --git a/src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl
deleted file mode 100644
index b2969bba0..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl
+++ /dev/null
@@ -1,286 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_purge_docs_fabric_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--define(TIMEOUT, 60). % seconds
-
-
-setup_all() ->
- Ctx = test_util:start_couch([fabric, mem3]),
- meck:new(couch_mrview_index, [passthrough]),
- Ctx.
-
-
-teardown_all(Ctx) ->
- meck:unload(),
- test_util:stop_couch(Ctx).
-
-
-setup() ->
- DbName = ?tempdb(),
- ok = fabric:create_db(DbName, [?ADMIN_CTX, {q, 1}]),
- meck:reset([couch_mrview_index]),
- meck:expect(couch_mrview_index, ensure_local_purge_docs, fun(A, B) ->
- meck:passthrough([A, B])
- end),
- DbName.
-
-
-teardown(DbName) ->
- ok = fabric:delete_db(DbName, [?ADMIN_CTX]).
-
-
-view_purge_fabric_test_() ->
- {
- "Map views",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun test_purge_verify_index/1,
- fun test_purge_hook_before_compaction/1
- ]
- }
- }
- }.
-
-
-test_purge_verify_index(DbName) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Docs1 = couch_mrview_test_util:make_docs(normal, 5),
- {ok, _} = fabric:update_docs(DbName, Docs1, [?ADMIN_CTX]),
- {ok, _} = fabric:update_doc(
- DbName,
- couch_mrview_test_util:ddoc(map),
- [?ADMIN_CTX]
- ),
-
- Result1 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}),
- Expect1 = {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect1, Result1),
-
- {ok, #doc{body = {Props1}}} = get_local_purge_doc(DbName),
- ?assertEqual(0, couch_util:get_value(<<"purge_seq">>, Props1)),
- ShardNames = [Sh || #shard{name = Sh} <- mem3:local_shards(DbName)],
- [ShardDbName | _Rest ] = ShardNames,
- ?assertEqual(true, couch_mrview_index:verify_index_exists(
- ShardDbName, Props1)),
-
- purge_docs(DbName, [<<"1">>]),
-
- Result2 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}),
- Expect2 = {ok, [
- {meta, [{total, 4}, {offset, 0}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect2, Result2),
-
- {ok, #doc{body = {Props2}}} = get_local_purge_doc(DbName),
- ?assertEqual(1, couch_util:get_value(<<"purge_seq">>, Props2)),
- ?assertEqual(true, couch_mrview_index:verify_index_exists(
- ShardDbName, Props2))
- end)}.
-
-
-test_purge_hook_before_compaction(DbName) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Docs1 = couch_mrview_test_util:make_docs(normal, 5),
- {ok, _} = fabric:update_docs(DbName, Docs1, [?ADMIN_CTX]),
- {ok, _} = fabric:update_doc(
- DbName,
- couch_mrview_test_util:ddoc(map),
- [?ADMIN_CTX]
- ),
-
- Result1 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}),
- Expect1 = {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect1, Result1),
-
- purge_docs(DbName, [<<"1">>]),
-
- Result2 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}),
- Expect2 = {ok, [
- {meta, [{total, 4}, {offset, 0}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect2, Result2),
-
- {ok, #doc{body = {Props1}}} = get_local_purge_doc(DbName),
- ?assertEqual(1, couch_util:get_value(<<"purge_seq">>, Props1)),
-
- [ShardName | _] = local_shards(DbName),
- couch_util:with_db(ShardName, fun(Db) ->
- {ok, _} = couch_db:start_compact(Db)
- end),
- wait_compaction(ShardName, ?LINE),
-
- ?assertEqual(ok, meck:wait(1, couch_mrview_index,
- ensure_local_purge_docs, '_', 5000)
- ),
-
-        % Make sure compaction didn't change the purge seq
- {ok, #doc{body = {Props1}}} = get_local_purge_doc(DbName),
- ?assertEqual(1, couch_util:get_value(<<"purge_seq">>, Props1)),
-
- purge_docs(DbName, [<<"2">>]),
-
- couch_util:with_db(ShardName, fun(Db) ->
- {ok, _} = couch_db:start_compact(Db)
- end),
- wait_compaction(ShardName, ?LINE),
-
- ?assertEqual(ok, meck:wait(2, couch_mrview_index,
- ensure_local_purge_docs, '_', 5000)
- ),
-
- % Make sure compaction after a purge didn't overwrite
- % the local purge doc for the index
- {ok, #doc{body = {Props2}}} = get_local_purge_doc(DbName),
- ?assertEqual(1, couch_util:get_value(<<"purge_seq">>, Props2)),
-
- % Force another update to ensure that we update
- % the local doc appropriate after compaction
- Result3 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}),
- Expect3 = {ok, [
- {meta, [{total, 3}, {offset, 0}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect3, Result3),
-
- {ok, #doc{body = {Props3}}} = get_local_purge_doc(DbName),
- ?assertEqual(2, couch_util:get_value(<<"purge_seq">>, Props3)),
-
- % Check that if the local doc doesn't exist that one
- % is created for the index on compaction
- delete_local_purge_doc(DbName),
- ?assertMatch({not_found, _}, get_local_purge_doc(DbName)),
-
- couch_util:with_db(ShardName, fun(Db) ->
- {ok, _} = couch_db:start_compact(Db)
- end),
- wait_compaction(ShardName, ?LINE),
-
- ?assertEqual(ok, meck:wait(3, couch_mrview_index,
- ensure_local_purge_docs, '_', 5000)
- ),
-
- {ok, #doc{body = {Props4}}} = get_local_purge_doc(DbName),
- ?assertEqual(2, couch_util:get_value(<<"purge_seq">>, Props4))
- end)}.
-
-
-get_local_purge_doc(DbName) ->
- {ok, DDoc} = fabric:open_doc(DbName, <<"_design/bar">>, []),
- {ok, IdxState} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
- Sig = IdxState#mrst.sig,
- HexSig = list_to_binary(couch_index_util:hexsig(Sig)),
- DocId = couch_mrview_util:get_local_purge_doc_id(HexSig),
- [ShardName | _] = local_shards(DbName),
- couch_util:with_db(ShardName, fun(Db) ->
- couch_db:open_doc(Db, DocId, [])
- end).
-
-
-delete_local_purge_doc(DbName) ->
- {ok, DDoc} = fabric:open_doc(DbName, <<"_design/bar">>, []),
- {ok, IdxState} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
- Sig = IdxState#mrst.sig,
- HexSig = list_to_binary(couch_index_util:hexsig(Sig)),
- DocId = couch_mrview_util:get_local_purge_doc_id(HexSig),
- NewDoc = #doc{id = DocId, deleted = true},
- [ShardName | _] = local_shards(DbName),
- couch_util:with_db(ShardName, fun(Db) ->
- {ok, _} = couch_db:update_doc(Db, NewDoc, [])
- end).
-
-
-get_rev(#full_doc_info{} = FDI) ->
- #doc_info{
- revs = [#rev_info{} = PrevRev | _]
- } = couch_doc:to_doc_info(FDI),
- PrevRev#rev_info.rev.
-
-
-purge_docs(DbName, DocIds) ->
- lists:foreach(fun(DocId) ->
- FDI = fabric:get_full_doc_info(DbName, DocId, []),
- Rev = get_rev(FDI),
- {ok, [{ok, _}]} = fabric:purge_docs(DbName, [{DocId, [Rev]}], [])
- end, DocIds).
-
-
-wait_compaction(DbName, Line) ->
- WaitFun = fun() ->
- case is_compaction_running(DbName) of
- true -> wait;
- false -> ok
- end
- end,
- case test_util:wait(WaitFun, 10000) of
- timeout ->
- erlang:error({assertion_failed, [
- {module, ?MODULE},
- {line, Line},
- {reason, "Timeout waiting for database compaction"}
- ]});
- _ ->
- ok
- end.
-
-
-is_compaction_running(DbName) ->
- {ok, DbInfo} = couch_util:with_db(DbName, fun(Db) ->
- couch_db:get_db_info(Db)
- end),
- couch_util:get_value(compact_running, DbInfo).
-
-
-local_shards(DbName) ->
- try
- [ShardName || #shard{name = ShardName} <- mem3:local_shards(DbName)]
- catch
- error:database_does_not_exist ->
- []
- end.
diff --git a/src/couch_mrview/test/eunit/couch_mrview_purge_docs_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_purge_docs_tests.erl
deleted file mode 100644
index 62e1410cb..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_purge_docs_tests.erl
+++ /dev/null
@@ -1,575 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_purge_docs_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--define(TIMEOUT, 1000).
-
-
-setup() ->
- meck:new(couch_index_updater, [passthrough]),
- {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map, 5),
- Db.
-
-teardown(Db) ->
- couch_db:close(Db),
- couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
- meck:unload(),
- ok.
-
-view_purge_test_() ->
- {
- "Map views",
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun test_purge_single/1,
- fun test_purge_partial/1,
- fun test_purge_complete/1,
- fun test_purge_nochange/1,
- fun test_purge_index_reset/1,
- fun test_purge_compact_size_check/1,
- fun test_purge_single_for_docid_with_list/1,
- fun test_purge_complete_for_docid_with_list/1,
- fun test_purge_compact_for_stale_purge_cp_without_client/1,
- fun test_purge_compact_for_stale_purge_cp_with_client/1
- ]
- }
- }
- }.
-
-
-test_purge_single(Db) ->
- ?_test(begin
- Result = run_query(Db, []),
- Expect = {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect, Result),
-
- FDI = couch_db:get_full_doc_info(Db, <<"1">>),
- Rev = get_rev(FDI),
- {ok, [{ok, _PRevs}]} = couch_db:purge_docs(
- Db,
- [{<<"UUID1">>, <<"1">>, [Rev]}]
- ),
- {ok, Db2} = couch_db:reopen(Db),
-
- Result2 = run_query(Db2, []),
- Expect2 = {ok, [
- {meta, [{total, 4}, {offset, 0}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect2, Result2)
- end).
-
-
-test_purge_single_for_docid_with_list(Db) ->
- ?_test(begin
- Result = run_query(Db, []),
- Expect = {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect, Result),
-
- FDI = couch_db:get_full_doc_info(Db, <<"1">>),
- Rev = get_rev(FDI),
- {ok, [{ok, _PRevs}]} = couch_db:purge_docs(
- Db,
- [{<<"UUID1">>, "1", [Rev]}]
- ),
- {ok, Db2} = couch_db:reopen(Db),
-
- Result2 = run_query(Db2, []),
- Expect2 = {ok, [
- {meta, [{total, 4}, {offset, 0}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect2, Result2)
- end).
-
-test_purge_partial(Db) ->
- ?_test(begin
- Result = run_query(Db, []),
- Expect = {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect, Result),
-
- FDI1 = couch_db:get_full_doc_info(Db, <<"1">>), Rev1 = get_rev(FDI1),
- Update = {[
- {'_id', <<"1">>},
- {'_rev', couch_doc:rev_to_str({1, [couch_hash:md5_hash(<<"1.2">>)]})},
- {'val', 1.2}
- ]},
- {ok, [_Rev2]} = save_docs(Db, [Update], [replicated_changes]),
-
- PurgeInfos = [{<<"UUID1">>, <<"1">>, [Rev1]}],
-
- {ok, _} = couch_db:purge_docs(Db, PurgeInfos),
- {ok, Db2} = couch_db:reopen(Db),
-
- Result2 = run_query(Db2, []),
- Expect2 = {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1.2}, {value, 1.2}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect2, Result2)
- end).
-
-
-test_purge_complete(Db) ->
- ?_test(begin
- Result = run_query(Db, []),
- Expect = {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect, Result),
-
- FDI1 = couch_db:get_full_doc_info(Db, <<"1">>), Rev1 = get_rev(FDI1),
- FDI2 = couch_db:get_full_doc_info(Db, <<"2">>), Rev2 = get_rev(FDI2),
- FDI5 = couch_db:get_full_doc_info(Db, <<"5">>), Rev5 = get_rev(FDI5),
-
- PurgeInfos = [
- {<<"UUID1">>, <<"1">>, [Rev1]},
- {<<"UUID2">>, <<"2">>, [Rev2]},
- {<<"UUID5">>, <<"5">>, [Rev5]}
- ],
- {ok, _} = couch_db:purge_docs(Db, PurgeInfos),
- {ok, Db2} = couch_db:reopen(Db),
-
- Result2 = run_query(Db2, []),
- Expect2 = {ok, [
- {meta, [{total, 2}, {offset, 0}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}
- ]},
- ?assertEqual(Expect2, Result2)
- end).
-
-
-test_purge_complete_for_docid_with_list(Db) ->
- ?_test(begin
- Result = run_query(Db, []),
- Expect = {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect, Result),
-
- FDI1 = couch_db:get_full_doc_info(Db, <<"1">>), Rev1 = get_rev(FDI1),
- FDI2 = couch_db:get_full_doc_info(Db, <<"2">>), Rev2 = get_rev(FDI2),
- FDI5 = couch_db:get_full_doc_info(Db, <<"5">>), Rev5 = get_rev(FDI5),
-
- PurgeInfos = [
- {<<"UUID1">>, "1", [Rev1]},
- {<<"UUID2">>, "2", [Rev2]},
- {<<"UUID5">>, "5", [Rev5]}
- ],
- {ok, _} = couch_db:purge_docs(Db, PurgeInfos),
- {ok, Db2} = couch_db:reopen(Db),
-
- Result2 = run_query(Db2, []),
- Expect2 = {ok, [
- {meta, [{total, 2}, {offset, 0}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}
- ]},
- ?assertEqual(Expect2, Result2)
- end).
-
-
-test_purge_nochange(Db) ->
- ?_test(begin
- Result = run_query(Db, []),
- Expect = {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect, Result),
-
- FDI1 = couch_db:get_full_doc_info(Db, <<"1">>),
- Rev1 = get_rev(FDI1),
-
- PurgeInfos = [
- {<<"UUID1">>, <<"6">>, [Rev1]}
- ],
- {ok, _} = couch_db:purge_docs(Db, PurgeInfos),
- {ok, Db2} = couch_db:reopen(Db),
-
- Result2 = run_query(Db2, []),
- Expect2 = {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect2, Result2)
- end).
-
-
-test_purge_index_reset(Db) ->
- ?_test(begin
- ok = couch_db:set_purge_infos_limit(Db, 2),
- {ok, Db1} = couch_db:reopen(Db),
-
- Result = run_query(Db1, []),
- Expect = {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect, Result),
-
- PurgeInfos = lists:map(fun(I) ->
- DocId = list_to_binary(integer_to_list(I)),
- FDI = couch_db:get_full_doc_info(Db, DocId),
- Rev = get_rev(FDI),
- {couch_uuids:random(), DocId, [Rev]}
- end, lists:seq(1, 5)),
- {ok, _} = couch_db:purge_docs(Db1, PurgeInfos),
-
- {ok, Db2} = couch_db:reopen(Db1),
-
- % Forcibly set the purge doc to a newer purge
- % sequence to force an index reset. This should
- % never happen in real life but the reset
- % is required for correctness.
- {ok, #doc{body = {OldProps}} = LocalDoc} = get_local_purge_doc(Db2),
- NewPurgeSeq = {<<"purge_seq">>, 5},
- NewProps = lists:keyreplace(<<"purge_seq">>, 1, OldProps, NewPurgeSeq),
- RewindDoc = LocalDoc#doc{body = {NewProps}},
- {ok, _} = couch_db:update_doc(Db2, RewindDoc, []),
-
- % Compact the database to remove purge infos
- {ok, _} = couch_db:start_compact(Db2),
- wait_compaction(couch_db:name(Db), "database", ?LINE),
-
- {ok, Db3} = couch_db:reopen(Db2),
- Result2 = run_query(Db3, []),
- Expect2 = {ok, [
- {meta, [{total, 0}, {offset, 0}]}
- ]},
- ?assertEqual(Expect2, Result2),
-
- % Assert that we had a reset
- meck:wait(
- 1,
- couch_index_updater,
- handle_info,
- [{'EXIT', '_', {reset, '_'}}, '_'],
- 5000
- )
- end).
-
-
-test_purge_compact_size_check(Db) ->
- ?_test(begin
- DbName = couch_db:name(Db),
- Docs = couch_mrview_test_util:make_docs(normal, 6, 200),
- {ok, Db1} = couch_mrview_test_util:save_docs(Db, Docs),
- _Result = run_query(Db1, []),
- DiskSizeBefore = db_disk_size(DbName),
-
- PurgedDocsNum = 150,
- IdsRevs = lists:foldl(fun(Id, CIdRevs) ->
- Id1 = docid(Id),
- FDI1 = couch_db:get_full_doc_info(Db1, Id1),
- Rev1 = get_rev(FDI1),
- UUID1 = uuid(Id),
- [{UUID1, Id1, [Rev1]} | CIdRevs]
- end, [], lists:seq(1, PurgedDocsNum)),
- {ok, _} = couch_db:purge_docs(Db1, IdsRevs),
-
- {ok, Db2} = couch_db:reopen(Db1),
- _Result1 = run_query(Db2, []),
- {ok, PurgedIdRevs} = couch_db:fold_purge_infos(
- Db2,
- 0,
- fun fold_fun/2,
- [],
- []
- ),
- ?assertEqual(PurgedDocsNum, length(PurgedIdRevs)),
- config:set("couchdb", "file_compression", "snappy", false),
-
- {ok, Db3} = couch_db:open_int(DbName, []),
- {ok, _CompactPid} = couch_db:start_compact(Db3),
- wait_compaction(DbName, "database", ?LINE),
- ok = couch_db:close(Db3),
- DiskSizeAfter = db_disk_size(DbName),
- ?assert(DiskSizeBefore > DiskSizeAfter)
- end).
-
-
-test_purge_compact_for_stale_purge_cp_without_client(Db) ->
- ?_test(begin
- DbName = couch_db:name(Db),
- % add more documents to database for purge
- Docs = couch_mrview_test_util:make_docs(normal, 6, 200),
- {ok, Db1} = couch_mrview_test_util:save_docs(Db, Docs),
-
- % change PurgedDocsLimit to 10 from 1000 to
- % avoid timeout of eunit test
- PurgedDocsLimit = 10,
- couch_db:set_purge_infos_limit(Db1, PurgedDocsLimit),
-
- % purge 150 documents
- PurgedDocsNum = 150,
- PurgeInfos = lists:foldl(fun(Id, CIdRevs) ->
- Id1 = docid(Id),
- FDI1 = couch_db:get_full_doc_info(Db1, Id1),
- Rev1 = get_rev(FDI1),
- UUID1 = uuid(Id),
- [{UUID1, Id1, [Rev1]} | CIdRevs]
- end, [], lists:seq(1, PurgedDocsNum)),
- {ok, _} = couch_db:purge_docs(Db1, PurgeInfos),
-
- {ok, Db2} = couch_db:reopen(Db1),
- {ok, PurgedIdRevs} = couch_db:fold_purge_infos(
- Db2,
- 0,
- fun fold_fun/2,
- [],
- []
- ),
- ?assertEqual(PurgedDocsNum, length(PurgedIdRevs)),
-
- % run compaction to trigger pruning of purge tree
- {ok, Db3} = couch_db:open_int(DbName, []),
- {ok, _CompactPid} = couch_db:start_compact(Db3),
- wait_compaction(DbName, "database", ?LINE),
- ok = couch_db:close(Db3),
-
- % check the remaining purge requests in purge tree
- {ok, Db4} = couch_db:reopen(Db3),
- OldestPSeq = couch_db:get_oldest_purge_seq(Db4),
- {ok, PurgedIdRevs2} = couch_db:fold_purge_infos(
- Db4,
- OldestPSeq - 1,
- fun fold_fun/2,
- [],
- []
- ),
- ?assertEqual(PurgedDocsLimit, length(PurgedIdRevs2))
- end).
-
-
-test_purge_compact_for_stale_purge_cp_with_client(Db) ->
- ?_test(begin
- DbName = couch_db:name(Db),
- % add more documents to database for purge
- Docs = couch_mrview_test_util:make_docs(normal, 6, 200),
- {ok, Db1} = couch_mrview_test_util:save_docs(Db, Docs),
-
- % change PurgedDocsLimit to 10 from 1000 to
- % avoid timeout of eunit test
- PurgedDocsLimit = 10,
- couch_db:set_purge_infos_limit(Db1, PurgedDocsLimit),
- _Result = run_query(Db1, []),
-
- % first purge 30 documents
- PurgedDocsNum1 = 30,
- IdsRevs = lists:foldl(fun(Id, CIdRevs) ->
- Id1 = docid(Id),
- FDI1 = couch_db:get_full_doc_info(Db1, Id1),
- Rev1 = get_rev(FDI1),
- UUID1 = uuid(Id),
- [{UUID1, Id1, [Rev1]} | CIdRevs]
- end, [], lists:seq(1, PurgedDocsNum1)),
- {ok, _} = couch_db:purge_docs(Db1, IdsRevs),
-
- {ok, Db2} = couch_db:reopen(Db1),
- % run query again to reflect purge request to mrview
- _Result1 = run_query(Db2, []),
- {ok, PurgedIdRevs} = couch_db:fold_purge_infos(
- Db2,
- 0,
- fun fold_fun/2,
- [],
- []
- ),
- ?assertEqual(PurgedDocsNum1, length(PurgedIdRevs)),
-
- % then purge 120 documents
- PurgedDocsNum2 = 150,
- IdsRevs2 = lists:foldl(fun(Id, CIdRevs) ->
- Id1 = docid(Id),
- FDI1 = couch_db:get_full_doc_info(Db1, Id1),
- Rev1 = get_rev(FDI1),
- UUID1 = uuid(Id),
- [{UUID1, Id1, [Rev1]} | CIdRevs]
- end, [], lists:seq(PurgedDocsNum1 + 1, PurgedDocsNum2)),
- {ok, _} = couch_db:purge_docs(Db2, IdsRevs2),
-
- % run compaction to trigger pruning of purge tree
- % only the first 30 purge requests are pruned
- {ok, Db3} = couch_db:open_int(DbName, []),
- {ok, _CompactPid} = couch_db:start_compact(Db3),
- wait_compaction(DbName, "database", ?LINE),
- ok = couch_db:close(Db3),
-
- % check the remaining purge requests in purge tree
- {ok, Db4} = couch_db:reopen(Db3),
- OldestPSeq = couch_db:get_oldest_purge_seq(Db4),
- {ok, PurgedIdRevs2} = couch_db:fold_purge_infos(
- Db4,
- OldestPSeq - 1,
- fun fold_fun/2,
- [],
- []
- ),
- ?assertEqual(PurgedDocsNum2 - PurgedDocsNum1, length(PurgedIdRevs2))
- end).
-
-
-get_local_purge_doc(Db) ->
- {ok, DDoc} = couch_db:open_doc(Db, <<"_design/bar">>, []),
- {ok, IdxState} = couch_mrview_util:ddoc_to_mrst(couch_db:name(Db), DDoc),
- Sig = IdxState#mrst.sig,
- HexSig = list_to_binary(couch_index_util:hexsig(Sig)),
- DocId = couch_mrview_util:get_local_purge_doc_id(HexSig),
- couch_db:open_doc(Db, DocId, []).
-
-
-run_query(Db, Opts) ->
- couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, Opts).
-
-
-save_docs(Db, JsonDocs, Options) ->
- Docs = lists:map(fun(JDoc) ->
- couch_doc:from_json_obj(?JSON_DECODE(?JSON_ENCODE(JDoc)))
- end, JsonDocs),
- Opts = [full_commit | Options],
- case lists:member(replicated_changes, Options) of
- true ->
- {ok, []} = couch_db:update_docs(
- Db, Docs, Opts, replicated_changes),
- {ok, lists:map(fun(Doc) ->
- {Pos, [RevId | _]} = Doc#doc.revs,
- {Pos, RevId}
- end, Docs)};
- false ->
- {ok, Resp} = couch_db:update_docs(Db, Docs, Opts),
- {ok, [Rev || {ok, Rev} <- Resp]}
- end.
-
-
-get_rev(#full_doc_info{} = FDI) ->
- #doc_info{
- revs = [#rev_info{} = PrevRev | _]
- } = couch_doc:to_doc_info(FDI),
- PrevRev#rev_info.rev.
-
-
-db_disk_size(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, Info} = couch_db:get_db_info(Db),
- ok = couch_db:close(Db),
- active_size(Info).
-
-
-active_size(Info) ->
- couch_util:get_nested_json_value({Info}, [sizes, active]).
-
-
-wait_compaction(DbName, Kind, Line) ->
- WaitFun = fun() ->
- case is_compaction_running(DbName) of
- true -> wait;
- false -> ok
- end
- end,
- case test_util:wait(WaitFun, 10000) of
- timeout ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, Line},
- {reason, "Timeout waiting for "
- ++ Kind
- ++ " database compaction"}]});
- _ ->
- ok
- end.
-
-
-is_compaction_running(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, DbInfo} = couch_db:get_db_info(Db),
- couch_db:close(Db),
- couch_util:get_value(compact_running, DbInfo).
-
-
-fold_fun({_PSeq, _UUID, Id, Revs}, Acc) ->
- {ok, [{Id, Revs} | Acc]}.
-
-
-docid(I) ->
- list_to_binary(integer_to_list(I)).
-
-
-uuid(I) ->
- Str = io_lib:format("UUID~4..0b", [I]),
- iolist_to_binary(Str).
diff --git a/src/couch_mrview/test/eunit/couch_mrview_red_views_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_red_views_tests.erl
deleted file mode 100644
index b83686113..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_red_views_tests.erl
+++ /dev/null
@@ -1,95 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_red_views_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-
-setup() ->
- {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), red),
- Db.
-
-teardown(Db) ->
- couch_db:close(Db),
- couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
- ok.
-
-
-reduce_views_test_() ->
- {
- "Reduce views",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_reduce_basic/1,
- fun should_reduce_key_range/1,
- fun should_reduce_with_group_level/1,
- fun should_reduce_with_group_exact/1
- ]
- }
- }
- }.
-
-
-should_reduce_basic(Db) ->
- Result = run_query(Db, []),
- Expect = {ok, [
- {meta, []},
- {row, [{key, null}, {value, 55}]}
- ]},
- ?_assertEqual(Expect, Result).
-
-should_reduce_key_range(Db) ->
- Result = run_query(Db, [{start_key, [0, 2]}, {end_key, [0, 4]}]),
- Expect = {ok, [
- {meta, []},
- {row, [{key, null}, {value, 6}]}
- ]},
- ?_assertEqual(Expect, Result).
-
-should_reduce_with_group_level(Db) ->
- Result = run_query(Db, [{group_level, 1}]),
- Expect = {ok, [
- {meta, []},
- {row, [{key, [0]}, {value, 30}]},
- {row, [{key, [1]}, {value, 25}]}
- ]},
- ?_assertEqual(Expect, Result).
-
-should_reduce_with_group_exact(Db) ->
- Result = run_query(Db, [{group_level, exact}]),
- Expect = {ok, [
- {meta, []},
- {row, [{key, [0, 2]}, {value, 2}]},
- {row, [{key, [0, 4]}, {value, 4}]},
- {row, [{key, [0, 6]}, {value, 6}]},
- {row, [{key, [0, 8]}, {value, 8}]},
- {row, [{key, [0, 10]}, {value, 10}]},
- {row, [{key, [1, 1]}, {value, 1}]},
- {row, [{key, [1, 3]}, {value, 3}]},
- {row, [{key, [1, 5]}, {value, 5}]},
- {row, [{key, [1, 7]}, {value, 7}]},
- {row, [{key, [1, 9]}, {value, 9}]}
- ]},
- ?_assertEqual(Expect, Result).
-
-
-run_query(Db, Opts) ->
- couch_mrview:query_view(Db, <<"_design/red">>, <<"baz">>, Opts).
diff --git a/src/couch_mrview/test/eunit/couch_mrview_util_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_util_tests.erl
deleted file mode 100644
index 7046c9bb2..000000000
--- a/src/couch_mrview/test/eunit/couch_mrview_util_tests.erl
+++ /dev/null
@@ -1,39 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_util_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-
-
-couch_mrview_util_test_() ->
- [
- ?_assertEqual(0, validate_group_level(undefined, undefined)),
- ?_assertEqual(exact, validate_group_level(true, undefined)),
- ?_assertEqual(0, validate_group_level(false, undefined)),
- ?_assertEqual(1, validate_group_level(undefined, 1)),
- ?_assertEqual(0, validate_group_level(true, 0)),
- ?_assertEqual(0, validate_group_level(undefined, 0)),
- ?_assertEqual(1, validate_group_level(true, 1)),
- ?_assertEqual(0, validate_group_level(false, 0)),
- ?_assertThrow({query_parse_error,
- <<"Can't specify group=false and group_level>0 at the same time">>},
- validate_group_level(false,1))
- ].
-
-validate_group_level(Group, GroupLevel) ->
- Args0 = #mrargs{group=Group, group_level=GroupLevel, view_type=red},
- Args1 = couch_mrview_util:validate_args(Args0),
- Args1#mrargs.group_level.
-
diff --git a/src/couch_peruser/.gitignore b/src/couch_peruser/.gitignore
deleted file mode 100644
index 93fc2e28b..000000000
--- a/src/couch_peruser/.gitignore
+++ /dev/null
@@ -1,9 +0,0 @@
-/.eunit
-/doc
-/ebin
-/deps
-/.rebar
-/couchperuser-*
-erl_crash.dump
-TEST-*.xml
-*.beam
diff --git a/src/couch_peruser/LICENSE b/src/couch_peruser/LICENSE
deleted file mode 100644
index d64569567..000000000
--- a/src/couch_peruser/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/src/couch_peruser/README.md b/src/couch_peruser/README.md
deleted file mode 100644
index 64a05188c..000000000
--- a/src/couch_peruser/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-# couch_peruser [![Build Status](https://travis-ci.org/apache/couchdb-peruser.svg?branch=master)](https://travis-ci.org/apache/couchdb-peruser)
-
-couch_peruser is a CouchDB application that ensures that a private per-user
-database exists for each document in _users. These databases are
-writable only by the corresponding user. Databases are in the form:
-
- userdb-{hex encoded username}
-
-For example, the user `bob` will have a database named `userdb-626f62`.
-
-The reason for hex encoding is that CouchDB usernames have no character
-restrictions, but database names do. Hex encoding the UTF-8 representation
-of the username is a transformation that is easy to implement correctly in
-just about any language, especially JavaScript and Erlang. Other encodings
-would be possible, but would require additional client- and server-side
-code to support them. This is the simplest scheme that is obviously correct.
-
-## Implementation Notes
-
-The module itself is a `gen_server` and it implements the `mem3_cluster`
-behaviour.
-
-In a CouchDB cluster, the module runs on each node in the cluster. On startup,
-it launches a changes listener for each shard of the `authentication_db`
-(`_users`).
-
-In a cluster, when a change notification comes in (after a user doc has been
-created, updated, or deleted), each node independently calculates whether it
-should handle the notification, based on the current list of active nodes in
-the cluster. This avoids concurrent updates to the internal `_dbs` database
-and the conflicts they would cause, while still ensuring that at least one
-node handles each notification. The mechanism that handles this survives
-cluster reconfigurations transparently.
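
As a quick illustration of the naming scheme described in the README above, here
is a minimal Erlang sketch of the hex encoding. The module and function names
(`peruser_name_example`, `peruser_db_name/1`, `hex_digit/1`) are made up for this
illustration only; the removed couch_peruser module uses its own internal
`user_db_name/2` helper, shown later in this diff.

    %% Minimal sketch (not part of the removed code): hex-encode the UTF-8
    %% bytes of a username and prepend the default "userdb-" prefix.
    -module(peruser_name_example).
    -export([peruser_db_name/1]).

    peruser_db_name(User) when is_binary(User) ->
        %% Two lowercase hex digits per byte of the username.
        Hex = << <<(hex_digit(B bsr 4)), (hex_digit(B band 15))>> || <<B>> <= User >>,
        <<"userdb-", Hex/binary>>.

    hex_digit(N) when N < 10 -> $0 + N;
    hex_digit(N) -> $a + N - 10.

    %% Example: peruser_db_name(<<"bob">>) returns <<"userdb-626f62">>,
    %% matching the example given in the README.
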
diff --git a/src/couch_peruser/src/couch_peruser.app.src b/src/couch_peruser/src/couch_peruser.app.src
deleted file mode 100644
index 6cfaf4421..000000000
--- a/src/couch_peruser/src/couch_peruser.app.src
+++ /dev/null
@@ -1,20 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, couch_peruser, [
- {description, "couch_peruser - maintains per-user databases in CouchDB"},
- {vsn, git},
- {registered, [couch_peruser, couch_peruser_sup]},
- {applications, [kernel, stdlib, config, couch, fabric, mem3]},
- {mod, {couch_peruser_app, []}},
- {env, []}
-]}.
diff --git a/src/couch_peruser/src/couch_peruser.erl b/src/couch_peruser/src/couch_peruser.erl
deleted file mode 100644
index 4c06e8f27..000000000
--- a/src/couch_peruser/src/couch_peruser.erl
+++ /dev/null
@@ -1,423 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_peruser).
--behaviour(gen_server).
--behaviour(mem3_cluster).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-% gen_server callbacks
--export([start_link/0, init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3, format_status/2]).
-
--export([init_changes_handler/1, changes_handler/3]).
-
-% mem3_cluster callbacks
--export([
- cluster_stable/1,
- cluster_unstable/1
-]).
-
--record(changes_state, {
- parent :: pid(),
- db_name :: binary(),
- delete_dbs :: boolean(),
- changes_pid :: pid(),
- changes_ref :: reference(),
- q_for_peruser_db :: integer(),
- peruser_dbname_prefix :: binary()
-}).
-
--record(state, {
- parent :: pid(),
- db_name :: binary(),
- delete_dbs :: boolean(),
- states :: list(),
- mem3_cluster_pid :: pid(),
- cluster_stable :: boolean(),
- q_for_peruser_db :: integer(),
- peruser_dbname_prefix :: binary()
-}).
-
--define(DEFAULT_USERDB_PREFIX, "userdb-").
--define(RELISTEN_DELAY, 5000).
--define(DEFAULT_QUIET_PERIOD, 60). % seconds
--define(DEFAULT_START_PERIOD, 5). % seconds
-
-%%
-%% Please leave in the commented-out couch_log:debug calls, thanks! — Jan
-%%
--spec start_link() -> {ok, pid()} | ignore | {error, term()}.
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
--spec init_state() -> #state{}.
-init_state() ->
- couch_log:debug("peruser: starting on node ~p in pid ~p", [node(), self()]),
- case config:get_boolean("couch_peruser", "enable", false) of
- false ->
- couch_log:debug("peruser: disabled on node ~p", [node()]),
- #state{};
- true ->
- couch_log:debug("peruser: enabled on node ~p", [node()]),
- DbName = ?l2b(config:get(
- "couch_httpd_auth", "authentication_db", "_users")),
- DeleteDbs = config:get_boolean("couch_peruser", "delete_dbs", false),
- Q = config:get_integer("couch_peruser", "q", 1),
- Prefix = config:get("couch_peruser", "database_prefix", ?DEFAULT_USERDB_PREFIX),
- case couch_db:validate_dbname(Prefix) of
- ok -> ok;
- Error ->
- couch_log:error("couch_peruser can't proceed as illegal database prefix ~p.
- Error: ~p", [Prefix, Error]),
- throw(Error)
- end,
-
-
- % set up cluster-stable listener
- Period = abs(config:get_integer("couch_peruser", "cluster_quiet_period",
- ?DEFAULT_QUIET_PERIOD)),
- StartPeriod = abs(config:get_integer("couch_peruser",
- "cluster_start_period", ?DEFAULT_START_PERIOD)),
-
- {ok, Mem3Cluster} = mem3_cluster:start_link(?MODULE, self(), StartPeriod,
- Period),
-
- #state{
- parent = self(),
- db_name = DbName,
- delete_dbs = DeleteDbs,
- mem3_cluster_pid = Mem3Cluster,
- cluster_stable = false,
- q_for_peruser_db = Q,
- peruser_dbname_prefix = ?l2b(Prefix)
- }
- end.
-
-
--spec start_listening(State :: #state{}) -> #state{} | ok.
-start_listening(#state{states=ChangesStates}=State)
- when length(ChangesStates) > 0 ->
- % couch_log:debug("peruser: start_listening() already run on node ~p in pid ~p", [node(), self()]),
- State;
-start_listening(#state{db_name=DbName, delete_dbs=DeleteDbs,
- q_for_peruser_db = Q, peruser_dbname_prefix = Prefix} = State) ->
- % couch_log:debug("peruser: start_listening() on node ~p", [node()]),
- try
- States = lists:map(fun (A) ->
- S = #changes_state{
- parent = State#state.parent,
- db_name = A#shard.name,
- delete_dbs = DeleteDbs,
- q_for_peruser_db = Q,
- peruser_dbname_prefix = Prefix
- },
- {Pid, Ref} = spawn_opt(
- ?MODULE, init_changes_handler, [S], [link, monitor]),
- S#changes_state{changes_pid=Pid, changes_ref=Ref}
- end, mem3:local_shards(DbName)),
- % couch_log:debug("peruser: start_listening() States ~p", [States]),
-
- State#state{states = States, cluster_stable = true}
- catch error:database_does_not_exist ->
- couch_log:warning("couch_peruser can't proceed as underlying database (~s) is missing, disables itself.", [DbName]),
- config:set("couch_peruser", "enable", "false", lists:concat([binary_to_list(DbName), " is missing"]))
- end.
-
--spec init_changes_handler(ChangesState :: #changes_state{}) -> ok.
-init_changes_handler(#changes_state{db_name=DbName} = ChangesState) ->
- % couch_log:debug("peruser: init_changes_handler() on DbName ~p", [DbName]),
- try
- {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX, sys_db]),
- FunAcc = {fun ?MODULE:changes_handler/3, ChangesState},
- (couch_changes:handle_db_changes(
- #changes_args{feed="continuous", timeout=infinity},
- {json_req, null},
- Db))(FunAcc)
- catch error:database_does_not_exist ->
- ok
- end.
-
--type db_change() :: {atom(), tuple(), binary()}.
--spec changes_handler(
- Change :: db_change(),
- ResultType :: any(),
- ChangesState :: #changes_state{}) -> #changes_state{}.
-changes_handler(
- {change, {Doc}, _Prepend},
- _ResType,
- ChangesState=#changes_state{db_name=DbName, q_for_peruser_db = Q,
- peruser_dbname_prefix = Prefix}) ->
- % couch_log:debug("peruser: changes_handler() on DbName/Doc ~p/~p", [DbName, Doc]),
-
- case couch_util:get_value(<<"id">>, Doc) of
- <<"org.couchdb.user:",User/binary>> = DocId ->
- case should_handle_doc(DbName, DocId) of
- true ->
- case couch_util:get_value(<<"deleted">>, Doc, false) of
- false ->
- UserDb = ensure_user_db(Prefix, User, Q),
- ok = ensure_security(User, UserDb, fun add_user/3),
- ChangesState;
- true ->
- case ChangesState#changes_state.delete_dbs of
- true ->
- _UserDb = delete_user_db(Prefix, User),
- ChangesState;
- false ->
- UserDb = user_db_name(Prefix, User),
- ok = ensure_security(User, UserDb, fun remove_user/3),
- ChangesState
- end
- end;
- false ->
- ChangesState
- end;
- _ ->
- ChangesState
- end;
-changes_handler(_Event, _ResType, ChangesState) ->
- ChangesState.
-
--spec should_handle_doc(ShardName :: binary(), DocId::binary()) -> boolean().
-should_handle_doc(ShardName, DocId) ->
- case is_stable() of
- false ->
- % when the cluster is unstable, we have already stopped all Listeners
- % the next stable event will restart all listeners and pick up this
- % doc change
- couch_log:debug("peruser: skipping, cluster unstable ~s/~s",
- [ShardName, DocId]),
- false;
- true ->
- should_handle_doc_int(ShardName, DocId)
- end.
-
--spec should_handle_doc_int(
- ShardName :: binary(),
- DocId :: binary()) -> boolean().
-should_handle_doc_int(ShardName, DocId) ->
- DbName = mem3:dbname(ShardName),
- Live = [erlang:node() | erlang:nodes()],
- Shards = mem3:shards(DbName, DocId),
- Nodes = [N || #shard{node=N} <- Shards, lists:member(N, Live)],
- case mem3:owner(DbName, DocId, Nodes) of
- ThisNode when ThisNode =:= node() ->
- couch_log:debug("peruser: handling ~s/~s", [DbName, DocId]),
- true; % do the database action
- _OtherNode ->
- couch_log:debug("peruser: skipping ~s/~s", [DbName, DocId]),
- false
- end.
-
--spec delete_user_db(Prefix:: binary(), User :: binary()) -> binary().
-delete_user_db(Prefix, User) ->
- UserDb = user_db_name(Prefix, User),
- try
- case fabric:delete_db(UserDb, [?ADMIN_CTX]) of
- ok -> ok;
- accepted -> ok
- end
- catch error:database_does_not_exist ->
- ok
- end,
- UserDb.
-
--spec ensure_user_db(Prefix:: binary(), User :: binary(), Q :: integer()) -> binary().
-ensure_user_db(Prefix, User, Q) ->
- UserDb = user_db_name(Prefix, User),
- try
- {ok, _DbInfo} = fabric:get_db_info(UserDb)
- catch error:database_does_not_exist ->
- case fabric:create_db(UserDb, [?ADMIN_CTX, {q, integer_to_list(Q)}]) of
- {error, file_exists} -> ok;
- ok -> ok;
- accepted -> ok
- end
- end,
- UserDb.
-
--spec add_user(
- User :: binary(),
- Properties :: tuple(),
- Acc :: tuple()) -> tuple().
-add_user(User, Prop, {Modified, SecProps}) ->
- {PropValue} = couch_util:get_value(Prop, SecProps, {[]}),
- Names = couch_util:get_value(<<"names">>, PropValue, []),
- case lists:member(User, Names) of
- true ->
- {Modified, SecProps};
- false ->
- {true,
- lists:keystore(
- Prop, 1, SecProps,
- {Prop,
- {lists:keystore(
- <<"names">>, 1, PropValue,
- {<<"names">>, [User | Names]})}})}
- end.
-
--spec remove_user(
- User :: binary(),
- Properties :: tuple(),
- Acc :: tuple()) -> tuple().
-remove_user(User, Prop, {Modified, SecProps}) ->
- {PropValue} = couch_util:get_value(Prop, SecProps, {[]}),
- Names = couch_util:get_value(<<"names">>, PropValue, []),
- case lists:member(User, Names) of
- false ->
- {Modified, SecProps};
- true ->
- {true,
- lists:keystore(
- Prop, 1, SecProps,
- {Prop,
- {lists:keystore(
- <<"names">>, 1, PropValue,
- {<<"names">>, lists:delete(User, Names)})}})}
- end.
-
--spec ensure_security(
- User :: binary(),
- UserDb :: binary(),
- TransformFun :: fun()) -> ok.
-ensure_security(User, UserDb, TransformFun) ->
- case fabric:get_all_security(UserDb, [?ADMIN_CTX]) of
- {error, no_majority} ->
- % TODO: make sure this is still true: single node, ignore
- ok;
- {ok, Shards} ->
- {_ShardInfo, {SecProps}} = hd(Shards),
- % assert that shards have the same security object
- true = lists:all(fun ({_, {SecProps1}}) ->
- SecProps =:= SecProps1
- end, Shards),
- case lists:foldl(
- fun (Prop, SAcc) -> TransformFun(User, Prop, SAcc) end,
- {false, SecProps},
- [<<"admins">>, <<"members">>]) of
- {false, _} ->
- ok;
- {true, SecProps1} ->
- ok = fabric:set_security(UserDb, {SecProps1}, [?ADMIN_CTX])
- end
- end.
-
--spec user_db_name(Prefix :: binary(), User :: binary()) -> binary().
-user_db_name(Prefix, User) ->
- HexUser = list_to_binary(
- [string:to_lower(integer_to_list(X, 16)) || <<X>> <= User]),
- <<Prefix/binary,HexUser/binary>>.
-
--spec exit_changes(State :: #state{}) -> ok.
-exit_changes(State) ->
- lists:foreach(fun (ChangesState) ->
- demonitor(ChangesState#changes_state.changes_ref, [flush]),
- unlink(ChangesState#changes_state.changes_pid),
- exit(ChangesState#changes_state.changes_pid, kill)
- end, State#state.states).
-
--spec is_stable() -> true | false.
-is_stable() ->
- gen_server:call(?MODULE, is_stable).
-
--spec subscribe_for_changes() -> ok.
-subscribe_for_changes() ->
- config:subscribe_for_changes([
- {"couch_httpd_auth", "authentication_db"},
- "couch_peruser"
- ]).
-
-% Mem3 cluster callbacks
-
-% TODO: find out what type Server is
--spec cluster_unstable(Server :: any()) -> any().
-cluster_unstable(Server) ->
- gen_server:cast(Server, cluster_unstable),
- Server.
-
-% TODO: find out what type Server is
--spec cluster_stable(Server :: any()) -> any().
-cluster_stable(Server) ->
- gen_server:cast(Server, cluster_stable),
- Server.
-
-%% gen_server callbacks
--spec init(Options :: list()) -> {ok, #state{}}.
-init([]) ->
- ok = subscribe_for_changes(),
- {ok, init_state()}.
-
-handle_call(is_stable, _From, #state{cluster_stable = IsStable} = State) ->
- {reply, IsStable, State};
-handle_call(_Msg, _From, State) ->
- {reply, error, State}.
-
-
-handle_cast(update_config, State) when State#state.states =/= undefined ->
- exit_changes(State),
- {noreply, init_state()};
-handle_cast(update_config, _) ->
- {noreply, init_state()};
-handle_cast(stop, State) ->
- {stop, normal, State};
-handle_cast(cluster_unstable, State) when State#state.states =/= undefined ->
- exit_changes(State),
- {noreply, init_state()};
-handle_cast(cluster_unstable, _) ->
- {noreply, init_state()};
-handle_cast(cluster_stable, State) ->
- {noreply, start_listening(State)};
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info({'DOWN', _Ref, _, _, _Reason}, State) ->
- {stop, normal, State};
-handle_info({config_change, "couch_peruser", _, _, _}, State) ->
- handle_cast(update_config, State);
-handle_info({
- config_change,
- "couch_httpd_auth",
- "authentication_db", _, _}, State) ->
- handle_cast(update_config, State);
-handle_info({gen_event_EXIT, _Handler, _Reason}, State) ->
- erlang:send_after(?RELISTEN_DELAY, self(), restart_config_listener),
- {noreply, State};
-handle_info({'EXIT', _Pid, _Reason}, State) ->
- erlang:send_after(?RELISTEN_DELAY, self(), restart_config_listener),
- {noreply, State};
-handle_info(restart_config_listener, State) ->
- ok = subscribe_for_changes(),
- {noreply, State};
-handle_info(_Msg, State) ->
- {noreply, State}.
-
-terminate(_Reason, _State) ->
- %% Everything should be linked or monitored, let nature
- %% take its course.
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-format_status(_Opt, [_PDict, State]) ->
- #state{
- states = States
- } = State,
- Scrubbed = State#state{
- states = {length, length(States)}
- },
- [{data, [{"State",
- ?record_to_keyval(state, Scrubbed)
-    }]}].
\ No newline at end of file
diff --git a/src/couch_peruser/src/couch_peruser_app.erl b/src/couch_peruser/src/couch_peruser_app.erl
deleted file mode 100644
index 770c08237..000000000
--- a/src/couch_peruser/src/couch_peruser_app.erl
+++ /dev/null
@@ -1,26 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_peruser_app).
-
--behaviour(application).
-
--export([start/2, stop/1]).
-
-
-start(_Type, _StartArgs) ->
- couch_peruser_sup:start_link().
-
-
-stop(_State) ->
- ok.
-
diff --git a/src/couch_peruser/test/eunit/couch_peruser_test.erl b/src/couch_peruser/test/eunit/couch_peruser_test.erl
deleted file mode 100644
index 5ddbe7a5a..000000000
--- a/src/couch_peruser/test/eunit/couch_peruser_test.erl
+++ /dev/null
@@ -1,538 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_peruser_test).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(ADMIN_USERNAME, "admin").
--define(ADMIN_PASSWORD, "secret").
-
--define(WAIT_FOR_USER_DELETE_TIMEOUT, 1000).
-
-setup_all() ->
- TestCtx = test_util:start_couch([chttpd]),
- ok = application:start(couch_peruser),
- Hashed = couch_passwords:hash_admin_password(?ADMIN_PASSWORD),
- ok = config:set("admins", ?ADMIN_USERNAME, ?b2l(Hashed), _Persist=false),
- TestCtx.
-
-teardown_all(TestCtx) ->
- config:delete("admins", ?ADMIN_USERNAME),
- ok = application:stop(couch_peruser),
- test_util:stop_couch(TestCtx).
-
-setup() ->
- TestAuthDb = ?tempdb(),
- do_request(put, get_base_url() ++ "/" ++ ?b2l(TestAuthDb)),
- do_request(put, get_cluster_base_url() ++ "/" ++ ?b2l(TestAuthDb)),
- set_config("couch_httpd_auth", "authentication_db", ?b2l(TestAuthDb)),
- set_config("couch_peruser", "cluster_quiet_period", "0"),
- set_config("couch_peruser", "cluster_start_period", "0"),
- set_config("couch_peruser", "enable", "true"),
- set_config("cluster", "n", "1"),
- TestAuthDb.
-
-teardown(TestAuthDb) ->
- set_config("couch_peruser", "enable", "false"),
- set_config("couch_peruser", "delete_dbs", "false"),
- set_config("couch_httpd_auth", "authentication_db", "_users"),
- set_config("couch_peruser", "cluster_quiet_period", "60"),
- set_config("couch_peruser", "cluster_start_period", "5"),
- set_config("cluster", "n", "3"),
- do_request(delete, get_cluster_base_url() ++ "/" ++ ?b2l(TestAuthDb)),
- do_request(delete, get_base_url() ++ "/" ++ ?b2l(TestAuthDb)),
- lists:foreach(fun(DbName) ->
- case binary:part(DbName, 0, 7) of
- <<"userdb-">> -> delete_db(DbName);
- _ -> ok
- end
- end, all_dbs()).
-
-set_config(Section, Key, Value) ->
- ok = config:set(Section, Key, Value, _Persist=false).
-
-delete_config(Section, Key) ->
- ok = config:delete(Section, Key, _Persist=false).
-
-do_request(Method, Url) ->
- Headers = [{basic_auth, {?ADMIN_USERNAME, ?ADMIN_PASSWORD}}],
- {ok, _, _, _} = test_request:request(Method, Url, Headers).
-
-do_request(Method, Url, Body) ->
- Headers = [
- {basic_auth, {?ADMIN_USERNAME, ?ADMIN_PASSWORD}},
- {"Content-Type", "application/json"}],
- {ok, _, _, _} = test_request:request(Method, Url, Headers, Body).
-
-do_anon_request(Method, Url, Body) ->
- Headers = [
- {"Content-Type", "application/json"}],
- {ok, _, _, _} = test_request:request(Method, Url, Headers, Body).
-
-create_db(DbName) ->
- {ok, _, _, _} = do_request(put, get_cluster_base_url() ++ "/" ++ ?b2l(DbName)).
-
-delete_db(DbName) ->
- {ok, _, _, _} = do_request(delete, get_cluster_base_url() ++ "/" ++ ?b2l(DbName)).
-
-create_user(AuthDb, Name) ->
- Body = "{\"name\":\"" ++ Name ++
- "\",\"type\":\"user\",\"roles\":[],\"password\":\"secret\"}",
- Url = lists:concat([
- get_cluster_base_url(), "/", ?b2l(AuthDb), "/org.couchdb.user:", Name]),
- {ok, 201, _, _} = do_request(put, Url, Body).
-
-create_anon_user(AuthDb, Name) ->
- Body = "{\"name\":\"" ++ Name ++
- "\",\"type\":\"user\",\"roles\":[],\"password\":\"secret\"}",
- Url = lists:concat([
- get_cluster_base_url(), "/", ?b2l(AuthDb), "/org.couchdb.user:", Name]),
- {ok, 201, _, _} = do_anon_request(put, Url, Body).
-
-delete_user(AuthDb, Name) ->
- Url = lists:concat([get_cluster_base_url(), "/", ?b2l(AuthDb),
- "/org.couchdb.user:", Name]),
- {ok, 200, _, Body} = do_request(get, Url),
- {DocProps} = jiffy:decode(Body),
- Rev = proplists:get_value(<<"_rev">>, DocProps),
- {ok, 200, _, _} = do_request(delete, Url ++ "?rev=" ++ ?b2l(Rev)).
-
-get_security(DbName) ->
- Url = lists:concat([
- get_cluster_base_url(), "/", ?b2l(DbName), "/_security"]),
- test_util:wait(fun() ->
- {ok, 200, _, Body} = do_request(get, Url),
- case jiffy:decode(Body) of
- {[]} -> wait;
- {SecurityProperties} -> SecurityProperties
- end
- end).
-
-set_security(DbName, SecurityProperties) ->
- Url = lists:concat([
- get_cluster_base_url(), "/", ?b2l(DbName), "/_security"]),
- Body = jiffy:encode({SecurityProperties}),
- {ok, 200, _, _} = do_request(put, Url, Body).
-
-all_dbs() ->
- {ok, 200, _, Body} = do_request(get, get_cluster_base_url() ++ "/_all_dbs"),
- jiffy:decode(Body).
-
-all_dbs_with_errors() ->
- {Result, StatusCode, _Headers, Body} = do_request(get, get_cluster_base_url() ++ "/_all_dbs"),
- {Result, StatusCode, _Headers, jiffy:decode(Body)}.
-
-get_base_url() ->
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- "http://" ++ Addr ++ ":" ++ Port.
-
-get_cluster_base_url() ->
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)),
- "http://" ++ Addr ++ ":" ++ Port.
-
-
-should_create_user_db_with_default(TestAuthDb) ->
- ?_test(begin
- create_user(TestAuthDb, "foo"),
- wait_for_db_create(<<"userdb-666f6f">>),
- {ok, DbInfo} = fabric:get_db_info(<<"userdb-666f6f">>),
- {ClusterInfo} = couch_util:get_value(cluster, DbInfo),
- ?assert(lists:member(<<"userdb-666f6f">>, all_dbs())),
- ?assertEqual(1, couch_util:get_value(q, ClusterInfo))
- end).
-
-should_create_user_db_with_custom_prefix(TestAuthDb) ->
- ?_test(begin
- set_config("couch_peruser", "database_prefix", "newuserdb-"),
- create_user(TestAuthDb, "fooo"),
- wait_for_db_create(<<"newuserdb-666f6f6f">>),
- delete_config("couch_peruser", "database_prefix"),
- ?assert(lists:member(<<"newuserdb-666f6f6f">>, all_dbs()))
- end).
-
-should_create_user_db_with_custom_special_prefix(TestAuthDb) ->
- ?_test(begin
- set_config("couch_peruser", "database_prefix", "userdb_$()+--/"),
- create_user(TestAuthDb, "fooo"),
- wait_for_db_create(<<"userdb_$()+--/666f6f6f">>),
- delete_config("couch_peruser", "database_prefix"),
- ?assert(lists:member(<<"userdb_$()+--/666f6f6f">>, all_dbs()))
- end).
-
-should_create_anon_user_db_with_default(TestAuthDb) ->
- ?_test(begin
- create_anon_user(TestAuthDb, "fooo"),
- wait_for_db_create(<<"userdb-666f6f6f">>),
- {ok, DbInfo} = fabric:get_db_info(<<"userdb-666f6f6f">>),
- {ClusterInfo} = couch_util:get_value(cluster, DbInfo),
- ?assert(lists:member(<<"userdb-666f6f6f">>, all_dbs())),
- ?assertEqual(1, couch_util:get_value(q, ClusterInfo))
- end).
-
-should_create_anon_user_db_with_custom_prefix(TestAuthDb) ->
- ?_test(begin
- set_config("couch_peruser", "database_prefix", "newuserdb-"),
- create_anon_user(TestAuthDb, "fooo"),
- wait_for_db_create(<<"newuserdb-666f6f6f">>),
- delete_config("couch_peruser", "database_prefix"),
- ?assert(lists:member(<<"newuserdb-666f6f6f">>, all_dbs()))
- end).
-
-should_create_anon_user_db_with_custom_special_prefix(TestAuthDb) ->
- ?_test(begin
- set_config("couch_peruser", "database_prefix", "userdb_$()+--/"),
- create_anon_user(TestAuthDb, "fooo"),
- wait_for_db_create(<<"userdb_$()+--/666f6f6f">>),
- delete_config("couch_peruser", "database_prefix"),
- ?assert(lists:member(<<"userdb_$()+--/666f6f6f">>, all_dbs()))
- end).
-
-should_create_user_db_with_q4(TestAuthDb) ->
- ?_test(begin
- set_config("couch_peruser", "q", "4"),
- create_user(TestAuthDb, "foo"),
- wait_for_db_create(<<"userdb-666f6f">>),
- {ok, DbInfo} = fabric:get_db_info(<<"userdb-666f6f">>),
- {ClusterInfo} = couch_util:get_value(cluster, DbInfo),
- delete_config("couch_peruser", "q"),
- ?assert(lists:member(<<"userdb-666f6f">>, all_dbs())),
- ?assertEqual(4, couch_util:get_value(q, ClusterInfo))
- end).
-
-should_create_anon_user_db_with_q4(TestAuthDb) ->
- ?_test(begin
- set_config("couch_peruser", "q", "4"),
- create_anon_user(TestAuthDb, "fooo"),
- wait_for_db_create(<<"userdb-666f6f6f">>),
- {ok, TargetInfo} = fabric:get_db_info(<<"userdb-666f6f6f">>),
- {ClusterInfo} = couch_util:get_value(cluster, TargetInfo),
- delete_config("couch_peruser", "q"),
- ?assert(lists:member(<<"userdb-666f6f6f">>, all_dbs())),
- ?assertEqual(4, couch_util:get_value(q, ClusterInfo))
- end).
-
-should_not_delete_user_db(TestAuthDb) ->
- ?_test(begin
- User = "foo",
- UserDbName = <<"userdb-666f6f">>,
- create_user(TestAuthDb, User),
- wait_for_db_create(<<"userdb-666f6f">>),
- AfterCreate = lists:member(UserDbName, all_dbs()),
- delete_user(TestAuthDb, User),
- timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
- AfterDelete = lists:member(UserDbName, all_dbs()),
- ?assert(AfterCreate),
- ?assert(AfterDelete)
- end).
-
-should_delete_user_db(TestAuthDb) ->
- ?_test(begin
- User = "bar",
- UserDbName = <<"userdb-626172">>,
- set_config("couch_peruser", "delete_dbs", "true"),
- create_user(TestAuthDb, User),
- wait_for_db_create(UserDbName),
- AfterCreate = lists:member(UserDbName, all_dbs()),
- delete_user(TestAuthDb, User),
- wait_for_db_delete(UserDbName),
- AfterDelete = lists:member(UserDbName, all_dbs()),
- ?assert(AfterCreate),
- ?assertNot(AfterDelete)
- end).
-
-should_delete_user_db_with_custom_prefix(TestAuthDb) ->
- ?_test(begin
- User = "bar",
- UserDbName = <<"newuserdb-626172">>,
- set_config("couch_peruser", "delete_dbs", "true"),
- set_config("couch_peruser", "database_prefix", "newuserdb-"),
- create_user(TestAuthDb, User),
- wait_for_db_create(UserDbName),
- AfterCreate = lists:member(UserDbName, all_dbs()),
- delete_user(TestAuthDb, User),
- wait_for_db_delete(UserDbName),
- delete_config("couch_peruser", "database_prefix"),
- AfterDelete = lists:member(UserDbName, all_dbs()),
- ?assert(AfterCreate),
- ?assertNot(AfterDelete)
- end).
-
-should_delete_user_db_with_custom_special_prefix(TestAuthDb) ->
- ?_test(begin
- User = "bar",
- UserDbName = <<"userdb_$()+--/626172">>,
- set_config("couch_peruser", "delete_dbs", "true"),
- set_config("couch_peruser", "database_prefix", "userdb_$()+--/"),
- create_user(TestAuthDb, User),
- wait_for_db_create(UserDbName),
- AfterCreate = lists:member(UserDbName, all_dbs()),
- delete_user(TestAuthDb, User),
- wait_for_db_delete(UserDbName),
- delete_config("couch_peruser", "database_prefix"),
- AfterDelete = lists:member(UserDbName, all_dbs()),
- ?assert(AfterCreate),
- ?assertNot(AfterDelete)
- end).
-
-should_reflect_config_changes(TestAuthDb) ->
- {timeout, 10000, ?_test(begin
- User = "baz",
- UserDbName = <<"userdb-62617a">>,
- set_config("couch_peruser", "delete_dbs", "true"),
- create_user(TestAuthDb, User),
- wait_for_db_create(UserDbName),
- AfterCreate1 = lists:member(UserDbName, all_dbs()),
- delete_user(TestAuthDb, User),
- timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
- wait_for_db_delete(UserDbName),
- AfterDelete1 = lists:member(UserDbName, all_dbs()),
- create_user(TestAuthDb, User),
- wait_for_db_create(UserDbName),
- AfterCreate2 = lists:member(UserDbName, all_dbs()),
- set_config("couch_peruser", "delete_dbs", "false"),
- delete_user(TestAuthDb, User),
- timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
- AfterDelete2 = lists:member(UserDbName, all_dbs()),
- create_user(TestAuthDb, User),
- wait_for_db_create(UserDbName),
- set_config("couch_peruser", "delete_dbs", "true"),
- delete_user(TestAuthDb, User),
- wait_for_db_delete(UserDbName),
- AfterDelete3 = lists:member(UserDbName, all_dbs()),
- set_config("couch_peruser", "enable", "false"),
- create_user(TestAuthDb, User),
- timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
- AfterCreate3 = lists:member(UserDbName, all_dbs()),
- ?assert(AfterCreate1),
- ?assertNot(AfterDelete1),
- ?assert(AfterCreate2),
- ?assert(AfterDelete2),
- ?assertNot(AfterDelete3),
- ?assertNot(AfterCreate3)
- end)}.
-
-
-should_add_user_to_db_admins(TestAuthDb) ->
- ?_test(begin
- User = "qux",
- UserDbName = <<"userdb-717578">>,
- create_user(TestAuthDb, User),
- wait_for_db_create(UserDbName),
- ?assertEqual(
- {[{<<"names">>,[<<"qux">>]}]},
- proplists:get_value(<<"admins">>, get_security(UserDbName)))
- end).
-
-should_add_user_to_db_members(TestAuthDb) ->
- ?_test(begin
- User = "qux",
- UserDbName = <<"userdb-717578">>,
- create_user(TestAuthDb, User),
- wait_for_db_create(UserDbName),
- ?assertEqual(
- {[{<<"names">>,[<<"qux">>]}]},
- proplists:get_value(<<"members">>, get_security(UserDbName)))
- end).
-
-should_not_remove_existing_db_admins(TestAuthDb) ->
- ?_test(begin
- User = "qux",
- UserDbName = <<"userdb-717578">>,
- SecurityProperties = [
- {<<"admins">>,{[{<<"names">>,[<<"foo">>,<<"bar">>]}]}},
- {<<"members">>,{[{<<"names">>,[<<"baz">>,<<"pow">>]}]}}
- ],
- create_db(UserDbName),
- set_security(UserDbName, SecurityProperties),
- create_user(TestAuthDb, User),
- wait_for_security_create(<<"admins">>, User, UserDbName),
- {AdminProperties} = proplists:get_value(<<"admins">>,
- get_security(UserDbName)),
- AdminNames = proplists:get_value(<<"names">>, AdminProperties),
- ?assert(lists:member(<<"foo">>, AdminNames)),
- ?assert(lists:member(<<"bar">>, AdminNames)),
- ?assert(lists:member(<<"qux">>, AdminNames))
- end).
-
-should_not_remove_existing_db_members(TestAuthDb) ->
- ?_test(begin
- User = "qux",
- UserDbName = <<"userdb-717578">>,
- SecurityProperties = [
- {<<"admins">>,{[{<<"names">>,[<<"pow">>,<<"wow">>]}]}},
- {<<"members">>,{[{<<"names">>,[<<"pow">>,<<"wow">>]}]}}
- ],
- create_db(UserDbName),
- set_security(UserDbName, SecurityProperties),
- create_user(TestAuthDb, User),
- wait_for_security_create(<<"members">>, User, UserDbName),
- {MemberProperties} = proplists:get_value(<<"members">>,
- get_security(UserDbName)),
- MemberNames = proplists:get_value(<<"names">>, MemberProperties),
- ?assert(lists:member(<<"pow">>, MemberNames)),
- ?assert(lists:member(<<"wow">>, MemberNames)),
- ?assert(lists:member(<<"qux">>, MemberNames))
- end).
-
-should_remove_user_from_db_admins(TestAuthDb) ->
- ?_test(begin
- User = "qux",
- UserDbName = <<"userdb-717578">>,
- SecurityProperties = [
- {<<"admins">>,{[{<<"names">>,[<<"foo">>,<<"bar">>]}]}},
- {<<"members">>,{[{<<"names">>,[<<"baz">>,<<"pow">>]}]}}
- ],
- create_db(UserDbName),
- set_security(UserDbName, SecurityProperties),
- create_user(TestAuthDb, User),
- wait_for_security_create(<<"admins">>, User, UserDbName),
- {AdminProperties} = proplists:get_value(<<"admins">>,
- get_security(UserDbName)),
- AdminNames = proplists:get_value(<<"names">>, AdminProperties),
- FooBefore = lists:member(<<"foo">>, AdminNames),
- BarBefore = lists:member(<<"bar">>, AdminNames),
- QuxBefore = lists:member(<<"qux">>, AdminNames),
- delete_user(TestAuthDb, User),
- wait_for_security_delete(<<"admins">>, User, UserDbName),
- {NewAdminProperties} = proplists:get_value(<<"admins">>,
- get_security(UserDbName)),
- NewAdminNames = proplists:get_value(<<"names">>, NewAdminProperties),
- FooAfter = lists:member(<<"foo">>, NewAdminNames),
- BarAfter = lists:member(<<"bar">>, NewAdminNames),
- QuxAfter = lists:member(<<"qux">>, NewAdminNames),
- ?assert(FooBefore),
- ?assert(BarBefore),
- ?assert(QuxBefore),
- ?assert(FooAfter),
- ?assert(BarAfter),
- ?assertNot(QuxAfter)
- end).
-
-should_remove_user_from_db_members(TestAuthDb) ->
- ?_test(begin
- User = "qux",
- UserDbName = <<"userdb-717578">>,
- SecurityProperties = [
- {<<"admins">>,{[{<<"names">>,[<<"pow">>,<<"wow">>]}]}},
- {<<"members">>,{[{<<"names">>,[<<"pow">>,<<"wow">>]}]}}
- ],
- create_db(UserDbName),
- set_security(UserDbName, SecurityProperties),
- create_user(TestAuthDb, User),
- wait_for_security_create(<<"members">>, User, UserDbName),
- {MemberProperties} = proplists:get_value(<<"members">>,
- get_security(UserDbName)),
- MemberNames = proplists:get_value(<<"names">>, MemberProperties),
- PowBefore = lists:member(<<"pow">>, MemberNames),
- WowBefore = lists:member(<<"wow">>, MemberNames),
- QuxBefore = lists:member(<<"qux">>, MemberNames),
- delete_user(TestAuthDb, User),
- wait_for_security_delete(<<"members">>, User, UserDbName),
- {NewMemberProperties} = proplists:get_value(<<"members">>,
- get_security(UserDbName)),
- NewMemberNames = proplists:get_value(<<"names">>, NewMemberProperties),
- PowAfter = lists:member(<<"pow">>, NewMemberNames),
- WowAfter = lists:member(<<"wow">>, NewMemberNames),
- QuxAfter = lists:member(<<"qux">>, NewMemberNames),
- ?assert(PowBefore),
- ?assert(WowBefore),
- ?assert(QuxBefore),
- ?assert(PowAfter),
- ?assert(WowAfter),
- ?assertNot(QuxAfter)
- end).
-
-
-
-wait_for_db_create(UserDbName) ->
- test_util:wait(fun() ->
- case all_dbs_with_errors() of
- {error, _, _ , _} -> wait;
- {ok, _, _, AllDbs} ->
- case lists:member(UserDbName, AllDbs) of
- true -> true;
- false -> wait
- end
- end
- end).
-
-wait_for_db_delete(UserDbName) ->
- test_util:wait(fun() ->
- case all_dbs_with_errors() of
- {ok, 500, _ , _} -> wait;
- {ok, _, _, AllDbs} ->
- case not lists:member(UserDbName, AllDbs) of
- true -> true;
- false -> wait
- end
- end
- end).
-
-wait_for_security_create(Type, User0, UserDbName) ->
- User = ?l2b(User0),
- test_util:wait(fun() ->
- {Props} = proplists:get_value(Type, get_security(UserDbName)),
- Names = proplists:get_value(<<"names">>, Props),
- case lists:member(User, Names) of
- true -> true;
- false -> wait
- end
- end).
-
-wait_for_security_delete(Type, User0, UserDbName) ->
- User = ?l2b(User0),
- test_util:wait(fun() ->
- {Props} = proplists:get_value(Type, get_security(UserDbName)),
- Names = proplists:get_value(<<"names">>, Props),
- case not lists:member(User, Names) of
- true -> true;
- false -> wait
- end
- end).
-
-couch_peruser_test_() ->
- {
- "couch_peruser test",
- {
- setup,
- fun setup_all/0, fun teardown_all/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_create_anon_user_db_with_default/1,
- fun should_create_anon_user_db_with_custom_prefix/1,
- fun should_create_anon_user_db_with_custom_special_prefix/1,
- fun should_create_user_db_with_default/1,
- fun should_create_user_db_with_custom_prefix/1,
- fun should_create_user_db_with_custom_special_prefix/1,
- fun should_create_user_db_with_q4/1,
- fun should_create_anon_user_db_with_q4/1,
- fun should_not_delete_user_db/1,
- fun should_delete_user_db/1,
- fun should_delete_user_db_with_custom_prefix/1,
- fun should_delete_user_db_with_custom_special_prefix/1,
- fun should_reflect_config_changes/1,
- fun should_add_user_to_db_admins/1,
- fun should_add_user_to_db_members/1,
- fun should_not_remove_existing_db_admins/1,
- fun should_not_remove_existing_db_members/1,
- fun should_remove_user_from_db_admins/1,
- fun should_remove_user_from_db_members/1
- ]
- }
- }
- }.
diff --git a/src/couch_plugins/LICENSE b/src/couch_plugins/LICENSE
deleted file mode 100644
index f6cd2bc80..000000000
--- a/src/couch_plugins/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/src/couch_plugins/Makefile.am b/src/couch_plugins/Makefile.am
deleted file mode 100644
index 37cd9d5c1..000000000
--- a/src/couch_plugins/Makefile.am
+++ /dev/null
@@ -1,40 +0,0 @@
-## Licensed under the Apache License, Version 2.0 (the "License"); you may not
-## use this file except in compliance with the License. You may obtain a copy of
-## the License at
-##
-## http://www.apache.org/licenses/LICENSE-2.0
-##
-## Unless required by applicable law or agreed to in writing, software
-## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-## License for the specific language governing permissions and limitations under
-## the License.
-
-couch_pluginslibdir = $(localerlanglibdir)/couch_plugins-0.1
-couch_pluginsebindir = $(couch_pluginslibdir)/ebin
-
-couch_pluginsebin_DATA = $(compiled_files)
-
-
-source_files = \
- src/couch_plugins.app.src \
- src/couch_plugins.erl \
- src/couch_plugins_httpd.erl
-
-compiled_files = \
- ebin/couch_plugins.app \
- ebin/couch_plugins.beam \
- ebin/couch_plugins_httpd.beam
-
-EXTRA_DIST = $(source_files) README.md
-CLEANFILES = $(compiled_files)
-
-ebin/%.app: src/%.app.src
- @mkdir -p ebin/
- sed -e "s|%version%|@version@|g" \
- < $< > $@
-
-ebin/%.beam: src/%.erl $(include_files)
- @mkdir -p ebin/
- $(ERLC) -Wall -I$(top_srcdir)/src -I$(top_srcdir)/src/couchdb \
- -o ebin/ $(ERLC_FLAGS) ${TEST} $<;
diff --git a/src/couch_plugins/README.md b/src/couch_plugins/README.md
deleted file mode 100644
index b00a080c1..000000000
--- a/src/couch_plugins/README.md
+++ /dev/null
@@ -1,159 +0,0 @@
-Heya,
-
-I couldn’t help myself thinking about plugin stuff and ended up
-whipping up a proof of concept.
-
-Here’s a <1 minute demo video:
-
- https://dl.dropboxusercontent.com/u/82149/couchdb-plugins-demo.mov
-
-Alternative encoding:
-
- https://dl.dropboxusercontent.com/u/82149/couchdb-plugins-demo.m4v)
-
-
-In my head the whole plugin idea is a very wide area, but I was so
-intrigued by the idea of getting something running with a click on a
-button in Futon. So I looked for a minimally viable plugin system.
-
-
-## Design principles
-
-It took me a day to put this all together and this was only possible
-because I took a lot of shortcuts. I believe they are all viable for a
- first iteration of a plugin system:
-
-1. Install with one click on a button in Futon (or HTTP call)
-2. Only pure Erlang plugins are allowed.
-3. The plugin author must provide a binary package for each Erlang version
-   (and, later, each CouchDB version).
-4. Complete trust-based system. You trust me to not do any nasty things
- when you click on the install button. No crypto, no nothing. Only
- people who can commit to Futon can release new versions of plugins.
-5. Minimal user-friendliness: won’t install plugins that don’t match
- the current Erlang version, gives semi-sensible error messages
-   (wrapped in an HTTP 500 response :)
-6. Require a pretty strict format for binary releases.
-
-
-## Roadmap
-
-Here’s a list of things this first iteration does and doesn’t do:
-
-- Pure Erlang plugins only. No C-dependencies, no JavaScript, no nothing.
-- No C-dependencies.
-- Install a plugin via Futon (or HTTP call). Admin only.
-- A hardcoded list of plugins in Futon.
-- Loads a pre-packaged, pre-compiled .tar.gz file from a URL.
-- Only installs if Erlang version matches.
-- No security checking of binaries.
-- No identity checking of binaries.
-- Register installed plugins in the config system.
-- Make sure plugins start with the next restart of CouchDB.
-- Uninstall a plugin via Futon (or HTTP call). Admin only.
-- Show when a particular plugin is installed.
-- Only installs if CouchDB version matches.
-- Serve static web assets (for Futon/Fauxton) from `/_plugins/<name>/`.
-
-I hope you agree we can ship this with a few warnings so people can get the
-hang of it.
-
-
-A roadmap, progress and issues can be found here:
-
-https://issues.apache.org/jira/issues/?jql=component+%3D+Plugins+AND+project+%3D+COUCHDB+AND+resolution+%3D+Unresolved+ORDER+BY+priority+DESC
-
-
-
-## How it works
-
-This plugin system lives in `src/couch_plugins` and is a tiny CouchDB
-module.
-
-It exposes one new API endpoint `/_plugins` that an admin user can
-POST to.
-
-The additional Futon page lives at `/_utils/plugins.html`; it is
-hardcoded.
-
-Futon (or you) post an object to `/_plugins` with four properties:
-
- {
- "name": "geocouch", // name of the plugin, must be unique
- "url": "http://people.apache.org/~jan", // “base URL” for plugin releases (see below)
- "version": "couchdb1.2.x_v0.3.0-11-g4ea0bea", // whatever version internal to the plugin
- "checksums": {
- "R15B03": "ZetgdHj2bY2w37buulWVf3USOZs=" // base64’d sha hash over the binary
- }
- }
-
-`couch_plugins` then attempts to download a .tar.gz from this
-location:
-
- http://people.apache.org/~jan/geocouch-couchdb1.2.x_v0.3.0-12-g4ea0bea-R15B03.tar.gz
-
-It should be obvious how the URL is constructed from the POST data.
-(This URL is live; feel free to play around with this tarball).
-
-Next it calculates the sha hash for the downloaded .tar.gz file and
-matches it against the correct version in the `checksums` parameter.
-
-If that succeeds, we unpack the .tar.gz file (currently in `/tmp`,
-need to find a better place for this) and add the extracted directory
-to the Erlang code path
-(`code:add_path("/tmp/couchdb_plugins/geocouch-couchdb1.2.x_v0.3.0-12-g4ea0bea-R15B03/ebin")`)
-and load the included application (`application:load(geocouch)`).
-
-Then it looks into the `./priv/default.d` directory that lives next to
-`ebin/` in the plugin directory for configuration `.ini` files and loads them.
-On next startup these configuration files are loaded after global defaults,
-and before any local configuration.
-
-If that all goes to plan, we report success back to the HTTP caller.
-
-That’s it! :)
-
-It’s deceptively simple, probably does a few things very wrong and
-leaves a few things open (see above).
-
-One open question I’d like an answer for is finding a good location to
-unpack & install the plugin files that isn’t `tmp`. If the answer is
-different for a pre-BigCouch/rcouch-merge and post-BigCouch/rcouch-
-merge world, I’d love to know :)
-
-
-## Code
-
-The main branch for this is 1867-feature-plugins:
-
- ASF: https://git-wip-us.apache.org/repos/asf?p=couchdb.git;a=log;h=refs/heads/1867-feature-plugins
- GitHub: https://github.com/janl/couchdb/compare/apache:master...1867-feature-plugins
-
-I created a branch on GeoCouch that adds a few lines to its `Makefile`
-that shows how a binary package is built:
-
- https://github.com/janl/geocouch/compare/couchbase:couchdb1.3.x...couchdb1.3.x-plugins
-
-
-## Build
-
-Build CouchDB as usual:
-
- ./bootstrap
- ./configure
- make
- make dev
- ./utils/run
-
-* * *
-
-I hope you like this :) Please comment and improve heavily!
-
-Let me know if you have any questions :)
-
-If you have any criticism, please phrase it in a way that we can use
-to improve this, thanks!
-
-Best,
-Jan
---
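The checksum in the install request is simply the base64-encoded SHA-1 of the
release tarball, matching what do_verify_checksum/2 computes in the
couch_plugins.erl diff further down. A minimal sketch of how a plugin author
might produce it from an Erlang shell, reusing the tarball name from the
example above:

    {ok, TarGz} = file:read_file("geocouch-couchdb1.2.x_v0.3.0-12-g4ea0bea-R15B03.tar.gz"),
    Checksum = binary_to_list(base64:encode(crypto:hash(sha, TarGz))).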
diff --git a/src/couch_plugins/src/couch_plugins.app.src b/src/couch_plugins/src/couch_plugins.app.src
deleted file mode 100644
index 07d6b14d6..000000000
--- a/src/couch_plugins/src/couch_plugins.app.src
+++ /dev/null
@@ -1,22 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-{application, couch_plugins,
- [
- {description, "A CouchDB Plugin Installer"},
- {vsn, git},
- {registered, []},
- {applications, [
- kernel,
- stdlib
- ]},
- {env, []}
- ]}.
diff --git a/src/couch_plugins/src/couch_plugins.erl b/src/couch_plugins/src/couch_plugins.erl
deleted file mode 100644
index 139a878bf..000000000
--- a/src/couch_plugins/src/couch_plugins.erl
+++ /dev/null
@@ -1,304 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
--module(couch_plugins).
--include_lib("couch/include/couch_db.hrl").
--export([install/1, uninstall/1]).
-
-% couch_plugins:install({"geocouch", "http://127.0.0.1:8000", "1.0.0", [{"R15B03", "+XOJP6GSzmuO2qKdnjO+mWckXVs="}]}).
-% couch_plugins:install({"geocouch", "http://people.apache.org/~jan/", "couchdb1.2.x_v0.3.0-11-gd83ba22", [{"R15B03", "ZetgdHj2bY2w37buulWVf3USOZs="}]}).
-
-plugin_dir() ->
- couch_config:get("couchdb", "plugin_dir").
-
-log(T) ->
- couch_log:debug("[couch_plugins] ~p ~n", [T]).
-
-%% "geocouch", "http://localhost:8000/dist", "1.0.0"
--type plugin() :: {string(), string(), string(), list()}.
--spec install(plugin()) -> ok | {error, string()}.
-install({Name, _BaseUrl, Version, Checksums}=Plugin) ->
- log("Installing " ++ Name),
-
- {ok, LocalFilename} = download(Plugin),
- log("downloaded to " ++ LocalFilename),
-
- ok = verify_checksum(LocalFilename, Checksums),
- log("checksum verified"),
-
- ok = untargz(LocalFilename),
- log("extraction done"),
-
- ok = add_code_path(Name, Version),
- log("added code path"),
-
- ok = register_plugin(Name, Version),
- log("registered plugin"),
-
- load_config(Name, Version),
- log("loaded config"),
-
- ok.
-
-% Uninstall is idempotent: if you uninstall a non-existent
-% plugin, you get an `ok`.
--spec uninstall(plugin()) -> ok | {error, string()}.
-uninstall({Name, _BaseUrl, Version, _Checksums}) ->
- % unload config
- ok = unload_config(Name, Version),
- log("config unloaded"),
-
- % delete files
- ok = delete_files(Name, Version),
- log("files deleted"),
-
- % delete code path
- ok = del_code_path(Name, Version),
- log("deleted code path"),
-
- % unregister plugin
- ok = unregister_plugin(Name),
- log("unregistered plugin"),
-
- % done
- ok.
-
-%% * * *
-
-
-%% Plugin Registration
-%% On install:
-%% - add plugins/name = version to config
-%% On uninstall:
-%% - remove plugins/name from config
-
--spec register_plugin(string(), string()) -> ok.
-register_plugin(Name, Version) ->
- couch_config:set("plugins", Name, Version).
-
--spec unregister_plugin(string()) -> ok.
-unregister_plugin(Name) ->
- couch_config:delete("plugins", Name).
-
-%% * * *
-
-
-%% Load Config
-%% Parses <plugindir>/priv/default.d/<pluginname.ini> and applies
-%% the contents to the config system, or removes them on uninstall
-
--spec load_config(string(), string()) -> ok.
-load_config(Name, Version) ->
- loop_config(Name, Version, fun set_config/1).
-
--spec unload_config(string(), string()) -> ok.
-unload_config(Name, Version) ->
- loop_config(Name, Version, fun delete_config/1).
-
--spec loop_config(string(), string(), function()) -> ok.
-loop_config(Name, Version, Fun) ->
- lists:foreach(fun(File) -> load_config_file(File, Fun) end,
- filelib:wildcard(file_names(Name, Version))).
-
--spec load_config_file(string(), function()) -> ok.
-load_config_file(File, Fun) ->
- {ok, Config} = couch_config:parse_ini_file(File),
- lists:foreach(Fun, Config).
-
--spec set_config({{string(), string()}, string()}) -> ok.
-set_config({{Section, Key}, Value}) ->
- ok = couch_config:set(Section, Key, Value).
-
--spec delete_config({{string(), string()}, _Value}) -> ok.
-delete_config({{Section, Key}, _Value}) ->
- ok = couch_config:delete(Section, Key).
-
--spec file_names(string(), string()) -> string().
-file_names(Name, Version) ->
- filename:join(
- [plugin_dir(), get_file_slug(Name, Version),
- "priv", "default.d", "*.ini"]).
-
-%% * * *
-
-
-%% Code Path Management
-%% The Erlang code path is where the Erlang runtime looks for `.beam`
-%% files to load on, say, `application:load()`. Since plugin directories
-%% are created on demand and named after CouchDB and Erlang versions,
-%% we manage the Erlang code path semi-automatically here.
-
--spec add_code_path(string(), string()) -> ok | {error, bad_directory}.
-add_code_path(Name, Version) ->
- PluginPath = plugin_dir() ++ "/" ++ get_file_slug(Name, Version) ++ "/ebin",
- case code:add_path(PluginPath) of
- true -> ok;
- Else ->
- couch_log:error("Failed to add PluginPath: '~s'", [PluginPath]),
- Else
- end.
-
--spec del_code_path(string(), string()) -> ok | {error, atom()}.
-del_code_path(Name, Version) ->
- PluginPath = plugin_dir() ++ "/" ++ get_file_slug(Name, Version) ++ "/ebin",
- case code:del_path(PluginPath) of
- true -> ok;
- _Else ->
- couch_log:debug("Failed to delete PluginPath: '~s', ignoring",
- [PluginPath]),
- ok
- end.
-
-%% * * *
-
-
--spec untargz(string()) -> {ok, string()} | {error, string()}.
-untargz(Filename) ->
- % read .gz file
- {ok, GzData} = file:read_file(Filename),
- % gunzip
- log("unzipped"),
- TarData = zlib:gunzip(GzData),
- ok = filelib:ensure_dir(plugin_dir()),
- % untar
- erl_tar:extract({binary, TarData}, [{cwd, plugin_dir()}, keep_old_files]).
-
--spec delete_files(string(), string()) -> ok | {error, atom()}.
-delete_files(Name, Version) ->
- PluginPath = plugin_dir() ++ "/" ++ get_file_slug(Name, Version),
- mochitemp:rmtempdir(PluginPath).
-
-
-% downloads a plugin .tar.gz into a local plugins directory
--spec download(plugin()) -> {ok, string()} | {error, term()}.
-download({Name, _BaseUrl, Version, _Checksums}=Plugin) ->
- TargetFile = filename:join(mochitemp:gettempdir(), get_filename(Name, Version)),
- case file_exists(TargetFile) of
- %% wipe and redownload
- true -> file:delete(TargetFile);
- _Else -> ok
- end,
- Url = get_url(Plugin),
- HTTPOptions = [
- {connect_timeout, 30*1000}, % 30 seconds
- {timeout, 30*1000} % 30 seconds
- ],
- % todo: windows
- Options = [
- {stream, TargetFile}, % /tmp/something
- {body_format, binary},
- {full_result, false}
- ],
- % todo: reduce to just httpc:request()
- case httpc:request(get, {Url, []}, HTTPOptions, Options) of
- {ok, _Result} ->
- log("downloading " ++ Url),
- {ok, TargetFile};
- Error -> Error
- end.
-
--spec verify_checksum(string(), list()) -> ok | {error, string()}.
-verify_checksum(Filename, Checksums) ->
-
- CouchDBVersion = couchdb_version(),
- case proplists:get_value(CouchDBVersion, Checksums) of
- undefined ->
- couch_log:error("[couch_plugins] Can't find checksum for CouchDB Version"
- " '~s'", [CouchDBVersion]),
- {error, no_couchdb_checksum};
- OTPChecksum ->
- OTPRelease = erlang:system_info(otp_release),
- case proplists:get_value(OTPRelease, OTPChecksum) of
- undefined ->
- couch_log:error("[couch_plugins] Can't find checksum for Erlang Version"
- " '~s'", [OTPRelease]),
- {error, no_erlang_checksum};
- Checksum ->
- do_verify_checksum(Filename, Checksum)
- end
- end.
-
--spec do_verify_checksum(string(), string()) -> ok | {error, string()}.
-do_verify_checksum(Filename, Checksum) ->
- couch_log:debug("Checking Filename: ~s", [Filename]),
- case file:read_file(Filename) of
- {ok, Data} ->
- ComputedChecksum = binary_to_list(base64:encode(crypto:hash(sha, Data))),
- case ComputedChecksum of
- Checksum -> ok;
- _Else ->
- couch_log:error("Checksum mismatch. Wanted: '~p'. Got '~p'",
- [Checksum, ComputedChecksum]),
- {error, checksum_mismatch}
- end;
- Error -> Error
- end.
-
-
-%% utils
-
--spec get_url(plugin()) -> string().
-get_url({Name, BaseUrl, Version, _Checksums}) ->
- BaseUrl ++ "/" ++ get_filename(Name, Version).
-
--spec get_filename(string(), string()) -> string().
-get_filename(Name, Version) ->
- get_file_slug(Name, Version) ++ ".tar.gz".
-
--spec get_file_slug(string(), string()) -> string().
-get_file_slug(Name, Version) ->
- % OtpRelease does not include patch levels like the -1 in R15B03-1
- OTPRelease = erlang:system_info(otp_release),
- CouchDBVersion = couchdb_version(),
- string:join([Name, Version, OTPRelease, CouchDBVersion], "-").
-
--spec file_exists(string()) -> boolean().
-file_exists(Filename) ->
- does_file_exist(file:read_file_info(Filename)).
--spec does_file_exist(term()) -> boolean().
-does_file_exist({error, enoent}) -> false;
-does_file_exist(_Else) -> true.
-
-couchdb_version() ->
- couch_server:get_version(short).
-
-% installing a plugin:
-% - POST /_plugins -d {plugin-def}
-% - get plugin definition
-% - get download URL (matching erlang version)
-% - download archive
-% - match checksum
-% - untar-gz archive into a plugins dir
-% - code:add_path(“geocouch-{geocouch_version}-{erlang_version}/ebin”)
-% - [cp geocouch-{geocouch_version}-{erlang_version}/etc/ ]
-% - application:start(geocouch)
-% - register plugin in plugin registry
-
-% Plugin registry impl:
-% - _plugins database
-% - pro: known db ops
-% - con: no need for replication, needs to be system db etc.
-% - _config/plugins namespace in config
-% - pro: lightweight, fits rarely-changing nature better
-% - con: potentially not flexible enough
-
-
-
-% /geocouch
-% /geocouch/dist/
-% /geocouch/dist/geocouch-{geocouch_version}-{erlang_version}.tar.gz
-
-% tar.gz includes:
-% geocouch-{geocouch_version}-{erlang_version}/
-% geocouch-{geocouch_version}-{erlang_version}/ebin
-% [geocouch-{geocouch_version}-{erlang_version}/config/config.erlt]
-% [geocouch-{geocouch_version}-{erlang_version}/share/]
-
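Note that verify_checksum/2 above resolves the checksum in two steps, first by
CouchDB version and then by Erlang/OTP release, so the last element of the
plugin() tuple is a nested proplist rather than the flat object shown in the
README. A hedged sketch of the shape it expects (version strings and hash are
illustrative):

    Checksums = [
        {"1.3", [                                        % couchdb_version()
            {"R15B03", "ZetgdHj2bY2w37buulWVf3USOZs="}   % OTP release -> base64 SHA-1
        ]}
    ].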
diff --git a/src/couch_plugins/src/couch_plugins_httpd.erl b/src/couch_plugins/src/couch_plugins_httpd.erl
deleted file mode 100644
index 90a09a5a5..000000000
--- a/src/couch_plugins/src/couch_plugins_httpd.erl
+++ /dev/null
@@ -1,65 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
--module(couch_plugins_httpd).
-
--export([handle_req/1]).
-
--include_lib("couch/include/couch_db.hrl").
-
-handle_req(#httpd{method='POST'}=Req) ->
- ok = couch_httpd:verify_is_server_admin(Req),
- couch_httpd:validate_ctype(Req, "application/json"),
-
- {PluginSpec} = couch_httpd:json_body_obj(Req),
- Url = binary_to_list(couch_util:get_value(<<"url">>, PluginSpec)),
- Name = binary_to_list(couch_util:get_value(<<"name">>, PluginSpec)),
- Version = binary_to_list(couch_util:get_value(<<"version">>, PluginSpec)),
- Delete = couch_util:get_value(<<"delete">>, PluginSpec),
- {Checksums0} = couch_util:get_value(<<"checksums">>, PluginSpec),
- Checksums = parse_checksums(Checksums0),
-
- Plugin = {Name, Url, Version, Checksums},
- case do_install(Delete, Plugin) of
- ok ->
- couch_httpd:send_json(Req, 202, {[{ok, true}]});
- Error ->
- couch_log:debug("Plugin Spec: ~p", [PluginSpec]),
- couch_httpd:send_error(Req, {bad_request, Error})
- end;
-% handles /_plugins/<pluginname>/<file>
-% serves <plugin_dir>/<pluginname>-<pluginversion>-<otpversion>-<couchdbversion>/<file>
-handle_req(#httpd{method='GET',path_parts=[_, Name0 | Path0]}=Req) ->
- Name = ?b2l(Name0),
- Path = lists:map(fun binary_to_list/1, Path0),
- OTPRelease = erlang:system_info(otp_release),
- PluginVersion = couch_config:get("plugins", Name),
- CouchDBVersion = couch_server:get_version(short),
- FullName = string:join([Name, PluginVersion, OTPRelease, CouchDBVersion], "-"),
- FullPath = filename:join([FullName, "priv", "www", string:join(Path, "/")]) ++ "/",
- couch_log:debug("Serving ~p from ~p", [FullPath, plugin_dir()]),
- couch_httpd:serve_file(Req, FullPath, plugin_dir());
-handle_req(Req) ->
- couch_httpd:send_method_not_allowed(Req, "POST").
-
-plugin_dir() ->
- couch_config:get("couchdb", "plugin_dir").
-do_install(false, Plugin) ->
- couch_plugins:install(Plugin);
-do_install(true, Plugin) ->
- couch_plugins:uninstall(Plugin).
-
-parse_checksums(Checksums) ->
- lists:map(fun({K, {V}}) ->
- {binary_to_list(K), parse_checksums(V)};
- ({K, V}) ->
- {binary_to_list(K), binary_to_list(V)}
- end, Checksums).
diff --git a/src/couch_mrview/src/couch_mrview.app.src b/src/couch_prometheus/src/couch_prometheus.app.src
index 735d1f8a0..bf49e59d2 100644
--- a/src/couch_mrview/src/couch_mrview.app.src
+++ b/src/couch_prometheus/src/couch_prometheus.app.src
@@ -10,9 +10,11 @@
% License for the specific language governing permissions and limitations under
% the License.
-{application, couch_mrview, [
- {description, "CouchDB Map/Reduce Views"},
+{application, couch_prometheus, [
+ {description, "Aggregated metrics info for Prometheus consumption"},
{vsn, git},
{registered, []},
- {applications, [kernel, stdlib, couch_index, couch_stats, ioq]}
+ {applications, [kernel, stdlib, folsom, couch_stats, couch_log]},
+ {mod, {couch_prometheus_app, []}},
+ {env, []}
]}.
diff --git a/src/rexi/include/rexi.hrl b/src/couch_prometheus/src/couch_prometheus.hrl
index a2d86b2ab..0970f4469 100644
--- a/src/rexi/include/rexi.hrl
+++ b/src/couch_prometheus/src/couch_prometheus.hrl
@@ -10,11 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
--record(error, {
- timestamp,
- reason,
- mfa,
- nonce,
- stack
-}).
+-define(REFRESH_INTERVAL, 5).
+-define(PROMETHEUS_VERSION, "2.0").
diff --git a/src/ddoc_cache/src/ddoc_cache_app.erl b/src/couch_prometheus/src/couch_prometheus_app.erl
index 5afa7ac95..232c16a8a 100644
--- a/src/ddoc_cache/src/ddoc_cache_app.erl
+++ b/src/couch_prometheus/src/couch_prometheus_app.erl
@@ -10,16 +10,14 @@
% License for the specific language governing permissions and limitations under
% the License.
--module(ddoc_cache_app).
--behaviour(application).
+-module(couch_prometheus_app).
+-behaviour(application).
-export([start/2, stop/1]).
-
start(_StartType, _StartArgs) ->
- ddoc_cache_sup:start_link().
-
+ couch_prometheus_sup:start_link().
stop(_State) ->
ok.
diff --git a/src/couch_prometheus/src/couch_prometheus_http.erl b/src/couch_prometheus/src/couch_prometheus_http.erl
new file mode 100644
index 000000000..bd0c4c6f9
--- /dev/null
+++ b/src/couch_prometheus/src/couch_prometheus_http.erl
@@ -0,0 +1,102 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_prometheus_http).
+
+-compile(tuple_calls).
+
+-export([
+ start_link/0,
+ handle_request/1
+]).
+
+-include("couch_prometheus.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+start_link() ->
+ IP = case config:get("prometheus", "bind_address", "any") of
+ "any" -> any;
+ Else -> Else
+ end,
+ Port = config:get("prometheus", "port"),
+ ok = couch_httpd:validate_bind_address(IP),
+
+ Options = [
+ {name, ?MODULE},
+ {loop, fun ?MODULE:handle_request/1},
+ {ip, IP},
+ {port, Port}
+ ],
+ case mochiweb_http:start(Options) of
+ {ok, Pid} ->
+ {ok, Pid};
+ {error, Reason} ->
+ io:format("Failure to start Mochiweb: ~s~n", [Reason]),
+ {error, Reason}
+ end.
+
+handle_request(MochiReq) ->
+ RawUri = MochiReq:get(raw_path),
+ {"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
+ PathParts = string:tokens(Path, "/"),
+ try
+ case PathParts of
+ ["_node", Node, "_prometheus"] ->
+ send_prometheus(MochiReq, Node);
+ _ ->
+ send_error(MochiReq, 404, <<"not_found">>, <<>>)
+ end
+ catch T:R ->
+ Body = list_to_binary(io_lib:format("~p:~p", [T, R])),
+ send_error(MochiReq, 500, <<"server_error">>, Body)
+ end.
+
+send_prometheus(MochiReq, Node) ->
+ Type = "text/plain; version=" ++ ?PROMETHEUS_VERSION,
+ Headers = couch_httpd:server_header() ++ [
+ {<<"Content-Type">>, ?l2b(Type)}
+ ],
+ Body = call_node(Node, couch_prometheus_server, scrape, []),
+ send_resp(MochiReq, 200, Headers, Body).
+
+send_resp(MochiReq, Status, ExtraHeaders, Body) ->
+ Headers = couch_httpd:server_header() ++ ExtraHeaders,
+ MochiReq:respond({Status, Headers, Body}).
+
+send_error(MochiReq, Code, Error, Reason) ->
+ Headers = couch_httpd:server_header() ++ [
+ {<<"Content-Type">>, <<"application/json">>}
+ ],
+ JsonError = {[{<<"error">>, Error},
+ {<<"reason">>, Reason}]},
+ Body = ?JSON_ENCODE(JsonError),
+ MochiReq:respond({Code, Headers, Body}).
+
+call_node("_local", Mod, Fun, Args) ->
+ call_node(node(), Mod, Fun, Args);
+call_node(Node0, Mod, Fun, Args) when is_list(Node0) ->
+ Node1 = try
+ list_to_existing_atom(Node0)
+ catch
+ error:badarg ->
+ NoNode = list_to_binary(Node0),
+ throw({not_found, <<"no such node: ", NoNode/binary>>})
+ end,
+ call_node(Node1, Mod, Fun, Args);
+call_node(Node, Mod, Fun, Args) when is_atom(Node) ->
+ case rpc:call(Node, Mod, Fun, Args) of
+ {badrpc, nodedown} ->
+ Reason = list_to_binary(io_lib:format("~s is down", [Node])),
+ throw({error, {nodedown, Reason}});
+ Else ->
+ Else
+ end.
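Assuming the dedicated listener is enabled (see the supervisor change below)
on the illustrative port 17986, the endpoint can be scraped without
authentication. A minimal sketch using inets/httpc from an Erlang shell:

    application:ensure_started(inets),
    Url = "http://127.0.0.1:17986/_node/_local/_prometheus",
    {ok, {{_, 200, _}, _Headers, Body}} = httpc:request(get, {Url, []}, [], []),
    io:format("~s", [Body]).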
diff --git a/src/couch_prometheus/src/couch_prometheus_server.erl b/src/couch_prometheus/src/couch_prometheus_server.erl
new file mode 100644
index 000000000..e97df04a4
--- /dev/null
+++ b/src/couch_prometheus/src/couch_prometheus_server.erl
@@ -0,0 +1,174 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_prometheus_server).
+
+-behaviour(gen_server).
+
+-import(couch_prometheus_util, [
+ couch_to_prom/3,
+ to_prom/3,
+ to_prom_summary/2
+]).
+
+-export([
+ scrape/0,
+ version/0
+]).
+
+-export([
+ start_link/0,
+ init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ code_change/3,
+ terminate/2
+]).
+
+-include("couch_prometheus.hrl").
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+-record(st, {
+ metrics,
+ refresh
+}).
+
+init([]) ->
+ Metrics = refresh_metrics(),
+ RT = update_refresh_timer(),
+ {ok, #st{metrics=Metrics, refresh=RT}}.
+
+scrape() ->
+ {ok, Metrics} = gen_server:call(?MODULE, scrape),
+ Metrics.
+
+version() ->
+ ?PROMETHEUS_VERSION.
+
+handle_call(scrape, _from, #st{metrics = Metrics}=State) ->
+ {reply, {ok, Metrics}, State};
+handle_call(refresh, _from, #st{refresh=OldRT} = State) ->
+ timer:cancel(OldRT),
+ Metrics = refresh_metrics(),
+ RT = update_refresh_timer(),
+ {reply, ok, State#st{metrics=Metrics, refresh=RT}};
+handle_call(Msg, _From, State) ->
+ {stop, {unknown_call, Msg}, error, State}.
+
+handle_cast(Msg, State) ->
+ {stop, {unknown_cast, Msg}, State}.
+
+handle_info(refresh, State) ->
+ Metrics = refresh_metrics(),
+ RT = update_refresh_timer(),
+ {noreply, State#st{metrics=Metrics, refresh=RT}};
+handle_info(Msg, State) ->
+ {stop, {unknown_info, Msg}, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+refresh_metrics() ->
+ CouchDB = get_couchdb_stats(),
+ System = couch_stats_httpd:to_ejson(get_system_stats()),
+ couch_prometheus_util:to_bin(lists:map(fun(Line) ->
+ io_lib:format("~s~n", [Line])
+ end, CouchDB ++ System)).
+
+get_couchdb_stats() ->
+ Stats = lists:sort(couch_stats:fetch()),
+ lists:flatmap(fun({Path, Info}) ->
+ couch_to_prom(Path, Info, Stats)
+ end, Stats).
+
+get_system_stats() ->
+ lists:flatten([
+ get_uptime_stat(),
+ get_vm_stats(),
+ get_io_stats(),
+ get_message_queue_stats(),
+ get_run_queue_stats(),
+ get_vm_stats(),
+ get_ets_stats()
+ ]).
+
+get_uptime_stat() ->
+ to_prom(uptime_seconds, counter, couch_app:uptime() div 1000).
+
+get_vm_stats() ->
+ MemLabels = lists:map(fun({Type, Value}) ->
+ {[{memory_type, Type}], Value}
+ end, erlang:memory()),
+ {NumGCs, WordsReclaimed, _} = erlang:statistics(garbage_collection),
+ CtxSwitches = element(1, erlang:statistics(context_switches)),
+ Reds = element(1, erlang:statistics(reductions)),
+ ProcCount = erlang:system_info(process_count),
+ ProcLimit = erlang:system_info(process_limit),
+ [
+ to_prom(erlang_memory_bytes, gauge, MemLabels),
+ to_prom(erlang_gc_collections_total, counter, NumGCs),
+ to_prom(erlang_gc_words_reclaimed_total, counter, WordsReclaimed),
+ to_prom(erlang_context_switches_total, counter, CtxSwitches),
+ to_prom(erlang_reductions_total, counter, Reds),
+ to_prom(erlang_processes, gauge, ProcCount),
+ to_prom(erlang_process_limit, gauge, ProcLimit)
+ ].
+
+get_io_stats() ->
+ {{input, In}, {output, Out}} = erlang:statistics(io),
+ [
+ to_prom(erlang_io_recv_bytes_total, counter, In),
+ to_prom(erlang_io_sent_bytes_total, counter, Out)
+ ].
+
+get_message_queue_stats() ->
+ Queues = lists:map(fun(Name) ->
+ case process_info(whereis(Name), message_queue_len) of
+ {message_queue_len, N} ->
+ N;
+ _ ->
+ 0
+ end
+ end, registered()),
+ [
+ to_prom(erlang_message_queues, gauge, lists:sum(Queues)),
+ to_prom(erlang_message_queue_min, gauge, lists:min(Queues)),
+ to_prom(erlang_message_queue_max, gauge, lists:max(Queues))
+ ].
+
+get_run_queue_stats() ->
+ %% Workaround for https://bugs.erlang.org/browse/ERL-1355
+ {Normal, Dirty} = case erlang:system_info(dirty_cpu_schedulers) > 0 of
+ false ->
+ {statistics(run_queue), 0};
+ true ->
+ [DCQ | SQs] = lists:reverse(statistics(run_queue_lengths)),
+ {lists:sum(SQs), DCQ}
+ end,
+ [
+ to_prom(erlang_scheduler_queues, gauge, Normal),
+ to_prom(erlang_dirty_cpu_scheduler_queues, gauge, Dirty)
+ ].
+
+get_ets_stats() ->
+ NumTabs = length(ets:all()),
+ to_prom(erlang_ets_table, gauge, NumTabs).
+
+update_refresh_timer() ->
+ RefreshTime = 1000 * config:get_integer("couch_prometheus", "interval", ?REFRESH_INTERVAL),
+ erlang:send_after(RefreshTime, self(), refresh).
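The gen_server renders the metrics once per refresh interval and serves the
cached copy to every caller, so scraping stays cheap regardless of request
rate. A small usage sketch:

    %% the text rendered at the last refresh, updated every
    %% couch_prometheus/interval seconds (default 5)
    Metrics = couch_prometheus_server:scrape(),
    io:format("~s", [Metrics]).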
diff --git a/src/couch_peruser/src/couch_peruser_sup.erl b/src/couch_prometheus/src/couch_prometheus_sup.erl
index b89a36324..8d8c7e078 100644
--- a/src/couch_peruser/src/couch_peruser_sup.erl
+++ b/src/couch_prometheus/src/couch_prometheus_sup.erl
@@ -10,20 +10,30 @@
% License for the specific language governing permissions and limitations under
% the License.
--module(couch_peruser_sup).
+-module(couch_prometheus_sup).
-behaviour(supervisor).
--export([start_link/0, init/1]).
+-export([
+ start_link/0,
+ init/1
+]).
-%% Helper macro for declaring children of supervisor
-define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}).
-
start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
init([]) ->
- {ok, { {one_for_one, 5, 10}, [?CHILD(couch_peruser, worker)]}}.
-
+ {ok, {
+ {one_for_one, 5, 10}, [
+ ?CHILD(couch_prometheus_server, worker)
+ ] ++ maybe_start_prometheus_http()
+ }}.
+
+maybe_start_prometheus_http() ->
+ case config:get("prometheus", "additional_port", "false") of
+ "false" -> [];
+ "true" -> [?CHILD(couch_prometheus_http, worker)];
+ _ -> []
+ end.
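The extra listener only starts when prometheus/additional_port is "true". A
hedged sketch of enabling it at runtime from a remote shell, mirroring what
the e2e test further down does (the port value is illustrative; the
application restart forces the supervisor to re-read the setting):

    ok = config:set("prometheus", "port", "17986", false),
    ok = config:set("prometheus", "additional_port", "true", false),
    application:stop(couch_prometheus),
    application:ensure_started(couch_prometheus).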
diff --git a/src/couch_prometheus/src/couch_prometheus_util.erl b/src/couch_prometheus/src/couch_prometheus_util.erl
new file mode 100644
index 000000000..c3b58cb3a
--- /dev/null
+++ b/src/couch_prometheus/src/couch_prometheus_util.erl
@@ -0,0 +1,166 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_prometheus_util).
+
+-export([
+ couch_to_prom/3,
+ to_bin/1,
+ to_prom/3,
+ to_prom_summary/2
+]).
+
+-include("couch_prometheus.hrl").
+
+couch_to_prom([couch_log, level, alert], Info, _All) ->
+ to_prom(couch_log_requests_total, counter, {[{level, alert}], val(Info)});
+couch_to_prom([couch_log, level, Level], Info, _All) ->
+ to_prom(couch_log_requests_total, {[{level, Level}], val(Info)});
+
+couch_to_prom([couch_replicator, checkpoints, failure], Info, _All) ->
+ to_prom(couch_replicator_checkpoints_failure_total, counter, val(Info));
+couch_to_prom([couch_replicator, checkpoints, success], Info, All) ->
+ Total = val(Info) + val([couch_replicator, checkpoints, failure], All),
+ to_prom(couch_replicator_checkpoints_total, counter, Total);
+couch_to_prom([couch_replicator, responses, failure], Info, _All) ->
+ to_prom(couch_replicator_responses_failure_total, counter, val(Info));
+couch_to_prom([couch_replicator, responses, success], Info, All) ->
+ Total = val(Info) + val([couch_replicator, responses, failure], All),
+ to_prom(couch_replicator_responses_total, counter, Total);
+couch_to_prom([couch_replicator, stream_responses, failure], Info, _All) ->
+ to_prom(couch_replicator_stream_responses_failure_total, counter, val(Info));
+couch_to_prom([couch_replicator, stream_responses, success], Info, All) ->
+ Total = val(Info) + val([couch_replicator, stream_responses, failure], All),
+ to_prom(couch_replicator_stream_responses_total, counter, Total);
+
+couch_to_prom([couchdb, auth_cache_hits], Info, All) ->
+ Total = val(Info) + val([couchdb, auth_cache_misses], All),
+ to_prom(auth_cache_requests_total, counter, Total);
+couch_to_prom([couchdb, auth_cache_misses], Info, _All) ->
+ to_prom(auth_cache_misses_total, counter, val(Info));
+couch_to_prom([couchdb, httpd_request_methods, 'COPY'], Info, _All) ->
+ to_prom(httpd_request_methods, counter, {[{method, 'COPY'}], val(Info)});
+couch_to_prom([couchdb, httpd_request_methods, Method], Info, _All) ->
+ to_prom(httpd_request_methods, {[{method, Method}], val(Info)});
+couch_to_prom([couchdb, httpd_status_codes, Code], Info, _All) ->
+ to_prom(httpd_status_codes, {[{code, Code}], val(Info)});
+
+couch_to_prom([ddoc_cache, hit], Info, All) ->
+ Total = val(Info) + val([ddoc_cache, miss], All),
+ to_prom(ddoc_cache_requests_total, counter, Total);
+couch_to_prom([ddoc_cache, miss], Info, _All) ->
+ to_prom(ddoc_cache_requests_failures_total, counter, val(Info));
+couch_to_prom([ddoc_cache, recovery], Info, _All) ->
+ to_prom(ddoc_cache_requests_recovery_total, counter, val(Info));
+
+couch_to_prom([fabric, read_repairs, failure], Info, _All) ->
+ to_prom(fabric_read_repairs_failures_total, counter, val(Info));
+couch_to_prom([fabric, read_repairs, success], Info, All) ->
+ Total = val(Info) + val([fabric, read_repairs, failure], All),
+ to_prom(fabric_read_repairs_total, counter, Total);
+
+couch_to_prom([rexi, streams, timeout, init_stream], Info, _All) ->
+ to_prom(rexi_streams_timeout_total, counter, {[{stage, init_stream}], val(Info)});
+couch_to_prom([rexi_streams, timeout, Stage], Info, _All) ->
+ to_prom(rexi_streams_timeout_total, {[{stage, Stage}], val(Info)});
+
+couch_to_prom([couchdb | Rest], Info, All) ->
+ couch_to_prom(Rest, Info, All);
+
+couch_to_prom(Path, Info, _All) ->
+ case lists:keyfind(type, 1, Info) of
+ {type, counter} ->
+ Metric = counter_metric(Path),
+ to_prom(Metric, counter, val(Info));
+ {type, gauge} ->
+ to_prom(path_to_name(Path), gauge, val(Info));
+ {type, histogram} ->
+ to_prom_summary(Path, Info)
+ end.
+
+to_prom(Metric, Type, Data) ->
+ TypeStr = to_bin(io_lib:format("# TYPE ~s ~s", [to_prom_name(Metric), Type])),
+ [TypeStr] ++ to_prom(Metric, Data).
+
+to_prom(Metric, Instances) when is_list(Instances) ->
+ lists:flatmap(fun(Inst) -> to_prom(Metric, Inst) end, Instances);
+to_prom(Metric, {Labels, Value}) ->
+ LabelParts = lists:map(fun({K, V}) ->
+ lists:flatten(io_lib:format("~s=\"~s\"", [to_bin(K), to_bin(V)]))
+ end, Labels),
+ MetricStr = case length(LabelParts) > 0 of
+ true ->
+ LabelStr = string:join(LabelParts, ", "),
+ lists:flatten(io_lib:format("~s{~s}", [to_prom_name(Metric), LabelStr]));
+ false ->
+ lists:flatten(io_lib:format("~s", [to_prom_name(Metric)]))
+ end,
+ [to_bin(io_lib:format("~s ~p", [MetricStr, Value]))];
+to_prom(Metric, Value) ->
+ [to_bin(io_lib:format("~s ~p", [to_prom_name(Metric), Value]))].
+
+to_prom_summary(Path, Info) ->
+ Metric = path_to_name(Path ++ ["seconds"]),
+ {value, Value} = lists:keyfind(value, 1, Info),
+ {arithmetic_mean, Mean} = lists:keyfind(arithmetic_mean, 1, Value),
+ {percentile, Percentiles} = lists:keyfind(percentile, 1, Value),
+ {n, Count} = lists:keyfind(n, 1, Value),
+ Quantiles = lists:map(fun({Perc, Val0}) ->
+        % Prometheus uses seconds, so we need to convert milliseconds to seconds
+ Val = Val0/1000,
+ case Perc of
+ 50 -> {[{quantile, <<"0.5">>}], Val};
+ 75 -> {[{quantile, <<"0.75">>}], Val};
+ 90 -> {[{quantile, <<"0.9">>}], Val};
+ 95 -> {[{quantile, <<"0.95">>}], Val};
+ 99 -> {[{quantile, <<"0.99">>}], Val};
+ 999 -> {[{quantile, <<"0.999">>}], Val}
+ end
+ end, Percentiles),
+ SumMetric = path_to_name(Path ++ ["seconds", "sum"]),
+ SumStat = to_prom(SumMetric, Count * Mean),
+ CountMetric = path_to_name(Path ++ ["seconds", "count"]),
+ CountStat = to_prom(CountMetric, Count),
+ to_prom(Metric, summary, Quantiles) ++ [SumStat, CountStat].
+
+to_prom_name(Metric) ->
+ to_bin(io_lib:format("couchdb_~s", [Metric])).
+
+path_to_name(Path) ->
+ Parts = lists:map(fun(Part) ->
+ io_lib:format("~s", [Part])
+ end, Path),
+ string:join(Parts, "_").
+
+counter_metric(Path) ->
+ Name = path_to_name(Path),
+ case string:find(Name, <<"_total">>, trailing) == <<"_total">> of
+ true -> Name;
+ false -> to_bin(io_lib:format("~s_total", [Name]))
+ end.
+
+to_bin(Data) when is_list(Data) ->
+ iolist_to_binary(Data);
+to_bin(Data) when is_atom(Data) ->
+ atom_to_binary(Data, utf8);
+to_bin(Data) when is_integer(Data) ->
+ integer_to_binary(Data);
+to_bin(Data) when is_binary(Data) ->
+ Data.
+
+val(Data) ->
+ {value, V} = lists:keyfind(value, 1, Data),
+ V.
+
+val(Key, Stats) ->
+ {Key, Data} = lists:keyfind(Key, 1, Stats),
+ val(Data). \ No newline at end of file
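For reference, to_prom/3 renders one # TYPE header followed by one sample line
per labelled instance. A small sketch with the expected result shown in
comments, matching the assertion in the unit tests below:

    couch_prometheus_util:to_prom(httpd_status_codes, counter, {[{code, 200}], 3}).
    %% [<<"# TYPE couchdb_httpd_status_codes counter">>,
    %%  <<"couchdb_httpd_status_codes{code=\"200\"} 3">>]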
diff --git a/src/couch_prometheus/test/eunit/couch_prometheus_e2e_tests.erl b/src/couch_prometheus/test/eunit/couch_prometheus_e2e_tests.erl
new file mode 100644
index 000000000..c862b9a9f
--- /dev/null
+++ b/src/couch_prometheus/test/eunit/couch_prometheus_e2e_tests.erl
@@ -0,0 +1,147 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_prometheus_e2e_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-define(USER, "prometheus_test_admin").
+-define(PASS, "pass").
+-define(AUTH, {basic_auth, {?USER, ?PASS}}).
+-define(PROM_PORT, "17986").
+-define(CONTENT_JSON, {"Content-Type", "application/json"}).
+
+start() ->
+ test_util:start_couch([chttpd, couch_prometheus]).
+
+setup() ->
+ Hashed = couch_passwords:hash_admin_password(?PASS),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
+ ok = config:set_integer("stats", "interval", 2),
+ ok = config:set_integer("couch_prometheus", "interval", 1),
+ Port = mochiweb_socket_server:get(chttpd, port),
+ construct_url(Port).
+
+teardown(_) ->
+ ok.
+
+couch_prometheus_e2e_test_() ->
+ {
+ "Prometheus E2E Tests",
+ {
+ setup,
+ fun start/0, fun test_util:stop_couch/1,
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun node_call_chttpd/1,
+ fun node_call_prometheus_http/1,
+ fun deny_prometheus_http/1,
+ fun node_see_updated_metrics/1
+ ]
+ }
+ }
+ }.
+
+% normal chttpd path via cluster port
+node_call_chttpd(Url) ->
+ {ok, RC1, _, _} = test_request:get(
+ Url,
+ [?CONTENT_JSON, ?AUTH],
+ []
+ ),
+ ?_assertEqual(200, RC1).
+
+% normal chttpd path via cluster port
+node_see_updated_metrics(Url) ->
+ TmpDb = ?tempdb(),
+ Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
+ Port = mochiweb_socket_server:get(chttpd, port),
+ DbUrl = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
+ create_db(DbUrl),
+ [create_doc(DbUrl, "testdoc" ++ integer_to_binary(I)) || I <- lists:seq(1, 100)],
+ delete_db(DbUrl),
+ InitMetrics = wait_for_metrics(Url, "couchdb_httpd_requests_total 0", 5000),
+ UpdatedMetrics = wait_for_metrics(Url, "couchdb_httpd_requests_total", 10000),
+    % since the puts happen so fast, we can't rely on an exact
+    % total request count given the scraping interval, so we just want to
+    % acknowledge that a change has occurred
+ ?_assertNotEqual(InitMetrics, UpdatedMetrics).
+
+% dedicated prometheus port, enabled via additional_port
+node_call_prometheus_http(_) ->
+ maybe_start_http_server("true"),
+ Url = construct_url(?PROM_PORT),
+ {ok, RC1, _, _} = test_request:get(
+ Url,
+ [?CONTENT_JSON, ?AUTH],
+ []
+ ),
+ % since this port doesn't require auth, this should work
+ {ok, RC2, _, _} = test_request:get(
+ Url,
+ [?CONTENT_JSON],
+ []
+ ),
+ delete_db(Url),
+ ?_assertEqual(200, RC2).
+
+% we don't start the http server
+deny_prometheus_http(_) ->
+ maybe_start_http_server("false"),
+ Url = construct_url(?PROM_PORT),
+ Response = test_request:get(
+ Url,
+ [?CONTENT_JSON, ?AUTH],
+ []
+ ),
+ ?_assertEqual({error,{conn_failed,{error,econnrefused}}}, Response).
+
+maybe_start_http_server(Additional) ->
+ test_util:stop_applications([couch_prometheus, chttpd]),
+ Hashed = couch_passwords:hash_admin_password(?PASS),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
+ ok = config:set("prometheus", "additional_port", Additional),
+ ok = config:set("prometheus", "port", ?PROM_PORT),
+ test_util:start_applications([couch_prometheus, chttpd]).
+
+construct_url(Port) ->
+ Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
+ lists:concat(["http://", Addr, ":", Port, "/_node/_local/_prometheus"]).
+
+create_db(Url) ->
+ Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
+ Port = mochiweb_socket_server:get(chttpd, port),
+ {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
+ ?assert(Status =:= 201 orelse Status =:= 202).
+
+delete_db(Url) ->
+ {ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
+
+create_doc(Url, Id) ->
+ test_request:put(Url ++ "/" ++ Id,
+ [?CONTENT_JSON, ?AUTH], "{\"mr\": \"rockoartischocko\"}").
+
+wait_for_metrics(Url, Value, Timeout) ->
+ test_util:wait(fun() ->
+ {ok, _, _, Body} = test_request:get(
+ Url,
+ [?CONTENT_JSON, ?AUTH],
+ []
+ ),
+ case string:find(Body, Value) of
+ nomatch -> wait;
+ M -> M
+ end
+ end, Timeout).
diff --git a/src/couch_prometheus/test/eunit/couch_prometheus_util_tests.erl b/src/couch_prometheus/test/eunit/couch_prometheus_util_tests.erl
new file mode 100644
index 000000000..8fe17e561
--- /dev/null
+++ b/src/couch_prometheus/test/eunit/couch_prometheus_util_tests.erl
@@ -0,0 +1,65 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_prometheus_util_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+
+-import(couch_prometheus_util, [
+ to_prom/3,
+ to_prom_summary/2
+]).
+
+couch_prometheus_util_test_() ->
+ [
+ ?_assertEqual(<<"couchdb_ddoc_cache 10">>,
+ test_to_prom_output(ddoc_cache, counter, 10)),
+ ?_assertEqual(<<"couchdb_httpd_status_codes{code=\"200\"} 3">>,
+ test_to_prom_output(httpd_status_codes, counter, {[{code, 200}], 3})),
+ ?_assertEqual(<<"couchdb_temperature_celsius 36">>,
+ test_to_prom_output(temperature_celsius, gauge, 36)),
+ ?_assertEqual(<<"couchdb_mango_query_time_seconds{quantile=\"0.75\"} 4.5">>,
+ test_to_prom_sum_output([mango_query_time], [
+ {value,
+ [
+ {min,0.0},
+ {max,0.0},
+ {arithmetic_mean,0.0},
+ {geometric_mean,0.0},
+ {harmonic_mean,0.0},
+ {median,0.0},{variance,0.0},
+ {standard_deviation,0.0},
+ {skewness,0.0},{kurtosis,0.0},
+ {percentile,[
+ {50,0.0},
+ {75, 4500},
+ {90,0.0},
+ {95,0.0},
+ {99,0.0},
+ {999,0.0}]},
+ {histogram,[
+ {0,0}]},
+ {n,0}
+ ]
+ },
+ {type,histogram},
+ {desc, <<"length of time processing a mango query">>}
+ ]))
+ ].
+
+test_to_prom_output(Metric, Type, Val) ->
+ Out = to_prom(Metric, Type, Val),
+ lists:nth(2, Out).
+
+test_to_prom_sum_output(Metric, Info) ->
+ Out = to_prom_summary(Metric, Info),
+ lists:nth(3, Out). \ No newline at end of file
diff --git a/src/couch_pse_tests/src/cpse_gather.erl b/src/couch_pse_tests/src/cpse_gather.erl
deleted file mode 100644
index 7804d419e..000000000
--- a/src/couch_pse_tests/src/cpse_gather.erl
+++ /dev/null
@@ -1,95 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_gather).
-
-
--export([
- module/1
-]).
-
-
-module(ModName) ->
- Exports = ModName:module_info(exports),
-
- SetupMod = get_setup_all(ModName, Exports),
- TeardownMod = get_teardown_all(ModName, Exports),
- SetupTest = get_fun(ModName, setup_each, 0, Exports),
- TeardownTest = get_fun(ModName, teardown_each, 1, Exports),
-
- RevTests = lists:foldl(fun({Fun, Arity}, Acc) ->
- case {atom_to_list(Fun), Arity} of
- {[$c, $p, $s, $e, $_ | _], Arity} when Arity == 0; Arity == 1 ->
- TestFun = make_test_fun(ModName, Fun, Arity),
- [TestFun | Acc];
- _ ->
- Acc
- end
- end, [], Exports),
- Tests = lists:reverse(RevTests),
-
- {
- setup,
- spawn,
- SetupMod,
- TeardownMod,
- [
- {
- foreach,
- SetupTest,
- TeardownTest,
- Tests
- }
- ]
- }.
-
-
-get_setup_all(ModName, Exports) ->
- case lists:member({setup_all, 0}, Exports) of
- true -> fun ModName:setup_all/0;
- false -> fun cpse_util:setup_all/0
- end.
-
-
-get_teardown_all(ModName, Exports) ->
- case lists:member({teardown_all, 1}, Exports) of
- true -> fun ModName:teardown_all/1;
- false -> fun cpse_util:teardown_all/1
- end.
-
-
-get_fun(ModName, FunName, Arity, Exports) ->
- case lists:member({FunName, Arity}, Exports) of
- true -> fun ModName:FunName/Arity;
- false when Arity == 0 -> fun() -> ok end;
- false when Arity == 1 -> fun(_) -> ok end
- end.
-
-
-make_test_fun(Module, Fun, Arity) ->
- Name = atom_to_list(Fun),
- case Arity of
- 0 ->
- fun(_) ->
- {timeout, 60, {Name, fun() ->
- process_flag(trap_exit, true),
- Module:Fun()
- end}}
- end;
- 1 ->
- fun(Arg) ->
- {timeout, 60, {Name, fun() ->
- process_flag(trap_exit, true),
- Module:Fun(Arg)
- end}}
- end
- end.
diff --git a/src/couch_pse_tests/src/cpse_test_attachments.erl b/src/couch_pse_tests/src/cpse_test_attachments.erl
deleted file mode 100644
index ddd1077d1..000000000
--- a/src/couch_pse_tests/src/cpse_test_attachments.erl
+++ /dev/null
@@ -1,99 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_attachments).
--compile(export_all).
--compile(nowarn_export_all).
-
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
-setup_each() ->
- {ok, Db} = cpse_util:create_db(),
- Db.
-
-
-teardown_each(Db) ->
- ok = couch_server:delete(couch_db:name(Db), []).
-
-
-cpse_write_attachment(Db1) ->
- AttBin = crypto:strong_rand_bytes(32768),
-
- try
- [Att0] = cpse_util:prep_atts(Db1, [
- {<<"ohai.txt">>, AttBin}
- ]),
-
- {stream, Stream} = couch_att:fetch(data, Att0),
- ?assertEqual(true, couch_db_engine:is_active_stream(Db1, Stream)),
-
- Actions = [{create, {<<"first">>, {[]}, [Att0]}}],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
- cpse_util:shutdown_db(Db2),
-
- {ok, Db3} = couch_db:reopen(Db2),
-
- [FDI] = couch_db_engine:open_docs(Db3, [<<"first">>]),
-
- #rev_info{
- rev = {RevPos, PrevRevId},
- deleted = Deleted,
- body_sp = DocPtr
- } = cpse_util:prev_rev(FDI),
-
- Doc0 = #doc{
- id = <<"foo">>,
- revs = {RevPos, [PrevRevId]},
- deleted = Deleted,
- body = DocPtr
- },
-
- Doc1 = couch_db_engine:read_doc_body(Db3, Doc0),
- Atts1 = if not is_binary(Doc1#doc.atts) -> Doc1#doc.atts; true ->
- couch_compress:decompress(Doc1#doc.atts)
- end,
-
- StreamSrc = fun(Sp) -> couch_db_engine:open_read_stream(Db3, Sp) end,
- [Att1] = [couch_att:from_disk_term(StreamSrc, T) || T <- Atts1],
- ReadBin = couch_att:to_binary(Att1),
- ?assertEqual(AttBin, ReadBin)
- catch throw:not_supported ->
- ok
- end.
-
-
-% N.B. This test may be overly specific for some theoretical
-% storage engines that don't re-initialize their
-% attachments streams when restarting (for instance if
-% we ever have something that stores attachments in
-% an external object store)
-cpse_inactive_stream(Db1) ->
- AttBin = crypto:strong_rand_bytes(32768),
-
- try
- [Att0] = cpse_util:prep_atts(Db1, [
- {<<"ohai.txt">>, AttBin}
- ]),
-
- {stream, Stream} = couch_att:fetch(data, Att0),
- ?assertEqual(true, couch_db_engine:is_active_stream(Db1, Stream)),
-
- cpse_util:shutdown_db(Db1),
- {ok, Db2} = couch_db:reopen(Db1),
-
- ?assertEqual(false, couch_db_engine:is_active_stream(Db2, Stream))
- catch throw:not_supported ->
- ok
- end.
diff --git a/src/couch_pse_tests/src/cpse_test_compaction.erl b/src/couch_pse_tests/src/cpse_test_compaction.erl
deleted file mode 100644
index 6bc470b2f..000000000
--- a/src/couch_pse_tests/src/cpse_test_compaction.erl
+++ /dev/null
@@ -1,318 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_compaction).
--compile(export_all).
--compile(nowarn_export_all).
-
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
-setup_each() ->
- {ok, Db} = cpse_util:create_db(),
- Db.
-
-
-teardown_each(Db) ->
- ok = couch_server:delete(couch_db:name(Db), []).
-
-
-cpse_compact_empty(Db1) ->
- Term1 = cpse_util:db_as_term(Db1),
-
- cpse_util:compact(Db1),
-
- {ok, Db2} = couch_db:reopen(Db1),
- Term2 = cpse_util:db_as_term(Db2),
-
- Diff = cpse_util:term_diff(Term1, Term2),
- ?assertEqual(nodiff, Diff).
-
-
-cpse_compact_doc(Db1) ->
- Actions = [{create, {<<"foo">>, {[]}}}],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
- Term1 = cpse_util:db_as_term(Db2),
-
- cpse_util:compact(Db2),
-
- {ok, Db3} = couch_db:reopen(Db2),
- Term2 = cpse_util:db_as_term(Db3),
-
- Diff = cpse_util:term_diff(Term1, Term2),
- ?assertEqual(nodiff, Diff).
-
-
-cpse_compact_local_doc(Db1) ->
- Actions = [{create, {<<"_local/foo">>, {[]}}}],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
- Term1 = cpse_util:db_as_term(Db2),
-
- cpse_util:compact(Db2),
-
- {ok, Db3} = couch_db:reopen(Db2),
- Term2 = cpse_util:db_as_term(Db3),
-
- Diff = cpse_util:term_diff(Term1, Term2),
- ?assertEqual(nodiff, Diff).
-
-
-cpse_compact_with_everything(Db1) ->
- % Add a whole bunch of docs
- DocActions = lists:map(fun(Seq) ->
- {create, {docid(Seq), {[{<<"int">>, Seq}]}}}
- end, lists:seq(1, 1000)),
-
- LocalActions = lists:map(fun(I) ->
- {create, {local_docid(I), {[{<<"int">>, I}]}}}
- end, lists:seq(1, 25)),
-
- Actions1 = DocActions ++ LocalActions,
-
- {ok, Db2} = cpse_util:apply_batch(Db1, Actions1),
- ok = couch_db:set_security(Db1, {[{<<"foo">>, <<"bar">>}]}),
- ok = couch_db:set_revs_limit(Db1, 500),
-
- Actions2 = [
- {create, {<<"foo">>, {[]}}},
- {create, {<<"bar">>, {[{<<"hooray">>, <<"purple">>}]}}},
- {conflict, {<<"bar">>, {[{<<"booo">>, false}]}}}
- ],
-
- {ok, Db3} = cpse_util:apply_actions(Db2, Actions2),
-
- [FooFDI, BarFDI] = couch_db_engine:open_docs(Db3, [<<"foo">>, <<"bar">>]),
-
- FooRev = cpse_util:prev_rev(FooFDI),
- BarRev = cpse_util:prev_rev(BarFDI),
-
- Actions3 = [
- {purge, {<<"foo">>, FooRev#rev_info.rev}},
- {purge, {<<"bar">>, BarRev#rev_info.rev}}
- ],
-
- {ok, Db4} = cpse_util:apply_actions(Db3, Actions3),
-
- PurgedIdRevs = [
- {<<"bar">>, [BarRev#rev_info.rev]},
- {<<"foo">>, [FooRev#rev_info.rev]}
- ],
-
- {ok, PIdRevs4} = couch_db_engine:fold_purge_infos(
- Db4, 0, fun fold_fun/2, [], []),
- ?assertEqual(PurgedIdRevs, PIdRevs4),
-
- {ok, Db5} = try
- [Att0, Att1, Att2, Att3, Att4] = cpse_util:prep_atts(Db4, [
- {<<"ohai.txt">>, crypto:strong_rand_bytes(2048)},
- {<<"stuff.py">>, crypto:strong_rand_bytes(32768)},
- {<<"a.erl">>, crypto:strong_rand_bytes(29)},
- {<<"a.hrl">>, crypto:strong_rand_bytes(5000)},
- {<<"a.app">>, crypto:strong_rand_bytes(400)}
- ]),
-
- Actions4 = [
- {create, {<<"small_att">>, {[]}, [Att0]}},
- {create, {<<"large_att">>, {[]}, [Att1]}},
- {create, {<<"multi_att">>, {[]}, [Att2, Att3, Att4]}}
- ],
- cpse_util:apply_actions(Db4, Actions4)
- catch throw:not_supported ->
- {ok, Db4}
- end,
- {ok, Db6} = couch_db:reopen(Db5),
-
- Term1 = cpse_util:db_as_term(Db6),
-
- Config = [
- {"database_compaction", "doc_buffer_size", "1024"},
- {"database_compaction", "checkpoint_after", "2048"}
- ],
-
- cpse_util:with_config(Config, fun() ->
- cpse_util:compact(Db6)
- end),
-
- {ok, Db7} = couch_db:reopen(Db6),
- Term2 = cpse_util:db_as_term(Db7),
-
- Diff = cpse_util:term_diff(Term1, Term2),
- ?assertEqual(nodiff, Diff).
-
-
-cpse_recompact_updates(Db1) ->
- Actions1 = lists:map(fun(Seq) ->
- {create, {docid(Seq), {[{<<"int">>, Seq}]}}}
- end, lists:seq(1, 1000)),
- {ok, Db2} = cpse_util:apply_batch(Db1, Actions1),
-
- {ok, Compactor} = couch_db:start_compact(Db2),
- catch erlang:suspend_process(Compactor),
-
- Actions2 = [
- {update, {<<"0001">>, {[{<<"updated">>, true}]}}},
- {create, {<<"boop">>, {[]}}}
- ],
-
- {ok, Db3} = cpse_util:apply_actions(Db2, Actions2),
- Term1 = cpse_util:db_as_term(Db3),
-
- catch erlang:resume_process(Compactor),
- cpse_util:compact(Db3),
-
- {ok, Db4} = couch_db:reopen(Db3),
- Term2 = cpse_util:db_as_term(Db4),
-
- Diff = cpse_util:term_diff(Term1, Term2),
- ?assertEqual(nodiff, Diff).
-
-
-cpse_purge_during_compact(Db1) ->
- Actions1 = lists:map(fun(Seq) ->
- {create, {docid(Seq), {[{<<"int">>, Seq}]}}}
- end, lists:seq(1, 1000)),
- Actions2 = [
- {create, {<<"foo">>, {[]}}},
- {create, {<<"bar">>, {[]}}},
- {create, {<<"baz">>, {[]}}}
- ],
- {ok, Db2} = cpse_util:apply_batch(Db1, Actions1 ++ Actions2),
- Actions3 = [
- {conflict, {<<"bar">>, {[{<<"vsn">>, 2}]}}}
- ],
- {ok, Db3} = cpse_util:apply_actions(Db2, Actions3),
-
- {ok, Pid} = couch_db:start_compact(Db3),
- catch erlang:suspend_process(Pid),
-
- [BarFDI, BazFDI] = couch_db_engine:open_docs(Db3, [<<"bar">>, <<"baz">>]),
- BarRev = cpse_util:prev_rev(BarFDI),
- BazRev = cpse_util:prev_rev(BazFDI),
- Actions4 = [
- {purge, {<<"bar">>, BarRev#rev_info.rev}},
- {purge, {<<"baz">>, BazRev#rev_info.rev}}
- ],
-
- {ok, Db4} = cpse_util:apply_actions(Db3, Actions4),
- Term1 = cpse_util:db_as_term(Db4),
-
- catch erlang:resume_process(Pid),
- cpse_util:compact(Db4),
-
- {ok, Db5} = couch_db:reopen(Db4),
- Term2 = cpse_util:db_as_term(Db5),
-
- Diff = cpse_util:term_diff(Term1, Term2),
- ?assertEqual(nodiff, Diff).
-
-
-cpse_multiple_purge_during_compact(Db1) ->
- Actions1 = lists:map(fun(Seq) ->
- {create, {docid(Seq), {[{<<"int">>, Seq}]}}}
- end, lists:seq(1, 1000)),
- Actions2 = [
- {create, {<<"foo">>, {[]}}},
- {create, {<<"bar">>, {[]}}},
- {create, {<<"baz">>, {[]}}}
- ],
- {ok, Db2} = cpse_util:apply_batch(Db1, Actions1 ++ Actions2),
-
- Actions3 = [
- {conflict, {<<"bar">>, {[{<<"vsn">>, 2}]}}}
- ],
- {ok, Db3} = cpse_util:apply_actions(Db2, Actions3),
-
-
- {ok, Pid} = couch_db:start_compact(Db3),
- catch erlang:suspend_process(Pid),
-
- [BarFDI, BazFDI] = couch_db_engine:open_docs(Db3, [<<"bar">>, <<"baz">>]),
- BarRev = cpse_util:prev_rev(BarFDI),
- Actions4 = [
- {purge, {<<"bar">>, BarRev#rev_info.rev}}
- ],
- {ok, Db4} = cpse_util:apply_actions(Db3, Actions4),
-
- BazRev = cpse_util:prev_rev(BazFDI),
- Actions5 = [
- {purge, {<<"baz">>, BazRev#rev_info.rev}}
- ],
-
- {ok, Db5} = cpse_util:apply_actions(Db4, Actions5),
- Term1 = cpse_util:db_as_term(Db5),
-
- catch erlang:resume_process(Pid),
- cpse_util:compact(Db5),
-
- {ok, Db6} = couch_db:reopen(Db5),
- Term2 = cpse_util:db_as_term(Db6),
-
- Diff = cpse_util:term_diff(Term1, Term2),
- ?assertEqual(nodiff, Diff).
-
-
-cpse_compact_purged_docs_limit(Db1) ->
- NumDocs = 1200,
- {RActions, RIds} = lists:foldl(fun(Id, {CActions, CIds}) ->
- Id1 = docid(Id),
- Action = {create, {Id1, {[{<<"int">>, Id}]}}},
- {[Action| CActions], [Id1| CIds]}
- end, {[], []}, lists:seq(1, NumDocs)),
- Ids = lists:reverse(RIds),
- {ok, Db2} = cpse_util:apply_batch(Db1, lists:reverse(RActions)),
-
- FDIs = couch_db_engine:open_docs(Db2, Ids),
- RActions2 = lists:foldl(fun(FDI, CActions) ->
- Id = FDI#full_doc_info.id,
- PrevRev = cpse_util:prev_rev(FDI),
- Rev = PrevRev#rev_info.rev,
- [{purge, {Id, Rev}}| CActions]
- end, [], FDIs),
- {ok, Db3} = cpse_util:apply_batch(Db2, lists:reverse(RActions2)),
-
- % check that before compaction all NumDocs purge requests
- % are in the purge tree,
- % even though NumDocs=1200 is greater than purged_docs_limit=1000
- {ok, PurgedIdRevs} = couch_db_engine:fold_purge_infos(
- Db3, 0, fun fold_fun/2, [], []),
- ?assertEqual(1, couch_db_engine:get_oldest_purge_seq(Db3)),
- ?assertEqual(NumDocs, length(PurgedIdRevs)),
-
- % compact db
- cpse_util:compact(Db3),
- {ok, Db4} = couch_db:reopen(Db3),
-
- % check that after compaction only purged_docs_limit purge requests
- % are in the purge tree
- PurgedDocsLimit = couch_db_engine:get_purge_infos_limit(Db4),
- OldestPSeq = couch_db_engine:get_oldest_purge_seq(Db4),
- {ok, PurgedIdRevs2} = couch_db_engine:fold_purge_infos(
- Db4, OldestPSeq - 1, fun fold_fun/2, [], []),
- ExpectedOldestPSeq = NumDocs - PurgedDocsLimit + 1,
- ?assertEqual(ExpectedOldestPSeq, OldestPSeq),
- ?assertEqual(PurgedDocsLimit, length(PurgedIdRevs2)).
-
-
-docid(I) ->
- Str = io_lib:format("~4..0b", [I]),
- iolist_to_binary(Str).
-
-
-local_docid(I) ->
- Str = io_lib:format("_local/~4..0b", [I]),
- iolist_to_binary(Str).
-
-
-fold_fun({_PSeq, _UUID, Id, Revs}, Acc) ->
- {ok, [{Id, Revs} | Acc]}.
diff --git a/src/couch_pse_tests/src/cpse_test_copy_purge_infos.erl b/src/couch_pse_tests/src/cpse_test_copy_purge_infos.erl
deleted file mode 100644
index 4e41430d3..000000000
--- a/src/couch_pse_tests/src/cpse_test_copy_purge_infos.erl
+++ /dev/null
@@ -1,82 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_copy_purge_infos).
--compile(export_all).
--compile(nowarn_export_all).
-
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
--define(NUM_DOCS, 100).
-
-
-setup_each() ->
- {ok, SrcDb} = cpse_util:create_db(),
- {ok, SrcDb2} = create_and_purge(SrcDb),
- {ok, TrgDb} = cpse_util:create_db(),
- {SrcDb2, TrgDb}.
-
-
-teardown_each({SrcDb, TrgDb}) ->
- ok = couch_server:delete(couch_db:name(SrcDb), []),
- ok = couch_server:delete(couch_db:name(TrgDb), []).
-
-
-cpse_copy_empty_purged_info({_, Db}) ->
- {ok, Db1} = couch_db_engine:copy_purge_infos(Db, []),
- ?assertEqual(ok, cpse_util:assert_each_prop(Db1, [{purge_infos, []}])).
-
-
-cpse_copy_purged_info({SrcDb, TrgDb}) ->
- {ok, RPIs} = couch_db_engine:fold_purge_infos(SrcDb, 0, fun(PI, Acc) ->
- {ok, [PI | Acc]}
- end, [], []),
- PIs = lists:reverse(RPIs),
- AEPFold = fun({PSeq, UUID, Id, Revs}, {CPSeq, CPurges}) ->
- {max(PSeq, CPSeq), [{UUID, Id, Revs} | CPurges]}
- end,
- {PurgeSeq, RPurges} = lists:foldl(AEPFold, {0, []}, PIs),
- Purges = lists:reverse(RPurges),
- {ok, TrgDb2} = couch_db_engine:copy_purge_infos(TrgDb, PIs),
- AssertProps = [{purge_seq, PurgeSeq}, {purge_infos, Purges}],
- ?assertEqual(ok, cpse_util:assert_each_prop(TrgDb2, AssertProps)).
-
-
-create_and_purge(Db) ->
- {RActions, RIds} = lists:foldl(fun(Id, {CActions, CIds}) ->
- Id1 = docid(Id),
- Action = {create, {Id1, {[{<<"int">>, Id}]}}},
- {[Action| CActions], [Id1| CIds]}
- end, {[], []}, lists:seq(1, ?NUM_DOCS)),
- Actions = lists:reverse(RActions),
- Ids = lists:reverse(RIds),
- {ok, Db1} = cpse_util:apply_batch(Db, Actions),
-
- FDIs = couch_db_engine:open_docs(Db1, Ids),
- RActions2 = lists:foldl(fun(FDI, CActions) ->
- Id = FDI#full_doc_info.id,
- PrevRev = cpse_util:prev_rev(FDI),
- Rev = PrevRev#rev_info.rev,
- Action = {purge, {Id, Rev}},
- [Action| CActions]
- end, [], FDIs),
- Actions2 = lists:reverse(RActions2),
- {ok, Db2} = cpse_util:apply_batch(Db1, Actions2),
- {ok, Db2}.
-
-
-docid(I) ->
- Str = io_lib:format("~4..0b", [I]),
- iolist_to_binary(Str).
diff --git a/src/couch_pse_tests/src/cpse_test_fold_changes.erl b/src/couch_pse_tests/src/cpse_test_fold_changes.erl
deleted file mode 100644
index 436396276..000000000
--- a/src/couch_pse_tests/src/cpse_test_fold_changes.erl
+++ /dev/null
@@ -1,185 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_fold_changes).
--compile(export_all).
--compile(nowarn_export_all).
-
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
--define(NUM_DOCS, 25).
-
-
-setup_each() ->
- {ok, Db} = cpse_util:create_db(),
- Db.
-
-
-teardown_each(Db) ->
- ok = couch_server:delete(couch_db:name(Db), []).
-
-
-cpse_empty_changes(Db) ->
- ?assertEqual(0, couch_db_engine:count_changes_since(Db, 0)),
- ?assertEqual({ok, []},
- couch_db_engine:fold_changes(Db, 0, fun fold_fun/2, [], [])).
-
-
-cpse_single_change(Db1) ->
- Actions = [{create, {<<"a">>, {[]}}}],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
-
- ?assertEqual(1, couch_db_engine:count_changes_since(Db2, 0)),
- ?assertEqual({ok, [{<<"a">>, 1}]},
- couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], [])).
-
-
-cpse_two_changes(Db1) ->
- Actions = [
- {create, {<<"a">>, {[]}}},
- {create, {<<"b">>, {[]}}}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
-
- ?assertEqual(2, couch_db_engine:count_changes_since(Db2, 0)),
- {ok, Changes} =
- couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], []),
- ?assertEqual([{<<"a">>, 1}, {<<"b">>, 2}], lists:reverse(Changes)).
-
-
-cpse_two_changes_batch(Db1) ->
- Actions = [
- {batch, [
- {create, {<<"a">>, {[]}}},
- {create, {<<"b">>, {[]}}}
- ]}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
-
- ?assertEqual(2, couch_db_engine:count_changes_since(Db2, 0)),
- {ok, Changes} =
- couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], []),
- ?assertEqual([{<<"a">>, 1}, {<<"b">>, 2}], lists:reverse(Changes)).
-
-
-cpse_two_changes_batch_sorted(Db1) ->
- Actions = [
- {batch, [
- {create, {<<"b">>, {[]}}},
- {create, {<<"a">>, {[]}}}
- ]}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
-
- ?assertEqual(2, couch_db_engine:count_changes_since(Db2, 0)),
- {ok, Changes} =
- couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], []),
- ?assertEqual([{<<"a">>, 1}, {<<"b">>, 2}], lists:reverse(Changes)).
-
-
-cpse_update_one(Db1) ->
- Actions = [
- {create, {<<"a">>, {[]}}},
- {update, {<<"a">>, {[]}}}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
-
- ?assertEqual(1, couch_db_engine:count_changes_since(Db2, 0)),
- ?assertEqual({ok, [{<<"a">>, 2}]},
- couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], [])).
-
-
-cpse_update_first_of_two(Db1) ->
- Actions = [
- {create, {<<"a">>, {[]}}},
- {create, {<<"b">>, {[]}}},
- {update, {<<"a">>, {[]}}}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
-
- ?assertEqual(2, couch_db_engine:count_changes_since(Db2, 0)),
- {ok, Changes} =
- couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], []),
- ?assertEqual([{<<"b">>, 2}, {<<"a">>, 3}], lists:reverse(Changes)).
-
-
-cpse_update_second_of_two(Db1) ->
- Actions = [
- {create, {<<"a">>, {[]}}},
- {create, {<<"b">>, {[]}}},
- {update, {<<"b">>, {[]}}}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
-
- ?assertEqual(2, couch_db_engine:count_changes_since(Db2, 0)),
- {ok, Changes} =
- couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], []),
- ?assertEqual([{<<"a">>, 1}, {<<"b">>, 3}], lists:reverse(Changes)).
-
-
-cpse_check_mutation_ordering(Db1) ->
- Actions = shuffle(lists:map(fun(Seq) ->
- {create, {docid(Seq), {[]}}}
- end, lists:seq(1, ?NUM_DOCS))),
-
- DocIdOrder = [DocId || {_, {DocId, _}} <- Actions],
- DocSeqs = lists:zip(DocIdOrder, lists:seq(1, ?NUM_DOCS)),
-
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
-
- % First let's see that we can get the correct
- % suffix/prefix starting at every update sequence
- lists:foreach(fun(Seq) ->
- {ok, Suffix} =
- couch_db_engine:fold_changes(Db2, Seq, fun fold_fun/2, [], []),
- ?assertEqual(lists:nthtail(Seq, DocSeqs), lists:reverse(Suffix)),
-
- {ok, Prefix} = couch_db_engine:fold_changes(
- Db2, Seq, fun fold_fun/2, [], [{dir, rev}]),
- ?assertEqual(lists:sublist(DocSeqs, Seq + 1), Prefix)
- end, lists:seq(0, ?NUM_DOCS)),
-
- ok = do_mutation_ordering(Db2, ?NUM_DOCS + 1, DocSeqs, []).
-
-
-do_mutation_ordering(Db, _Seq, [], FinalDocSeqs) ->
- {ok, RevOrder} = couch_db_engine:fold_changes(Db, 0, fun fold_fun/2, [], []),
- ?assertEqual(FinalDocSeqs, lists:reverse(RevOrder)),
- ok;
-
-do_mutation_ordering(Db, Seq, [{DocId, _OldSeq} | Rest], DocSeqAcc) ->
- Actions = [{update, {DocId, {[]}}}],
- {ok, NewDb} = cpse_util:apply_actions(Db, Actions),
- NewAcc = DocSeqAcc ++ [{DocId, Seq}],
- Expected = Rest ++ NewAcc,
- {ok, RevOrder} =
- couch_db_engine:fold_changes(NewDb, 0, fun fold_fun/2, [], []),
- ?assertEqual(Expected, lists:reverse(RevOrder)),
- do_mutation_ordering(NewDb, Seq + 1, Rest, NewAcc).
-
-
-shuffle(List) ->
- Paired = [{couch_rand:uniform(), I} || I <- List],
- Sorted = lists:sort(Paired),
- [I || {_, I} <- Sorted].
-
-
-fold_fun(#full_doc_info{id=Id, update_seq=Seq}, Acc) ->
- {ok, [{Id, Seq} | Acc]}.
-
-
-docid(I) ->
- Str = io_lib:format("~4..0b", [I]),
- iolist_to_binary(Str).
diff --git a/src/couch_pse_tests/src/cpse_test_fold_docs.erl b/src/couch_pse_tests/src/cpse_test_fold_docs.erl
deleted file mode 100644
index d43930c4a..000000000
--- a/src/couch_pse_tests/src/cpse_test_fold_docs.erl
+++ /dev/null
@@ -1,400 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_fold_docs).
--compile(export_all).
--compile(nowarn_export_all).
-
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
--define(NUM_DOCS, 100).
-
-
-setup_each() ->
- cpse_util:dbname().
-
-
-teardown_each(DbName) ->
- ok = couch_server:delete(DbName, []).
-
-
-cpse_fold_all(DbName) ->
- fold_all(DbName, fold_docs, fun docid/1).
-
-
-cpse_fold_all_local(DbName) ->
- fold_all(DbName, fold_local_docs, fun local_docid/1).
-
-
-cpse_fold_start_key(DbName) ->
- fold_start_key(DbName, fold_docs, fun docid/1).
-
-
-cpse_fold_start_key_local(DbName) ->
- fold_start_key(DbName, fold_local_docs, fun local_docid/1).
-
-
-cpse_fold_end_key(DbName) ->
- fold_end_key(DbName, fold_docs, fun docid/1).
-
-
-cpse_fold_end_key_local(DbName) ->
- fold_end_key(DbName, fold_local_docs, fun local_docid/1).
-
-
-cpse_fold_end_key_gt(DbName) ->
- fold_end_key_gt(DbName, fold_docs, fun docid/1).
-
-
-cpse_fold_end_key_gt_local(DbName) ->
- fold_end_key_gt(DbName, fold_local_docs, fun local_docid/1).
-
-
-cpse_fold_range(DbName) ->
- fold_range(DbName, fold_docs, fun docid/1).
-
-
-cpse_fold_range_local(DbName) ->
- fold_range(DbName, fold_local_docs, fun local_docid/1).
-
-
-cpse_fold_stop(DbName) ->
- fold_user_fun_stop(DbName, fold_docs, fun docid/1).
-
-
-cpse_fold_stop_local(DbName) ->
- fold_user_fun_stop(DbName, fold_local_docs, fun local_docid/1).
-
-
-% This is a loose test but we have to have this until
-% I figure out what to do about the total_rows/offset
-% meta data included in _all_docs
-cpse_fold_include_reductions(DbName) ->
- {ok, Db} = init_db(DbName, fun docid/1),
- FoldFun = fun(_, _, nil) -> {ok, nil} end,
- Opts = [include_reductions],
- {ok, Count, nil} = couch_db_engine:fold_docs(Db, FoldFun, nil, Opts),
- ?assert(is_integer(Count)),
- ?assert(Count >= 0).
-
-
-fold_all(DbName, FoldFun, DocIdFun) ->
- DocIds = [DocIdFun(I) || I <- lists:seq(1, ?NUM_DOCS)],
- {ok, Db} = init_db(DbName, DocIdFun),
-
- {ok, DocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], []),
- ?assertEqual(?NUM_DOCS, length(DocIdAccFwd)),
- ?assertEqual(DocIds, lists:reverse(DocIdAccFwd)),
-
- Opts = [{dir, rev}],
- {ok, DocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], Opts),
- ?assertEqual(?NUM_DOCS, length(DocIdAccRev)),
- ?assertEqual(DocIds, DocIdAccRev).
-
-
-fold_start_key(DbName, FoldFun, DocIdFun) ->
- {ok, Db} = init_db(DbName, DocIdFun),
-
- StartKeyNum = ?NUM_DOCS div 4,
- StartKey = DocIdFun(StartKeyNum),
-
- AllDocIds = [DocIdFun(I) || I <- lists:seq(1, ?NUM_DOCS)],
- DocIdsFwd = [DocIdFun(I) || I <- lists:seq(StartKeyNum, ?NUM_DOCS)],
- DocIdsRev = [DocIdFun(I) || I <- lists:seq(1, StartKeyNum)],
-
- ?assertEqual({ok, []}, couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {start_key, <<255>>}
- ])),
-
- ?assertEqual({ok, []}, couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {start_key, <<"">>}
- ])),
-
- {ok, AllDocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {start_key, <<"">>}
- ]),
- ?assertEqual(length(AllDocIds), length(AllDocIdAccFwd)),
- ?assertEqual(AllDocIds, lists:reverse(AllDocIdAccFwd)),
-
- {ok, AllDocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {start_key, <<255>>}
- ]),
- ?assertEqual(length(AllDocIds), length(AllDocIdAccRev)),
- ?assertEqual(AllDocIds, AllDocIdAccRev),
-
- {ok, DocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {start_key, StartKey}
- ]),
- ?assertEqual(length(DocIdsFwd), length(DocIdAccFwd)),
- ?assertEqual(DocIdsFwd, lists:reverse(DocIdAccFwd)),
-
- {ok, DocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {start_key, StartKey}
- ]),
- ?assertEqual(length(DocIdsRev), length(DocIdAccRev)),
- ?assertEqual(DocIdsRev, DocIdAccRev).
-
-
-fold_end_key(DbName, FoldFun, DocIdFun) ->
- {ok, Db} = init_db(DbName, DocIdFun),
-
- EndKeyNum = ?NUM_DOCS div 4,
- EndKey = DocIdFun(EndKeyNum),
-
- ?assertEqual({ok, []}, couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {end_key, <<"">>}
- ])),
-
- ?assertEqual({ok, []}, couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {end_key, <<255>>}
- ])),
-
- AllDocIds = [DocIdFun(I) || I <- lists:seq(1, ?NUM_DOCS)],
-
- {ok, AllDocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {end_key, <<255>>}
- ]),
- ?assertEqual(length(AllDocIds), length(AllDocIdAccFwd)),
- ?assertEqual(AllDocIds, lists:reverse(AllDocIdAccFwd)),
-
- {ok, AllDocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {end_key, <<"">>}
- ]),
- ?assertEqual(length(AllDocIds), length(AllDocIdAccRev)),
- ?assertEqual(AllDocIds, AllDocIdAccRev),
-
- DocIdsFwd = [DocIdFun(I) || I <- lists:seq(1, EndKeyNum)],
-
- {ok, DocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {end_key, EndKey}
- ]),
- ?assertEqual(length(DocIdsFwd), length(DocIdAccFwd)),
- ?assertEqual(DocIdsFwd, lists:reverse(DocIdAccFwd)),
-
- DocIdsRev = [DocIdFun(I) || I <- lists:seq(EndKeyNum, ?NUM_DOCS)],
-
- {ok, DocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {end_key, EndKey}
- ]),
- ?assertEqual(length(DocIdsRev), length(DocIdAccRev)),
- ?assertEqual(DocIdsRev, DocIdAccRev).
-
-
-fold_end_key_gt(DbName, FoldFun, DocIdFun) ->
- {ok, Db} = init_db(DbName, DocIdFun),
-
- EndKeyNum = ?NUM_DOCS div 4,
- EndKey = DocIdFun(EndKeyNum),
-
- ?assertEqual({ok, []}, couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {end_key_gt, <<"">>}
- ])),
-
- ?assertEqual({ok, []}, couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {end_key_gt, <<255>>}
- ])),
-
- AllDocIds = [DocIdFun(I) || I <- lists:seq(1, ?NUM_DOCS)],
-
- {ok, AllDocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {end_key_gt, <<255>>}
- ]),
- ?assertEqual(length(AllDocIds), length(AllDocIdAccFwd)),
- ?assertEqual(AllDocIds, lists:reverse(AllDocIdAccFwd)),
-
- {ok, AllDocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {end_key_gt, <<"">>}
- ]),
- ?assertEqual(length(AllDocIds), length(AllDocIdAccRev)),
- ?assertEqual(AllDocIds, AllDocIdAccRev),
-
- DocIdsFwd = [DocIdFun(I) || I <- lists:seq(1, EndKeyNum - 1)],
-
- {ok, DocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {end_key_gt, EndKey}
- ]),
- ?assertEqual(length(DocIdsFwd), length(DocIdAccFwd)),
- ?assertEqual(DocIdsFwd, lists:reverse(DocIdAccFwd)),
-
- DocIdsRev = [DocIdFun(I) || I <- lists:seq(EndKeyNum + 1, ?NUM_DOCS)],
-
- {ok, DocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {end_key_gt, EndKey}
- ]),
- ?assertEqual(length(DocIdsRev), length(DocIdAccRev)),
- ?assertEqual(DocIdsRev, DocIdAccRev).
-
-
-fold_range(DbName, FoldFun, DocIdFun) ->
- {ok, Db} = init_db(DbName, DocIdFun),
-
- StartKeyNum = ?NUM_DOCS div 4,
- EndKeyNum = StartKeyNum * 3,
-
- StartKey = DocIdFun(StartKeyNum),
- EndKey = DocIdFun(EndKeyNum),
-
- ?assertEqual({ok, []}, couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {start_key, <<"">>},
- {end_key, <<"">>}
- ])),
-
- ?assertEqual({ok, []}, couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {start_key, <<"">>},
- {end_key, <<255>>}
- ])),
-
- AllDocIds = [DocIdFun(I) || I <- lists:seq(1, ?NUM_DOCS)],
-
- {ok, AllDocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {start_key, <<"">>},
- {end_key, <<255>>}
- ]),
- ?assertEqual(length(AllDocIds), length(AllDocIdAccFwd)),
- ?assertEqual(AllDocIds, lists:reverse(AllDocIdAccFwd)),
-
- {ok, AllDocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {start_key, <<255>>},
- {end_key_gt, <<"">>}
- ]),
- ?assertEqual(length(AllDocIds), length(AllDocIdAccRev)),
- ?assertEqual(AllDocIds, AllDocIdAccRev),
-
- DocIdsFwd = [DocIdFun(I) || I <- lists:seq(StartKeyNum, EndKeyNum)],
-
- {ok, DocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {start_key, StartKey},
- {end_key, EndKey}
- ]),
- ?assertEqual(length(DocIdsFwd), length(DocIdAccFwd)),
- ?assertEqual(DocIdsFwd, lists:reverse(DocIdAccFwd)),
-
- DocIdsRev = [DocIdFun(I) || I <- lists:seq(StartKeyNum, EndKeyNum)],
-
- ?assertEqual({ok, []}, couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {start_key, StartKey},
- {end_key, EndKey}
- ])),
-
- {ok, DocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {start_key, EndKey},
- {end_key, StartKey}
- ]),
- ?assertEqual(length(DocIdsRev), length(DocIdAccRev)),
- ?assertEqual(DocIdsRev, DocIdAccRev).
-
-
-fold_user_fun_stop(DbName, FoldFun, DocIdFun) ->
- {ok, Db} = init_db(DbName, DocIdFun),
-
- StartKeyNum = ?NUM_DOCS div 4,
- StartKey = DocIdFun(StartKeyNum),
-
- ?assertEqual({ok, []}, couch_db_engine:FoldFun(Db, fun fold_stop/2, [], [
- {start_key, <<255>>}
- ])),
-
- ?assertEqual({ok, []}, couch_db_engine:FoldFun(Db, fun fold_stop/2, [], [
- {dir, rev},
- {start_key, <<"">>}
- ])),
-
- SuffixDocIds = [DocIdFun(I) || I <- lists:seq(?NUM_DOCS - 3, ?NUM_DOCS)],
-
- {ok, SuffixDocIdAcc} = couch_db_engine:FoldFun(Db, fun fold_stop/2, [], [
- {start_key, DocIdFun(?NUM_DOCS - 3)}
- ]),
- ?assertEqual(length(SuffixDocIds), length(SuffixDocIdAcc)),
- ?assertEqual(SuffixDocIds, lists:reverse(SuffixDocIdAcc)),
-
- PrefixDocIds = [DocIdFun(I) || I <- lists:seq(1, 3)],
-
- {ok, PrefixDocIdAcc} = couch_db_engine:FoldFun(Db, fun fold_stop/2, [], [
- {dir, rev},
- {start_key, DocIdFun(3)}
- ]),
- ?assertEqual(3, length(PrefixDocIdAcc)),
- ?assertEqual(PrefixDocIds, PrefixDocIdAcc),
-
- FiveDocIdsFwd = [DocIdFun(I)
- || I <- lists:seq(StartKeyNum, StartKeyNum + 5)],
-
- {ok, FiveDocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_stop/2, [], [
- {start_key, StartKey}
- ]),
- ?assertEqual(length(FiveDocIdsFwd), length(FiveDocIdAccFwd)),
- ?assertEqual(FiveDocIdsFwd, lists:reverse(FiveDocIdAccFwd)),
-
- FiveDocIdsRev = [DocIdFun(I)
- || I <- lists:seq(StartKeyNum - 5, StartKeyNum)],
-
- {ok, FiveDocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_stop/2, [], [
- {dir, rev},
- {start_key, StartKey}
- ]),
- ?assertEqual(length(FiveDocIdsRev), length(FiveDocIdAccRev)),
- ?assertEqual(FiveDocIdsRev, FiveDocIdAccRev).
-
-
-init_db(DbName, DocIdFun) ->
- {ok, Db1} = cpse_util:create_db(DbName),
- Actions = lists:map(fun(Id) ->
- {create, {DocIdFun(Id), {[{<<"int">>, Id}]}}}
- end, lists:seq(1, ?NUM_DOCS)),
- cpse_util:apply_actions(Db1, [{batch, Actions}]).
-
-
-fold_fun(Doc, Acc) ->
- Id = case Doc of
- #doc{id = Id0} -> Id0;
- #full_doc_info{id = Id0} -> Id0
- end,
- {ok, [Id | Acc]}.
-
-
-fold_stop(Doc, Acc) ->
- Id = case Doc of
- #doc{id = Id0} -> Id0;
- #full_doc_info{id = Id0} -> Id0
- end,
- case length(Acc) of
- N when N =< 4 ->
- {ok, [Id | Acc]};
- _ ->
- {stop, [Id | Acc]}
- end.
-
-
-docid(I) ->
- Str = io_lib:format("~4..0b", [I]),
- iolist_to_binary(Str).
-
-
-local_docid(I) ->
- Str = io_lib:format("_local/~4..0b", [I]),
- iolist_to_binary(Str).
diff --git a/src/couch_pse_tests/src/cpse_test_fold_purge_infos.erl b/src/couch_pse_tests/src/cpse_test_fold_purge_infos.erl
deleted file mode 100644
index 4826c5d9c..000000000
--- a/src/couch_pse_tests/src/cpse_test_fold_purge_infos.erl
+++ /dev/null
@@ -1,167 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_fold_purge_infos).
--compile(export_all).
--compile(nowarn_export_all).
-
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
--define(NUM_DOCS, 100).
-
-
-setup_each() ->
- {ok, Db} = cpse_util:create_db(),
- Db.
-
-
-teardown_each(Db) ->
- ok = couch_server:delete(couch_db:name(Db), []).
-
-
-cpse_empty_purged_docs(Db) ->
- ?assertEqual({ok, []}, couch_db_engine:fold_purge_infos(
- Db, 0, fun fold_fun/2, [], [])).
-
-
-cpse_all_purged_docs(Db1) ->
- {RActions, RIds} = lists:foldl(fun(Id, {CActions, CIds}) ->
- Id1 = docid(Id),
- Action = {create, {Id1, {[{<<"int">>, Id}]}}},
- {[Action| CActions], [Id1| CIds]}
- end, {[], []}, lists:seq(1, ?NUM_DOCS)),
- Actions = lists:reverse(RActions),
- Ids = lists:reverse(RIds),
- {ok, Db2} = cpse_util:apply_batch(Db1, Actions),
-
- FDIs = couch_db_engine:open_docs(Db2, Ids),
- {RevActions2, RevIdRevs} = lists:foldl(fun(FDI, {CActions, CIdRevs}) ->
- Id = FDI#full_doc_info.id,
- PrevRev = cpse_util:prev_rev(FDI),
- Rev = PrevRev#rev_info.rev,
- Action = {purge, {Id, Rev}},
- {[Action| CActions], [{Id, [Rev]}| CIdRevs]}
- end, {[], []}, FDIs),
- {Actions2, IdsRevs} = {lists:reverse(RevActions2), lists:reverse(RevIdRevs)},
-
- {ok, Db3} = cpse_util:apply_batch(Db2, Actions2),
- {ok, PurgedIdRevs} = couch_db_engine:fold_purge_infos(
- Db3, 0, fun fold_fun/2, [], []),
- ?assertEqual(IdsRevs, lists:reverse(PurgedIdRevs)).
-
-
-cpse_start_seq(Db1) ->
- Actions1 = [
- {create, {docid(1), {[{<<"int">>, 1}]}}},
- {create, {docid(2), {[{<<"int">>, 2}]}}},
- {create, {docid(3), {[{<<"int">>, 3}]}}},
- {create, {docid(4), {[{<<"int">>, 4}]}}},
- {create, {docid(5), {[{<<"int">>, 5}]}}}
- ],
- Ids = [docid(1), docid(2), docid(3), docid(4), docid(5)],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions1),
-
- FDIs = couch_db_engine:open_docs(Db2, Ids),
- {RActions2, RIdRevs} = lists:foldl(fun(FDI, {CActions, CIdRevs}) ->
- Id = FDI#full_doc_info.id,
- PrevRev = cpse_util:prev_rev(FDI),
- Rev = PrevRev#rev_info.rev,
- Action = {purge, {Id, Rev}},
- {[Action| CActions], [{Id, [Rev]}| CIdRevs]}
- end, {[], []}, FDIs),
- {ok, Db3} = cpse_util:apply_actions(Db2, lists:reverse(RActions2)),
-
- StartSeq = 3,
- StartSeqIdRevs = lists:nthtail(StartSeq, lists:reverse(RIdRevs)),
- {ok, PurgedIdRevs} = couch_db_engine:fold_purge_infos(
- Db3, StartSeq, fun fold_fun/2, [], []),
- ?assertEqual(StartSeqIdRevs, lists:reverse(PurgedIdRevs)).
-
-
-cpse_id_rev_repeated(Db1) ->
- Actions1 = [
- {create, {<<"foo">>, {[{<<"vsn">>, 1}]}}},
- {conflict, {<<"foo">>, {[{<<"vsn">>, 2}]}}}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions1),
-
- [FDI1] = couch_db_engine:open_docs(Db2, [<<"foo">>]),
- PrevRev1 = cpse_util:prev_rev(FDI1),
- Rev1 = PrevRev1#rev_info.rev,
- Actions2 = [
- {purge, {<<"foo">>, Rev1}}
- ],
-
- {ok, Db3} = cpse_util:apply_actions(Db2, Actions2),
- {ok, PurgedIdRevs1} = couch_db_engine:fold_purge_infos(
- Db3, 0, fun fold_fun/2, [], []),
- ExpectedPurgedIdRevs1 = [
- {<<"foo">>, [Rev1]}
- ],
-
- ?assertEqual(ExpectedPurgedIdRevs1, lists:reverse(PurgedIdRevs1)),
- ?assertEqual(1, couch_db_engine:get_purge_seq(Db3)),
-
- % purge the same Id,Rev when the doc still exists
- {ok, Db4} = cpse_util:apply_actions(Db3, Actions2),
- {ok, PurgedIdRevs2} = couch_db_engine:fold_purge_infos(
- Db4, 0, fun fold_fun/2, [], []),
- ExpectedPurgedIdRevs2 = [
- {<<"foo">>, [Rev1]},
- {<<"foo">>, [Rev1]}
- ],
- ?assertEqual(ExpectedPurgedIdRevs2, lists:reverse(PurgedIdRevs2)),
- ?assertEqual(2, couch_db_engine:get_purge_seq(Db4)),
-
- [FDI2] = couch_db_engine:open_docs(Db4, [<<"foo">>]),
- PrevRev2 = cpse_util:prev_rev(FDI2),
- Rev2 = PrevRev2#rev_info.rev,
- Actions3 = [
- {purge, {<<"foo">>, Rev2}}
- ],
- {ok, Db5} = cpse_util:apply_actions(Db4, Actions3),
-
- {ok, PurgedIdRevs3} = couch_db_engine:fold_purge_infos(
- Db5, 0, fun fold_fun/2, [], []),
- ExpectedPurgedIdRevs3 = [
- {<<"foo">>, [Rev1]},
- {<<"foo">>, [Rev1]},
- {<<"foo">>, [Rev2]}
- ],
- ?assertEqual(ExpectedPurgedIdRevs3, lists:reverse(PurgedIdRevs3)),
- ?assertEqual(3, couch_db_engine:get_purge_seq(Db5)),
-
- % purge the same Id,Rev when the doc was completely purged
- {ok, Db6} = cpse_util:apply_actions(Db5, Actions3),
-
- {ok, PurgedIdRevs4} = couch_db_engine:fold_purge_infos(
- Db6, 0, fun fold_fun/2, [], []),
- ExpectedPurgedIdRevs4 = [
- {<<"foo">>, [Rev1]},
- {<<"foo">>, [Rev1]},
- {<<"foo">>, [Rev2]},
- {<<"foo">>, [Rev2]}
- ],
- ?assertEqual(ExpectedPurgedIdRevs4, lists:reverse(PurgedIdRevs4)),
- ?assertEqual(4, couch_db_engine:get_purge_seq(Db6)).
-
-
-fold_fun({_PSeq, _UUID, Id, Revs}, Acc) ->
- {ok, [{Id, Revs} | Acc]}.
-
-
-docid(I) ->
- Str = io_lib:format("~4..0b", [I]),
- iolist_to_binary(Str).
diff --git a/src/couch_pse_tests/src/cpse_test_get_set_props.erl b/src/couch_pse_tests/src/cpse_test_get_set_props.erl
deleted file mode 100644
index d49f67f49..000000000
--- a/src/couch_pse_tests/src/cpse_test_get_set_props.erl
+++ /dev/null
@@ -1,95 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_get_set_props).
--compile(export_all).
--compile(nowarn_export_all).
-
-
--include_lib("eunit/include/eunit.hrl").
-
-
-setup_each() ->
- cpse_util:dbname().
-
-
-teardown_each(DbName) ->
- ok = couch_server:delete(DbName, []).
-
-
-cpse_default_props(DbName) ->
- {ok, {_App, Engine, _Extension}} = application:get_env(couch, test_engine),
- {ok, Db} = cpse_util:create_db(DbName),
- Node = node(),
-
- ?assertEqual(Engine, couch_db_engine:get_engine(Db)),
- ?assertEqual(0, couch_db_engine:get_doc_count(Db)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db)),
- ?assertEqual(true, is_list(couch_db_engine:get_size_info(Db))),
- ?assertEqual(true, is_integer(couch_db_engine:get_disk_version(Db))),
- ?assertEqual(0, couch_db_engine:get_update_seq(Db)),
- ?assertEqual(0, couch_db_engine:get_purge_seq(Db)),
- ?assertEqual(true, is_integer(couch_db_engine:get_purge_infos_limit(Db))),
- ?assertEqual(true, couch_db_engine:get_purge_infos_limit(Db) > 0),
- ?assertEqual([], couch_db_engine:get_security(Db)),
- ?assertEqual(1000, couch_db_engine:get_revs_limit(Db)),
- ?assertMatch(<<_:32/binary>>, couch_db_engine:get_uuid(Db)),
- ?assertEqual([{Node, 0}], couch_db_engine:get_epochs(Db)),
- ?assertEqual(0, couch_db_engine:get_compacted_seq(Db)).
-
-
--define(ADMIN_ONLY_SEC_PROPS, {[
- {<<"members">>, {[
- {<<"roles">>, [<<"_admin">>]}
- ]}},
- {<<"admins">>, {[
- {<<"roles">>, [<<"_admin">>]}
- ]}}
-]}).
-
-
-cpse_admin_only_security(DbName) ->
- Config = [{"couchdb", "default_security", "admin_only"}],
- {ok, Db1} = cpse_util:with_config(Config, fun() ->
- cpse_util:create_db(DbName)
- end),
-
- ?assertEqual(?ADMIN_ONLY_SEC_PROPS, couch_db:get_security(Db1)),
- cpse_util:shutdown_db(Db1),
-
- {ok, Db2} = couch_db:reopen(Db1),
- couch_log:error("~n~n~n~n~s -> ~s~n~n", [couch_db:name(Db1), couch_db:name(Db2)]),
- ?assertEqual(?ADMIN_ONLY_SEC_PROPS, couch_db:get_security(Db2)).
-
-
-cpse_set_security(DbName) ->
- SecProps = {[{<<"foo">>, <<"bar">>}]},
- check_prop_set(DbName, get_security, set_security, {[]}, SecProps).
-
-
-cpse_set_revs_limit(DbName) ->
- check_prop_set(DbName, get_revs_limit, set_revs_limit, 1000, 50).
-
-
-check_prop_set(DbName, GetFun, SetFun, Default, Value) ->
- {ok, Db0} = cpse_util:create_db(DbName),
-
- ?assertEqual(Default, couch_db:GetFun(Db0)),
- ?assertMatch(ok, couch_db:SetFun(Db0, Value)),
-
- {ok, Db1} = couch_db:reopen(Db0),
- ?assertEqual(Value, couch_db:GetFun(Db1)),
-
- cpse_util:shutdown_db(Db1),
-
- {ok, Db2} = couch_db:reopen(Db1),
- ?assertEqual(Value, couch_db:GetFun(Db2)).
diff --git a/src/couch_pse_tests/src/cpse_test_open_close_delete.erl b/src/couch_pse_tests/src/cpse_test_open_close_delete.erl
deleted file mode 100644
index d9b589fd6..000000000
--- a/src/couch_pse_tests/src/cpse_test_open_close_delete.erl
+++ /dev/null
@@ -1,77 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_open_close_delete).
--compile(export_all).
--compile(nowarn_export_all).
-
-
--include_lib("eunit/include/eunit.hrl").
-
-
-setup_each() ->
- cpse_util:dbname().
-
-
-teardown_each(DbName) ->
- case couch_server:exists(DbName) of
- true -> ok = couch_server:delete(DbName, []);
- false -> ok
- end.
-
-
-cpse_open_non_existent(DbName) ->
- % Try twice to check that a failed open doesn't create
- % the database for some reason.
- ?assertEqual({not_found, no_db_file}, cpse_util:open_db(DbName)),
- ?assertEqual({not_found, no_db_file}, cpse_util:open_db(DbName)).
-
-
-cpse_open_create(DbName) ->
- ?assertEqual(false, couch_server:exists(DbName)),
- ?assertEqual({not_found, no_db_file}, cpse_util:open_db(DbName)),
- ?assertMatch({ok, _}, cpse_util:create_db(DbName)),
- ?assertEqual(true, couch_server:exists(DbName)).
-
-
-cpse_open_when_exists(DbName) ->
- ?assertEqual(false, couch_server:exists(DbName)),
- ?assertEqual({not_found, no_db_file}, cpse_util:open_db(DbName)),
- ?assertMatch({ok, _}, cpse_util:create_db(DbName)),
- ?assertEqual(file_exists, cpse_util:create_db(DbName)).
-
-
-cpse_terminate(DbName) ->
- ?assertEqual(false, couch_server:exists(DbName)),
- ?assertEqual({not_found, no_db_file}, cpse_util:open_db(DbName)),
- ?assertEqual(ok, cycle_db(DbName, create_db)),
- ?assertEqual(true, couch_server:exists(DbName)).
-
-
-cpse_rapid_recycle(DbName) ->
- ?assertEqual(ok, cycle_db(DbName, create_db)),
- lists:foreach(fun(_) ->
- ?assertEqual(ok, cycle_db(DbName, open_db))
- end, lists:seq(1, 100)).
-
-
-cpse_delete(DbName) ->
- ?assertEqual(false, couch_server:exists(DbName)),
- ?assertMatch(ok, cycle_db(DbName, create_db)),
- ?assertEqual(true, couch_server:exists(DbName)),
- ?assertEqual(ok, couch_server:delete(DbName, [])),
- ?assertEqual(false, couch_server:exists(DbName)).
-
-
-cycle_db(DbName, Type) ->
- {ok, Db} = cpse_util:Type(DbName),
- cpse_util:shutdown_db(Db).
diff --git a/src/couch_pse_tests/src/cpse_test_purge_bad_checkpoints.erl b/src/couch_pse_tests/src/cpse_test_purge_bad_checkpoints.erl
deleted file mode 100644
index c7a85c7e4..000000000
--- a/src/couch_pse_tests/src/cpse_test_purge_bad_checkpoints.erl
+++ /dev/null
@@ -1,80 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_purge_bad_checkpoints).
--compile(export_all).
--compile(nowarn_export_all).
-
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
-setup_each() ->
- {ok, Db1} = cpse_util:create_db(),
- {ok, Revs} = cpse_util:save_docs(couch_db:name(Db1), [
- {[{'_id', foo0}, {vsn, 0}]},
- {[{'_id', foo1}, {vsn, 1}]},
- {[{'_id', foo2}, {vsn, 2}]},
- {[{'_id', foo3}, {vsn, 3}]},
- {[{'_id', foo4}, {vsn, 4}]},
- {[{'_id', foo5}, {vsn, 5}]},
- {[{'_id', foo6}, {vsn, 6}]},
- {[{'_id', foo7}, {vsn, 7}]},
- {[{'_id', foo8}, {vsn, 8}]},
- {[{'_id', foo9}, {vsn, 9}]}
- ]),
- PInfos = lists:map(fun(Idx) ->
- DocId = iolist_to_binary(["foo", $0 + Idx]),
- Rev = lists:nth(Idx + 1, Revs),
- {cpse_util:uuid(), DocId, [Rev]}
- end, lists:seq(0, 9)),
- {ok, _} = cpse_util:purge(couch_db:name(Db1), PInfos),
- {ok, Db2} = couch_db:reopen(Db1),
- Db2.
-
-
-teardown_each(Db) ->
- ok = couch_server:delete(couch_db:name(Db), []).
-
-
-cpse_bad_purge_seq(Db1) ->
- Db2 = save_local_doc(Db1, <<"foo">>),
- ?assertEqual(0, couch_db:get_minimum_purge_seq(Db2)),
-
- ok = couch_db:set_purge_infos_limit(Db2, 5),
- {ok, Db3} = couch_db:reopen(Db2),
- ?assertEqual(1, couch_db:get_minimum_purge_seq(Db3)).
-
-
-cpse_verify_non_boolean(Db1) ->
- Db2 = save_local_doc(Db1, 2),
- ?assertEqual(0, couch_db:get_minimum_purge_seq(Db2)),
-
- ok = couch_db:set_purge_infos_limit(Db2, 5),
- {ok, Db3} = couch_db:reopen(Db2),
- ?assertEqual(5, couch_db:get_minimum_purge_seq(Db3)).
-
-
-save_local_doc(Db1, PurgeSeq) ->
- {Mega, Secs, _} = os:timestamp(),
- NowSecs = Mega * 1000000 + Secs,
- Doc = couch_doc:from_json_obj(?JSON_DECODE(?JSON_ENCODE({[
- {<<"_id">>, <<"_local/purge-test-stuff">>},
- {<<"purge_seq">>, PurgeSeq},
- {<<"timestamp_utc">>, NowSecs},
- {<<"verify_options">>, {[{<<"signature">>, <<"stuff">>}]}},
- {<<"type">>, <<"test">>}
- ]}))),
- {ok, _} = couch_db:update_doc(Db1, Doc, []),
- {ok, Db2} = couch_db:reopen(Db1),
- Db2.
diff --git a/src/couch_pse_tests/src/cpse_test_purge_docs.erl b/src/couch_pse_tests/src/cpse_test_purge_docs.erl
deleted file mode 100644
index 60a072da6..000000000
--- a/src/couch_pse_tests/src/cpse_test_purge_docs.erl
+++ /dev/null
@@ -1,464 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_purge_docs).
--compile(export_all).
--compile(nowarn_export_all).
-
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
--define(REV_DEPTH, 100).
-
-
-setup_each() ->
- {ok, Db} = cpse_util:create_db(),
- couch_db:name(Db).
-
-
-teardown_each(DbName) ->
- ok = couch_server:delete(DbName, []).
-
-
-cpse_purge_simple(DbName) ->
- {ok, Rev} = cpse_util:save_doc(DbName, {[{'_id', foo1}, {vsn, 1.1}]}),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 1},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo1">>, [Rev]}
- ],
-
- {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos),
- ?assertEqual([Rev], PRevs),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 0},
- {del_doc_count, 0},
- {update_seq, 2},
- {purge_seq, 1},
- {purge_infos, PurgeInfos}
- ]).
-
-
-cpse_purge_simple_info_check(DbName) ->
- {ok, Rev} = cpse_util:save_doc(DbName, {[{'_id', foo1}, {vsn, 1.1}]}),
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo1">>, [Rev]}
- ],
- {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos),
- ?assertEqual([Rev], PRevs),
-
- {ok, AllInfos} = couch_util:with_db(DbName, fun(Db) ->
- couch_db_engine:fold_purge_infos(Db, 0, fun fold_all_infos/2, [], [])
- end),
-
- ?assertMatch([{1, <<_/binary>>, <<"foo1">>, [Rev]}], AllInfos).
-
-
-cpse_purge_empty_db(DbName) ->
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo">>, [{0, <<0>>}]}
- ],
-
- {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos),
- ?assertEqual([], PRevs),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 0},
- {del_doc_count, 0},
- {update_seq, 1},
- {changes, 0},
- {purge_seq, 1},
- {purge_infos, PurgeInfos}
- ]).
-
-
-cpse_purge_single_docid(DbName) ->
- {ok, [Rev1, _Rev2]} = cpse_util:save_docs(DbName, [
- {[{'_id', foo1}, {vsn, 1}]},
- {[{'_id', foo2}, {vsn, 2}]}
- ]),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 2},
- {del_doc_count, 0},
- {update_seq, 2},
- {changes, 2},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo1">>, [Rev1]}
- ],
- {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos),
- ?assertEqual([Rev1], PRevs),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 3},
- {changes, 1},
- {purge_seq, 1},
- {purge_infos, PurgeInfos}
- ]).
-
-
-cpse_purge_multiple_docids(DbName) ->
- {ok, [Rev1, Rev2]} = cpse_util:save_docs(DbName, [
- {[{'_id', foo1}, {vsn, 1.1}]},
- {[{'_id', foo2}, {vsn, 1.2}]}
- ]),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 2},
- {del_doc_count, 0},
- {update_seq, 2},
- {changes, 2},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo1">>, [Rev1]},
- {cpse_util:uuid(), <<"foo2">>, [Rev2]}
- ],
-
- {ok, [{ok, PRevs1}, {ok, PRevs2}]} = cpse_util:purge(DbName, PurgeInfos),
-
- ?assertEqual([Rev1], PRevs1),
- ?assertEqual([Rev2], PRevs2),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 0},
- {del_doc_count, 0},
- {update_seq, 3},
- {changes, 0},
- {purge_seq, 2},
- {purge_infos, PurgeInfos}
- ]).
-
-
-cpse_purge_no_docids(DbName) ->
- {ok, [_Rev1, _Rev2]} = cpse_util:save_docs(DbName, [
- {[{'_id', foo1}, {vsn, 1}]},
- {[{'_id', foo2}, {vsn, 2}]}
- ]),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 2},
- {del_doc_count, 0},
- {update_seq, 2},
- {changes, 2},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- {ok, []} = cpse_util:purge(DbName, []),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 2},
- {del_doc_count, 0},
- {update_seq, 2},
- {changes, 2},
- {purge_seq, 0},
- {purge_infos, []}
- ]).
-
-
-cpse_purge_rev_path(DbName) ->
- {ok, Rev1} = cpse_util:save_doc(DbName, {[{'_id', foo}, {vsn, 1}]}),
- Update = {[
- {<<"_id">>, <<"foo">>},
- {<<"_rev">>, couch_doc:rev_to_str(Rev1)},
- {<<"_deleted">>, true},
- {<<"vsn">>, 2}
- ]},
- {ok, Rev2} = cpse_util:save_doc(DbName, Update),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 0},
- {del_doc_count, 1},
- {update_seq, 2},
- {changes, 1},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo">>, [Rev2]}
- ],
-
- {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos),
- ?assertEqual([Rev2], PRevs),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 0},
- {del_doc_count, 0},
- {update_seq, 3},
- {changes, 0},
- {purge_seq, 1},
- {purge_infos, PurgeInfos}
- ]).
-
-
-cpse_purge_deep_revision_path(DbName) ->
- {ok, InitRev} = cpse_util:save_doc(DbName, {[{'_id', bar}, {vsn, 0}]}),
- LastRev = lists:foldl(fun(Count, PrevRev) ->
- Update = {[
- {'_id', bar},
- {'_rev', couch_doc:rev_to_str(PrevRev)},
- {vsn, Count}
- ]},
- {ok, NewRev} = cpse_util:save_doc(DbName, Update),
- NewRev
- end, InitRev, lists:seq(1, ?REV_DEPTH)),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"bar">>, [LastRev]}
- ],
-
- {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos),
- ?assertEqual([LastRev], PRevs),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 0},
- {del_doc_count, 0},
- {update_seq, ?REV_DEPTH + 2},
- {changes, 0},
- {purge_seq, 1},
- {purge_infos, PurgeInfos}
- ]).
-
-
-cpse_purge_partial_revs(DbName) ->
- {ok, Rev1} = cpse_util:save_doc(DbName, {[{'_id', foo}, {vsn, <<"1.1">>}]}),
- Update = {[
- {'_id', foo},
- {'_rev', couch_doc:rev_to_str({1, [couch_hash:md5_hash(<<"1.2">>)]})},
- {vsn, <<"1.2">>}
- ]},
- {ok, [_Rev2]} = cpse_util:save_docs(DbName, [Update], [replicated_changes]),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo">>, [Rev1]}
- ],
-
- {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos),
- ?assertEqual([Rev1], PRevs),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 3},
- {changes, 1},
- {purge_seq, 1},
- {purge_infos, PurgeInfos}
- ]).
-
-
-cpse_purge_missing_docid(DbName) ->
- {ok, [Rev1, _Rev2]} = cpse_util:save_docs(DbName, [
- {[{'_id', foo1}, {vsn, 1}]},
- {[{'_id', foo2}, {vsn, 2}]}
- ]),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 2},
- {del_doc_count, 0},
- {update_seq, 2},
- {changes, 2},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"baz">>, [Rev1]}
- ],
-
- {ok, [{ok, []}]} = cpse_util:purge(DbName, PurgeInfos),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 2},
- {del_doc_count, 0},
- {update_seq, 3},
- {changes, 2},
- {purge_seq, 1},
- {purge_infos, PurgeInfos}
- ]).
-
-
-cpse_purge_duplicate_docids(DbName) ->
- {ok, [Rev1, _Rev2]} = cpse_util:save_docs(DbName, [
- {[{'_id', foo1}, {vsn, 1}]},
- {[{'_id', foo2}, {vsn, 2}]}
- ]),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 2},
- {del_doc_count, 0},
- {update_seq, 2},
- {purge_seq, 0},
- {changes, 2},
- {purge_infos, []}
- ]),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo1">>, [Rev1]},
- {cpse_util:uuid(), <<"foo1">>, [Rev1]}
- ],
-
- {ok, Resp} = cpse_util:purge(DbName, PurgeInfos),
- ?assertEqual([{ok, [Rev1]}, {ok, []}], Resp),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 3},
- {purge_seq, 2},
- {changes, 1},
- {purge_infos, PurgeInfos}
- ]).
-
-
-cpse_purge_internal_revision(DbName) ->
- {ok, Rev1} = cpse_util:save_doc(DbName, {[{'_id', foo}, {vsn, 1}]}),
- Update = {[
- {'_id', foo},
- {'_rev', couch_doc:rev_to_str(Rev1)},
- {vsn, 2}
- ]},
- {ok, _Rev2} = cpse_util:save_doc(DbName, Update),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo">>, [Rev1]}
- ],
-
- {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos),
- ?assertEqual([], PRevs),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 3},
- {changes, 1},
- {purge_seq, 1},
- {purge_infos, PurgeInfos}
- ]).
-
-
-cpse_purge_missing_revision(DbName) ->
- {ok, [_Rev1, Rev2]} = cpse_util:save_docs(DbName, [
- {[{'_id', foo1}, {vsn, 1}]},
- {[{'_id', foo2}, {vsn, 2}]}
- ]),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo1">>, [Rev2]}
- ],
-
- {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos),
- ?assertEqual([], PRevs),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 2},
- {del_doc_count, 0},
- {update_seq, 3},
- {changes, 2},
- {purge_seq, 1},
- {purge_infos, PurgeInfos}
- ]).
-
-
-cpse_purge_repeated_revisions(DbName) ->
- {ok, Rev1} = cpse_util:save_doc(DbName, {[{'_id', foo}, {vsn, <<"1.1">>}]}),
- Update = {[
- {'_id', foo},
- {'_rev', couch_doc:rev_to_str({1, [couch_hash:md5_hash(<<"1.2">>)]})},
- {vsn, <<"1.2">>}
- ]},
- {ok, [Rev2]} = cpse_util:save_docs(DbName, [Update], [replicated_changes]),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 2},
- {changes, 1},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- PurgeInfos1 = [
- {cpse_util:uuid(), <<"foo">>, [Rev1]},
- {cpse_util:uuid(), <<"foo">>, [Rev1, Rev2]}
- ],
-
- {ok, [{ok, PRevs1}, {ok, PRevs2}]} = cpse_util:purge(DbName, PurgeInfos1),
- ?assertEqual([Rev1], PRevs1),
- ?assertEqual([Rev2], PRevs2),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 0},
- {del_doc_count, 0},
- {update_seq, 3},
- {changes, 0},
- {purge_seq, 2},
- {purge_infos, PurgeInfos1}
- ]).
-
-
-cpse_purge_repeated_uuid(DbName) ->
- {ok, Rev} = cpse_util:save_doc(DbName, {[{'_id', foo1}, {vsn, 1.1}]}),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 1},
- {changes, 1},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo1">>, [Rev]}
- ],
-
- {ok, [{ok, PRevs1}]} = cpse_util:purge(DbName, PurgeInfos),
- ?assertEqual([Rev], PRevs1),
-
- % Attempting to purge a repeated UUID is an error
- ?assertThrow({badreq, _}, cpse_util:purge(DbName, PurgeInfos)),
-
-    % Although the same purge can still be applied via replicated_changes
- {ok, []} = cpse_util:purge(DbName, PurgeInfos, [replicated_changes]),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 0},
- {del_doc_count, 0},
- {update_seq, 2},
- {changes, 0},
- {purge_seq, 1},
- {purge_infos, PurgeInfos}
- ]).
-
-
-fold_all_infos(Info, Acc) ->
- {ok, [Info | Acc]}.
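The `fold_all_infos/2` helper above is a plain fold accumulator. As an illustrative sketch only (mirroring the `couch_db:fold_purge_infos/5` call used by `cpse_util:assert_each_prop/2` later in this diff; the `collect_purge_infos/1` wrapper name is hypothetical), it would be driven like this to collect every purge info in a database:

collect_purge_infos(Db) ->
    % Fold from purge seq 0 and gather each {UUID, DocId, Revs} info tuple.
    {ok, Infos} = couch_db:fold_purge_infos(Db, 0, fun fold_all_infos/2, [], []),
    lists:reverse(Infos).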
diff --git a/src/couch_pse_tests/src/cpse_test_purge_replication.erl b/src/couch_pse_tests/src/cpse_test_purge_replication.erl
deleted file mode 100644
index 20dcc2f81..000000000
--- a/src/couch_pse_tests/src/cpse_test_purge_replication.erl
+++ /dev/null
@@ -1,215 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_purge_replication).
--compile(export_all).
--compile(nowarn_export_all).
-
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-
-setup_all() ->
- cpse_util:setup_all([mem3, fabric, couch_replicator]).
-
-
-setup_each() ->
- {ok, Src} = cpse_util:create_db(),
- {ok, Tgt} = cpse_util:create_db(),
- {couch_db:name(Src), couch_db:name(Tgt)}.
-
-
-teardown_each({SrcDb, TgtDb}) ->
- ok = couch_server:delete(SrcDb, []),
- ok = couch_server:delete(TgtDb, []).
-
-
-cpse_purge_http_replication({Source, Target}) ->
- {ok, Rev1} = cpse_util:save_doc(Source, {[{'_id', foo}, {vsn, 1}]}),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, Source, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 1},
- {changes, 1},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- RepObject = {[
- {<<"source">>, db_url(Source)},
- {<<"target">>, db_url(Target)}
- ]},
-
- {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
- {ok, Doc1} = cpse_util:open_doc(Target, foo),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, Target, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 1},
- {changes, 1},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo">>, [Rev1]}
- ],
-
- {ok, [{ok, PRevs}]} = cpse_util:purge(Source, PurgeInfos),
- ?assertEqual([Rev1], PRevs),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, Source, [
- {doc_count, 0},
- {del_doc_count, 0},
- {update_seq, 2},
- {changes, 0},
- {purge_seq, 1},
- {purge_infos, PurgeInfos}
- ]),
-
- % Show that a purge on the source is
- % not replicated to the target
- {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
- {ok, Doc2} = cpse_util:open_doc(Target, foo),
- [Rev2] = Doc2#doc_info.revs,
- ?assertEqual(Rev1, Rev2#rev_info.rev),
- ?assertEqual(Doc1, Doc2),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, Target, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 1},
- {changes, 1},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- % Show that replicating from the target
- % back to the source reintroduces the doc
- RepObject2 = {[
- {<<"source">>, db_url(Target)},
- {<<"target">>, db_url(Source)}
- ]},
-
- {ok, _} = couch_replicator:replicate(RepObject2, ?ADMIN_USER),
- {ok, Doc3} = cpse_util:open_doc(Source, foo),
- [Revs3] = Doc3#doc_info.revs,
- ?assertEqual(Rev1, Revs3#rev_info.rev),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, Source, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 3},
- {changes, 1},
- {purge_seq, 1},
- {purge_infos, PurgeInfos}
- ]).
-
-
-cpse_purge_internal_repl_disabled({Source, Target}) ->
- cpse_util:with_config([{"mem3", "replicate_purges", "false"}], fun() ->
- repl(Source, Target),
-
- {ok, [Rev1, Rev2]} = cpse_util:save_docs(Source, [
- {[{'_id', foo1}, {vsn, 1}]},
- {[{'_id', foo2}, {vsn, 2}]}
- ]),
-
- repl(Source, Target),
-
- PurgeInfos1 = [
- {cpse_util:uuid(), <<"foo1">>, [Rev1]}
- ],
- {ok, [{ok, PRevs1}]} = cpse_util:purge(Source, PurgeInfos1),
- ?assertEqual([Rev1], PRevs1),
-
- PurgeInfos2 = [
- {cpse_util:uuid(), <<"foo2">>, [Rev2]}
- ],
- {ok, [{ok, PRevs2}]} = cpse_util:purge(Target, PurgeInfos2),
- ?assertEqual([Rev2], PRevs2),
-
- SrcShard = make_shard(Source),
- TgtShard = make_shard(Target),
- ?assertEqual({ok, 0}, mem3_rep:go(SrcShard, TgtShard)),
- ?assertEqual({ok, 0}, mem3_rep:go(TgtShard, SrcShard)),
-
- ?assertMatch({ok, #doc_info{}}, cpse_util:open_doc(Source, <<"foo2">>)),
- ?assertMatch({ok, #doc_info{}}, cpse_util:open_doc(Target, <<"foo1">>))
- end).
-
-
-cpse_purge_repl_simple_pull({Source, Target}) ->
- repl(Source, Target),
-
- {ok, Rev} = cpse_util:save_doc(Source, {[{'_id', foo}, {vsn, 1}]}),
- repl(Source, Target),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo">>, [Rev]}
- ],
- {ok, [{ok, PRevs}]} = cpse_util:purge(Target, PurgeInfos),
- ?assertEqual([Rev], PRevs),
- repl(Source, Target).
-
-
-cpse_purge_repl_simple_push({Source, Target}) ->
- repl(Source, Target),
-
- {ok, Rev} = cpse_util:save_doc(Source, {[{'_id', foo}, {vsn, 1}]}),
- repl(Source, Target),
-
- PurgeInfos = [
- {cpse_util:uuid(), <<"foo">>, [Rev]}
- ],
- {ok, [{ok, PRevs}]} = cpse_util:purge(Source, PurgeInfos),
- ?assertEqual([Rev], PRevs),
- repl(Source, Target).
-
-
-repl(Source, Target) ->
- SrcShard = make_shard(Source),
- TgtShard = make_shard(Target),
-
- ?assertEqual({ok, 0}, mem3_rep:go(SrcShard, TgtShard)),
-
- SrcTerm = cpse_util:db_as_term(Source, replication),
- TgtTerm = cpse_util:db_as_term(Target, replication),
-
- Diff = cpse_util:term_diff(SrcTerm, TgtTerm),
- ?assertEqual(nodiff, Diff).
-
-
-make_shard(DbName) ->
- #shard{
- name = DbName,
- node = node(),
- dbname = DbName,
- range = [0, 16#FFFFFFFF]
- }.
-
-
-db_url(DbName) ->
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(couch_httpd, port),
- Url = ?l2b(io_lib:format("http://~s:~b/~s", [Addr, Port, DbName])),
- test_util:wait(fun() ->
- case test_request:get(?b2l(Url)) of
- {ok, 200, _, _} -> ok;
- _ -> wait
- end
- end),
- Url.
diff --git a/src/couch_pse_tests/src/cpse_test_purge_seqs.erl b/src/couch_pse_tests/src/cpse_test_purge_seqs.erl
deleted file mode 100644
index c896b6154..000000000
--- a/src/couch_pse_tests/src/cpse_test_purge_seqs.erl
+++ /dev/null
@@ -1,129 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_purge_seqs).
--compile(export_all).
-
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
-setup_each() ->
- {ok, Db} = cpse_util:create_db(),
- couch_db:name(Db).
-
-
-teardown_each(DbName) ->
- ok = couch_server:delete(DbName, []).
-
-
-cpse_increment_purge_seq_on_complete_purge(DbName) ->
- {ok, Rev1} = cpse_util:save_doc(DbName, {[{'_id', foo1}, {vsn, 1.1}]}),
- {ok, Rev2} = cpse_util:save_doc(DbName, {[{'_id', foo2}, {vsn, 1.2}]}),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 2},
- {del_doc_count, 0},
- {update_seq, 2},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- PurgeInfos1 = [
- {cpse_util:uuid(), <<"foo1">>, [Rev1]}
- ],
- {ok, [{ok, PRevs1}]} = cpse_util:purge(DbName, PurgeInfos1),
- ?assertEqual([Rev1], PRevs1),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 3},
- {purge_seq, 1},
- {purge_infos, PurgeInfos1}
- ]),
-
- PurgeInfos2 = [
- {cpse_util:uuid(), <<"foo2">>, [Rev2]}
- ],
- {ok, [{ok, PRevs2}]} = cpse_util:purge(DbName, PurgeInfos2),
- ?assertEqual([Rev2], PRevs2),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 0},
- {del_doc_count, 0},
- {update_seq, 4},
- {purge_seq, 2},
- {purge_infos, PurgeInfos1 ++ PurgeInfos2}
- ]).
-
-
-cpse_increment_purge_multiple_times(DbName) ->
- {ok, Rev1} = cpse_util:save_doc(DbName, {[{'_id', foo1}, {vsn, 1.1}]}),
- {ok, Rev2} = cpse_util:save_doc(DbName, {[{'_id', foo2}, {vsn, 1.2}]}),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 2},
- {del_doc_count, 0},
- {update_seq, 2},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- PurgeInfos1 = [
- {cpse_util:uuid(), <<"foo1">>, [Rev1]},
- {cpse_util:uuid(), <<"foo2">>, [Rev2]}
- ],
- {ok, [{ok, PRevs1}, {ok, PRevs2}]} = cpse_util:purge(DbName, PurgeInfos1),
- ?assertEqual([Rev1], PRevs1),
- ?assertEqual([Rev2], PRevs2),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 0},
- {del_doc_count, 0},
- {update_seq, 3},
- {purge_seq, 2},
- {purge_infos, PurgeInfos1}
- ]).
-
-
-cpse_increment_purge_seq_on_partial_purge(DbName) ->
- {ok, Rev1} = cpse_util:save_doc(DbName, {[{'_id', foo1}, {vsn, <<"1.1">>}]}),
- Update = {[
- {'_id', foo1},
- {'_rev', couch_doc:rev_to_str({1, [couch_hash:md5_hash(<<"1.2">>)]})},
- {vsn, <<"1.2">>}
- ]},
- {ok, [_Rev2]} = cpse_util:save_docs(DbName, [Update], [replicated_changes]),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 2},
- {purge_seq, 0},
- {purge_infos, []}
- ]),
-
- PurgeInfos1 = [
- {cpse_util:uuid(), <<"foo1">>, [Rev1]}
- ],
- {ok, [{ok, PRevs1}]} = cpse_util:purge(DbName, PurgeInfos1),
- ?assertEqual([Rev1], PRevs1),
-
- cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
- {doc_count, 1},
- {del_doc_count, 0},
- {update_seq, 3},
- {purge_seq, 1},
- {purge_infos, PurgeInfos1}
- ]).
diff --git a/src/couch_pse_tests/src/cpse_test_read_write_docs.erl b/src/couch_pse_tests/src/cpse_test_read_write_docs.erl
deleted file mode 100644
index a2151340a..000000000
--- a/src/couch_pse_tests/src/cpse_test_read_write_docs.erl
+++ /dev/null
@@ -1,311 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_read_write_docs).
--compile(export_all).
--compile(nowarn_export_all).
-
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
-setup_each() ->
- {ok, Db} = cpse_util:create_db(),
- Db.
-
-
-teardown_each(Db) ->
- ok = couch_server:delete(couch_db:name(Db), []).
-
-
-cpse_read_docs_from_empty_db(Db) ->
- ?assertEqual([not_found], couch_db_engine:open_docs(Db, [<<"foo">>])),
- ?assertEqual(
- [not_found, not_found],
- couch_db_engine:open_docs(Db, [<<"a">>, <<"b">>])
- ).
-
-
-cpse_read_empty_local_docs(Db) ->
- {LocalA, LocalB} = {<<"_local/a">>, <<"_local/b">>},
- ?assertEqual([not_found], couch_db_engine:open_local_docs(Db, [LocalA])),
- ?assertEqual(
- [not_found, not_found],
- couch_db_engine:open_local_docs(Db, [LocalA, LocalB])
- ).
-
-
-cpse_write_one_doc(Db1) ->
- ?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_update_seq(Db1)),
-
- Actions = [
- {create, {<<"foo">>, {[{<<"vsn">>, 1}]}}}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
- ?assertEqual(1, couch_db_engine:get_doc_count(Db2)),
-
- cpse_util:shutdown_db(Db2),
-
- {ok, Db3} = couch_db:reopen(Db2),
-
- ?assertEqual(1, couch_db_engine:get_doc_count(Db3)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db3)),
- ?assertEqual(1, couch_db_engine:get_update_seq(Db3)),
-
- [FDI] = couch_db_engine:open_docs(Db3, [<<"foo">>]),
- #rev_info{
- rev = {RevPos, PrevRevId},
- deleted = Deleted,
- body_sp = DocPtr
- } = cpse_util:prev_rev(FDI),
-
- Doc0 = #doc{
- id = <<"foo">>,
- revs = {RevPos, [PrevRevId]},
- deleted = Deleted,
- body = DocPtr
- },
-
- Doc1 = couch_db_engine:read_doc_body(Db3, Doc0),
- Body1 = if not is_binary(Doc1#doc.body) -> Doc1#doc.body; true ->
- couch_compress:decompress(Doc1#doc.body)
- end,
- ?assertEqual({[{<<"vsn">>, 1}]}, Body1).
-
-
-cpse_write_two_docs(Db1) ->
- ?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_update_seq(Db1)),
-
- Actions = [
- {create, {<<"foo">>, {[{<<"vsn">>, 1}]}}},
- {create, {<<"bar">>, {[{<<"stuff">>, true}]}}}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
- cpse_util:shutdown_db(Db2),
-
- {ok, Db3} = couch_db:reopen(Db2),
-
- ?assertEqual(2, couch_db_engine:get_doc_count(Db3)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db3)),
- ?assertEqual(2, couch_db_engine:get_update_seq(Db3)),
-
- Resps = couch_db_engine:open_docs(Db3, [<<"foo">>, <<"bar">>]),
- ?assertEqual(false, lists:member(not_found, Resps)).
-
-
-cpse_write_three_doc_batch(Db1) ->
- ?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_update_seq(Db1)),
-
- Actions = [
- {batch, [
- {create, {<<"foo">>, {[{<<"vsn">>, 1}]}}},
- {create, {<<"bar">>, {[{<<"stuff">>, true}]}}},
- {create, {<<"baz">>, {[]}}}
- ]}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
- cpse_util:shutdown_db(Db2),
-
- {ok, Db3} = couch_db:reopen(Db2),
-
- ?assertEqual(3, couch_db_engine:get_doc_count(Db3)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db3)),
- ?assertEqual(3, couch_db_engine:get_update_seq(Db3)),
-
- Resps = couch_db_engine:open_docs(Db3, [<<"foo">>, <<"bar">>, <<"baz">>]),
- ?assertEqual(false, lists:member(not_found, Resps)).
-
-
-cpse_update_doc(Db1) ->
- ?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_update_seq(Db1)),
-
- Actions = [
- {create, {<<"foo">>, {[{<<"vsn">>, 1}]}}},
- {update, {<<"foo">>, {[{<<"vsn">>, 2}]}}}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
-
- cpse_util:shutdown_db(Db2),
-
- {ok, Db3} = couch_db:reopen(Db2),
-
- ?assertEqual(1, couch_db_engine:get_doc_count(Db3)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db3)),
- ?assertEqual(2, couch_db_engine:get_update_seq(Db3)),
-
- [FDI] = couch_db_engine:open_docs(Db3, [<<"foo">>]),
-
- #rev_info{
- rev = {RevPos, PrevRevId},
- deleted = Deleted,
- body_sp = DocPtr
- } = cpse_util:prev_rev(FDI),
-
- Doc0 = #doc{
- id = <<"foo">>,
- revs = {RevPos, [PrevRevId]},
- deleted = Deleted,
- body = DocPtr
- },
-
- Doc1 = couch_db_engine:read_doc_body(Db3, Doc0),
- Body1 = if not is_binary(Doc1#doc.body) -> Doc1#doc.body; true ->
- couch_compress:decompress(Doc1#doc.body)
- end,
-
- ?assertEqual({[{<<"vsn">>, 2}]}, Body1).
-
-
-cpse_delete_doc(Db1) ->
- ?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_update_seq(Db1)),
-
- Actions = [
- {create, {<<"foo">>, {[{<<"vsn">>, 1}]}}},
- {delete, {<<"foo">>, {[]}}}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
- cpse_util:shutdown_db(Db2),
-
- {ok, Db3} = couch_db:reopen(Db2),
- ?assertEqual(0, couch_db_engine:get_doc_count(Db3)),
- ?assertEqual(1, couch_db_engine:get_del_doc_count(Db3)),
- ?assertEqual(2, couch_db_engine:get_update_seq(Db3)),
-
- [FDI] = couch_db_engine:open_docs(Db3, [<<"foo">>]),
-
- #rev_info{
- rev = {RevPos, PrevRevId},
- deleted = Deleted,
- body_sp = DocPtr
- } = cpse_util:prev_rev(FDI),
-
- Doc0 = #doc{
- id = <<"foo">>,
- revs = {RevPos, [PrevRevId]},
- deleted = Deleted,
- body = DocPtr
- },
-
- Doc1 = couch_db_engine:read_doc_body(Db3, Doc0),
- Body1 = if not is_binary(Doc1#doc.body) -> Doc1#doc.body; true ->
- couch_compress:decompress(Doc1#doc.body)
- end,
-
- ?assertEqual({[]}, Body1).
-
-
-cpse_write_local_doc(Db1) ->
- ?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_update_seq(Db1)),
-
- Actions = [
- {create, {<<"_local/foo">>, {[{<<"yay">>, false}]}}}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
- cpse_util:shutdown_db(Db2),
-
- {ok, Db3} = couch_db:reopen(Db2),
-
- ?assertEqual(0, couch_db_engine:get_doc_count(Db3)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db3)),
- ?assertEqual(0, couch_db_engine:get_update_seq(Db3)),
-
- [not_found] = couch_db_engine:open_docs(Db3, [<<"_local/foo">>]),
- [#doc{} = Doc] = couch_db_engine:open_local_docs(Db3, [<<"_local/foo">>]),
- ?assertEqual({[{<<"yay">>, false}]}, Doc#doc.body).
-
-
-cpse_write_mixed_batch(Db1) ->
- ?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_update_seq(Db1)),
-
- Actions = [
- {batch, [
- {create, {<<"bar">>, {[]}}},
- {create, {<<"_local/foo">>, {[{<<"yay">>, false}]}}}
- ]}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
- cpse_util:shutdown_db(Db2),
-
- {ok, Db3} = couch_db:reopen(Db2),
-
- ?assertEqual(1, couch_db_engine:get_doc_count(Db3)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db3)),
- ?assertEqual(1, couch_db_engine:get_update_seq(Db3)),
-
- [#full_doc_info{}] = couch_db_engine:open_docs(Db3, [<<"bar">>]),
- [not_found] = couch_db_engine:open_docs(Db3, [<<"_local/foo">>]),
-
- [not_found] = couch_db_engine:open_local_docs(Db3, [<<"bar">>]),
- [#doc{}] = couch_db_engine:open_local_docs(Db3, [<<"_local/foo">>]).
-
-
-cpse_update_local_doc(Db1) ->
- ?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_update_seq(Db1)),
-
- Actions = [
- {create, {<<"_local/foo">>, {[]}}},
- {update, {<<"_local/foo">>, {[{<<"stuff">>, null}]}}}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
- cpse_util:shutdown_db(Db2),
-
- {ok, Db3} = couch_db:reopen(Db2),
-
- ?assertEqual(0, couch_db_engine:get_doc_count(Db3)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db3)),
- ?assertEqual(0, couch_db_engine:get_update_seq(Db3)),
-
- [not_found] = couch_db_engine:open_docs(Db3, [<<"_local/foo">>]),
- [#doc{} = Doc] = couch_db_engine:open_local_docs(Db3, [<<"_local/foo">>]),
- ?assertEqual({[{<<"stuff">>, null}]}, Doc#doc.body).
-
-
-cpse_delete_local_doc(Db1) ->
- ?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
- ?assertEqual(0, couch_db_engine:get_update_seq(Db1)),
-
- Actions = [
- {create, {<<"_local/foo">>, []}},
- {delete, {<<"_local/foo">>, []}}
- ],
- {ok, Db2} = cpse_util:apply_actions(Db1, Actions),
- cpse_util:shutdown_db(Db2),
-
- {ok, Db3} = couch_db:reopen(Db2),
-
- ?assertEqual(0, couch_db_engine:get_doc_count(Db3)),
- ?assertEqual(0, couch_db_engine:get_del_doc_count(Db3)),
- ?assertEqual(0, couch_db_engine:get_update_seq(Db3)),
-
- [not_found] = couch_db_engine:open_docs(Db3, [<<"_local/foo">>]),
- ?assertEqual(
- [not_found],
- couch_db_engine:open_local_docs(Db3, [<<"_local/foo">>])
- ).
diff --git a/src/couch_pse_tests/src/cpse_test_ref_counting.erl b/src/couch_pse_tests/src/cpse_test_ref_counting.erl
deleted file mode 100644
index cb115a785..000000000
--- a/src/couch_pse_tests/src/cpse_test_ref_counting.erl
+++ /dev/null
@@ -1,113 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_test_ref_counting).
--compile(export_all).
--compile(nowarn_export_all).
-
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
--define(NUM_CLIENTS, 1000).
-
-
-setup_each() ->
- {ok, Db} = cpse_util:create_db(),
- {Db, self()}.
-
-
-teardown_each({Db, _}) ->
- ok = couch_server:delete(couch_db:name(Db), []).
-
-
-cpse_empty_monitors({Db, Pid}) ->
- Pids = couch_db_engine:monitored_by(Db),
- ?assert(is_list(Pids)),
- Expected = [
- Pid,
- couch_db:get_pid(Db),
- whereis(couch_stats_process_tracker)
- ],
- ?assertEqual([], Pids -- Expected).
-
-
-cpse_incref_decref({Db, _}) ->
- {Pid, _} = Client = start_client(Db),
- wait_client(Client),
-
- Pids1 = couch_db_engine:monitored_by(Db),
- ?assert(lists:member(Pid, Pids1)),
-
- close_client(Client),
-
- Pids2 = couch_db_engine:monitored_by(Db),
- ?assert(not lists:member(Pid, Pids2)).
-
-
-cpse_incref_decref_many({Db, _}) ->
- Clients = lists:map(fun(_) ->
- start_client(Db)
- end, lists:seq(1, ?NUM_CLIENTS)),
-
- lists:foreach(fun(C) -> wait_client(C) end, Clients),
-
- Pids1 = couch_db_engine:monitored_by(Db),
- % +3 for self, db pid, and process tracker
- ?assertEqual(?NUM_CLIENTS + 3, length(Pids1)),
-
- lists:foreach(fun(C) -> close_client(C) end, Clients),
-
- Pids2 = couch_db_engine:monitored_by(Db),
- ?assertEqual(3, length(Pids2)).
-
-
-start_client(Db0) ->
- spawn_monitor(fun() ->
- {ok, Db1} = couch_db:open_int(couch_db:name(Db0), []),
-
- receive
- {waiting, Pid} ->
- Pid ! go
- after 1000 ->
- erlang:error(timeout)
- end,
-
- receive
- close ->
- couch_db:close(Db1),
- ok
- after 1000 ->
- erlang:error(timeout)
- end
- end).
-
-
-wait_client({Pid, _Ref}) ->
- Pid ! {waiting, self()},
- receive
- go -> ok
- after 1000 ->
- erlang:error(timeout)
- end.
-
-
-close_client({Pid, Ref}) ->
- Pid ! close,
- receive
- {'DOWN', Ref, _, _, _} ->
- ok
- after 1000 ->
- erlang:error(timeout)
- end.
-
diff --git a/src/couch_pse_tests/src/cpse_util.erl b/src/couch_pse_tests/src/cpse_util.erl
deleted file mode 100644
index 24f49e88c..000000000
--- a/src/couch_pse_tests/src/cpse_util.erl
+++ /dev/null
@@ -1,677 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(cpse_util).
--compile(export_all).
--compile(nowarn_export_all).
-
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
--define(TEST_MODULES, [
- cpse_test_open_close_delete,
- cpse_test_get_set_props,
- cpse_test_read_write_docs,
- cpse_test_attachments,
- cpse_test_fold_docs,
- cpse_test_fold_changes,
- cpse_test_fold_purge_infos,
- cpse_test_copy_purge_infos,
- cpse_test_purge_docs,
- cpse_test_purge_replication,
- cpse_test_purge_bad_checkpoints,
- cpse_test_compaction,
- cpse_test_ref_counting,
- cpse_test_purge_seqs
-]).
-
-
--define(SHUTDOWN_TIMEOUT, 5000).
--define(COMPACTOR_TIMEOUT, 50000).
--define(ATTACHMENT_WRITE_TIMEOUT, 10000).
--define(MAKE_DOC_SUMMARY_TIMEOUT, 5000).
-
-
-create_tests(EngineApp, Extension) ->
- create_tests(EngineApp, EngineApp, Extension).
-
-
-create_tests(EngineApp, EngineModule, Extension) ->
- TestEngine = {EngineApp, EngineModule, Extension},
- application:set_env(couch, test_engine, TestEngine),
- lists:map(fun(TestMod) ->
- {atom_to_list(TestMod), cpse_gather:module(TestMod)}
- end, ?TEST_MODULES).
-
-
-setup_all() ->
- setup_all([]).
-
-
-setup_all(ExtraApps) ->
- Ctx = test_util:start_couch(ExtraApps),
- {ok, {_, EngineMod, Extension}} = application:get_env(couch, test_engine),
- EngineModStr = atom_to_list(EngineMod),
- config:set("couchdb_engines", Extension, EngineModStr, false),
- config:set("log", "include_sasl", "false", false),
- config:set("mem3", "replicate_purges", "true", false),
- Ctx.
-
-
-teardown_all(Ctx) ->
- test_util:stop_couch(Ctx).
-
-
-rootdir() ->
- config:get("couchdb", "database_dir", ".").
-
-
-dbname() ->
- UUID = couch_uuids:random(),
- <<"db-", UUID/binary>>.
-
-
-get_engine() ->
- case application:get_env(couch, test_engine) of
- {ok, {_App, _Mod, Extension}} ->
- list_to_binary(Extension);
- _ ->
- <<"couch">>
- end.
-
-
-create_db() ->
- create_db(dbname()).
-
-
-create_db(DbName) ->
- Engine = get_engine(),
- couch_db:create(DbName, [{engine, Engine}, ?ADMIN_CTX]).
-
-
-open_db(DbName) ->
- Engine = get_engine(),
- couch_db:open_int(DbName, [{engine, Engine}, ?ADMIN_CTX]).
-
-
-shutdown_db(Db) ->
- Pid = couch_db:get_pid(Db),
- Ref = erlang:monitor(process, Pid),
- exit(Pid, kill),
- receive
- {'DOWN', Ref, _, _, _} ->
- ok
- after ?SHUTDOWN_TIMEOUT ->
- erlang:error(database_shutdown_timeout)
- end,
- test_util:wait(fun() ->
- case ets:member(couch_dbs, couch_db:name(Db)) of
- true -> wait;
- false -> ok
- end
- end).
-
-
-save_doc(DbName, Json) ->
- {ok, [Rev]} = save_docs(DbName, [Json], []),
- {ok, Rev}.
-
-
-save_docs(DbName, JsonDocs) ->
- save_docs(DbName, JsonDocs, []).
-
-
-save_docs(DbName, JsonDocs, Options) ->
- Docs = lists:map(fun(JDoc) ->
- couch_doc:from_json_obj(?JSON_DECODE(?JSON_ENCODE(JDoc)))
- end, JsonDocs),
- Opts = [full_commit | Options],
- {ok, Db} = couch_db:open_int(DbName, []),
- try
- case lists:member(replicated_changes, Options) of
- true ->
- {ok, []} = couch_db:update_docs(
- Db, Docs, Opts, replicated_changes),
- {ok, lists:map(fun(Doc) ->
- {Pos, [RevId | _]} = Doc#doc.revs,
- {Pos, RevId}
- end, Docs)};
- false ->
- {ok, Resp} = couch_db:update_docs(Db, Docs, Opts),
- {ok, [Rev || {ok, Rev} <- Resp]}
- end
- after
- couch_db:close(Db)
- end.
-
-
-open_doc(DbName, DocId0) ->
- DocId = ?JSON_DECODE(?JSON_ENCODE(DocId0)),
- {ok, Db} = couch_db:open_int(DbName, []),
- try
- couch_db:get_doc_info(Db, DocId)
- after
- couch_db:close(Db)
- end.
-
-
-purge(DbName, PurgeInfos) ->
- purge(DbName, PurgeInfos, []).
-
-
-purge(DbName, PurgeInfos0, Options) when is_list(PurgeInfos0) ->
- PurgeInfos = lists:map(fun({UUID, DocIdJson, Revs}) ->
- {UUID, ?JSON_DECODE(?JSON_ENCODE(DocIdJson)), Revs}
- end, PurgeInfos0),
- {ok, Db} = couch_db:open_int(DbName, []),
- try
- couch_db:purge_docs(Db, PurgeInfos, Options)
- after
- couch_db:close(Db)
- end.
-
-
-uuid() ->
- couch_uuids:random().
-
-
-assert_db_props(Module, Line, DbName, Props) when is_binary(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- try
- assert_db_props(Module, Line, Db, Props)
- catch error:{assertEqual, Props} ->
- {_, Rest} = proplists:split(Props, [module, line]),
- erlang:error({assertEqual, [{module, Module}, {line, Line} | Rest]})
- after
- couch_db:close(Db)
- end;
-
-assert_db_props(Module, Line, Db, Props) ->
- try
- assert_each_prop(Db, Props)
- catch error:{assertEqual, Props} ->
- {_, Rest} = proplists:split(Props, [module, line]),
- erlang:error({assertEqual, [{module, Module}, {line, Line} | Rest]})
- end.
-
-
-assert_each_prop(_Db, []) ->
- ok;
-assert_each_prop(Db, [{doc_count, Expect} | Rest]) ->
- {ok, DocCount} = couch_db:get_doc_count(Db),
- ?assertEqual(Expect, DocCount),
- assert_each_prop(Db, Rest);
-assert_each_prop(Db, [{del_doc_count, Expect} | Rest]) ->
- {ok, DelDocCount} = couch_db:get_del_doc_count(Db),
- ?assertEqual(Expect, DelDocCount),
- assert_each_prop(Db, Rest);
-assert_each_prop(Db, [{update_seq, Expect} | Rest]) ->
- UpdateSeq = couch_db:get_update_seq(Db),
- ?assertEqual(Expect, UpdateSeq),
- assert_each_prop(Db, Rest);
-assert_each_prop(Db, [{changes, Expect} | Rest]) ->
- {ok, NumChanges} = couch_db:fold_changes(Db, 0, fun aep_changes/2, 0, []),
- ?assertEqual(Expect, NumChanges),
- assert_each_prop(Db, Rest);
-assert_each_prop(Db, [{purge_seq, Expect} | Rest]) ->
- PurgeSeq = couch_db:get_purge_seq(Db),
- ?assertEqual(Expect, PurgeSeq),
- assert_each_prop(Db, Rest);
-assert_each_prop(Db, [{purge_infos, Expect} | Rest]) ->
- {ok, PurgeInfos} = couch_db:fold_purge_infos(Db, 0, fun aep_fold/2, [], []),
- ?assertEqual(Expect, lists:reverse(PurgeInfos)),
- assert_each_prop(Db, Rest).
-
-
-aep_changes(_A, Acc) ->
- {ok, Acc + 1}.
-
-
-aep_fold({_PSeq, UUID, Id, Revs}, Acc) ->
- {ok, [{UUID, Id, Revs} | Acc]}.
-
-
-apply_actions(DbName, Actions) when is_binary(DbName) ->
- {ok, Db0} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- {ok, Db1} = apply_actions(Db0, Actions),
- couch_db:close(Db1),
- ok;
-
-apply_actions(Db, []) ->
- {ok, Db};
-
-apply_actions(Db, [Action | Rest]) ->
- {ok, NewDb} = apply_action(Db, Action),
- apply_actions(NewDb, Rest).
-
-
-apply_action(Db, {batch, BatchActions}) ->
- apply_batch(Db, BatchActions);
-
-apply_action(Db, Action) ->
- apply_batch(Db, [Action]).
-
-
-apply_batch(Db, Actions) ->
- AccIn = {[], [], [], []},
- AccOut = lists:foldl(fun(Action, Acc) ->
- {DocAcc, ConfAcc, LDocAcc, PurgeAcc} = Acc,
- case gen_write(Db, Action) of
- {update, Doc} ->
- {[Doc | DocAcc], ConfAcc, LDocAcc, PurgeAcc};
- {conflict, Doc} ->
- {DocAcc, [Doc | ConfAcc], LDocAcc, PurgeAcc};
- {local, Doc} ->
- {DocAcc, ConfAcc, [Doc | LDocAcc], PurgeAcc};
- {purge, PurgeInfo} ->
- {DocAcc, ConfAcc, LDocAcc, [PurgeInfo | PurgeAcc]}
- end
- end, AccIn, Actions),
-
- {Docs0, Conflicts0, LDocs0, PurgeInfos0} = AccOut,
- Docs = lists:reverse(Docs0),
- Conflicts = lists:reverse(Conflicts0),
- LDocs = lists:reverse(LDocs0),
- PurgeInfos = lists:reverse(PurgeInfos0),
-
- {ok, Resp} = couch_db:update_docs(Db, Docs ++ LDocs),
- false = lists:member(conflict, Resp),
- {ok, Db1} = couch_db:reopen(Db),
-
- {ok, []} = couch_db:update_docs(Db, Conflicts, [], replicated_changes),
- {ok, Db2} = couch_db:reopen(Db1),
-
- if PurgeInfos == [] -> ok; true ->
- {ok, _} = couch_db:purge_docs(Db2, PurgeInfos)
- end,
- couch_db:reopen(Db2).
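As a rough usage sketch (action shapes are taken from the read/write tests earlier in this diff; the document ids are placeholders), apply_actions/2 accepts single actions as well as batches and returns the reopened database handle:

% First action creates the doc, then a batch updates it and writes a local doc.
Actions = [
    {create, {<<"doc-a">>, {[{<<"vsn">>, 1}]}}},
    {batch, [
        {update, {<<"doc-a">>, {[{<<"vsn">>, 2}]}}},
        {create, {<<"_local/doc-b">>, {[]}}}
    ]}
],
{ok, _Db2} = cpse_util:apply_actions(Db, Actions).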
-
-
-gen_write(Db, {Action, {<<"_local/", _/binary>> = DocId, Body}}) ->
- PrevRev = case couch_db:open_doc(Db, DocId) of
- {not_found, _} ->
- 0;
- {ok, #doc{revs = {0, []}}} ->
- 0;
- {ok, #doc{revs = {0, [RevStr | _]}}} ->
- binary_to_integer(RevStr)
- end,
- {RevId, Deleted} = case Action of
- Action when Action == create; Action == update ->
- {PrevRev + 1, false};
- delete ->
- {0, true}
- end,
- {local, #doc{
- id = DocId,
- revs = {0, [list_to_binary(integer_to_list(RevId))]},
- body = Body,
- deleted = Deleted
- }};
-
-gen_write(Db, {Action, {DocId, Body}}) ->
- gen_write(Db, {Action, {DocId, Body, []}});
-
-gen_write(Db, {create, {DocId, Body, Atts}}) ->
- {not_found, _} = couch_db:open_doc(Db, DocId),
- {update, #doc{
- id = DocId,
- revs = {0, []},
- deleted = false,
- body = Body,
- atts = Atts
- }};
-
-gen_write(_Db, {purge, {DocId, PrevRevs0, _}}) ->
- PrevRevs = if is_list(PrevRevs0) -> PrevRevs0; true -> [PrevRevs0] end,
- {purge, {couch_uuids:random(), DocId, PrevRevs}};
-
-gen_write(Db, {Action, {DocId, Body, Atts}}) ->
- #full_doc_info{} = PrevFDI = couch_db:get_full_doc_info(Db, DocId),
-
- #full_doc_info{
- id = DocId
- } = PrevFDI,
-
- #rev_info{
- rev = PrevRev
- } = prev_rev(PrevFDI),
-
- NewRev = gen_rev(Action, DocId, PrevRev, Body, Atts),
-
- Deleted = case Action of
- update -> false;
- conflict -> false;
- delete -> true
- end,
-
- Type = case Action of
- conflict -> conflict;
- _ -> update
- end,
-
- {Type, #doc{
- id = DocId,
- revs = NewRev,
- deleted = Deleted,
- body = Body,
- atts = Atts
- }}.
-
-
-gen_rev(A, DocId, {Pos, Rev}, Body, Atts) when A == update; A == delete ->
- NewRev = couch_hash:md5_hash(term_to_binary({DocId, Rev, Body, Atts})),
- {Pos + 1, [NewRev, Rev]};
-gen_rev(conflict, DocId, _, Body, Atts) ->
- UUID = couch_uuids:random(),
- NewRev = couch_hash:md5_hash(term_to_binary({DocId, UUID, Body, Atts})),
- {1, [NewRev]}.
-
-
-prep_atts(_Db, []) ->
- [];
-
-prep_atts(Db, [{FileName, Data} | Rest]) ->
- {_, Ref} = spawn_monitor(fun() ->
- {ok, Stream} = couch_db:open_write_stream(Db, []),
- exit(write_att(Stream, FileName, Data, Data))
- end),
- Att = receive
- {'DOWN', Ref, _, _, {{no_catch, not_supported}, _}} ->
- throw(not_supported);
- {'DOWN', Ref, _, _, Resp} ->
- Resp
- after ?ATTACHMENT_WRITE_TIMEOUT ->
- erlang:error(attachment_write_timeout)
- end,
- [Att | prep_atts(Db, Rest)].
-
-
-write_att(Stream, FileName, OrigData, <<>>) ->
- {StreamEngine, Len, Len, Md5, Md5} = couch_stream:close(Stream),
- couch_util:check_md5(Md5, couch_hash:md5_hash(OrigData)),
- Len = size(OrigData),
- couch_att:new([
- {name, FileName},
- {type, <<"application/octet-stream">>},
- {data, {stream, StreamEngine}},
- {att_len, Len},
- {disk_len, Len},
- {md5, Md5},
- {encoding, identity}
- ]);
-
-write_att(Stream, FileName, OrigData, Data) ->
- {Chunk, Rest} = case size(Data) > 4096 of
- true ->
- <<Head:4096/binary, Tail/binary>> = Data,
- {Head, Tail};
- false ->
- {Data, <<>>}
- end,
- ok = couch_stream:write(Stream, Chunk),
- write_att(Stream, FileName, OrigData, Rest).
-
-
-prev_rev(#full_doc_info{} = FDI) ->
- #doc_info{
- revs = [#rev_info{} = PrevRev | _]
- } = couch_doc:to_doc_info(FDI),
- PrevRev.
-
-
-db_as_term(Db) ->
- db_as_term(Db, compact).
-
-db_as_term(DbName, Type) when is_binary(DbName) ->
- couch_util:with_db(DbName, fun(Db) ->
- db_as_term(Db, Type)
- end);
-
-db_as_term(Db, Type) ->
- [
- {props, db_props_as_term(Db, Type)},
- {docs, db_docs_as_term(Db)},
- {local_docs, db_local_docs_as_term(Db, Type)},
- {changes, db_changes_as_term(Db)},
- {purged_docs, db_purged_docs_as_term(Db)}
- ].
-
-
-db_props_as_term(Db, Type) ->
- Props0 = [
- get_doc_count,
- get_del_doc_count,
- get_disk_version,
- get_update_seq,
- get_purge_seq,
- get_purge_infos_limit,
- get_security,
- get_revs_limit,
- get_uuid,
- get_epochs
- ],
- Props = if Type /= replication -> Props0; true ->
- Props0 -- [get_uuid]
- end,
- lists:map(fun(Fun) ->
- {Fun, couch_db_engine:Fun(Db)}
- end, Props).
-
-
-db_docs_as_term(Db) ->
- FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
- {ok, FDIs} = couch_db:fold_docs(Db, FoldFun, [], []),
- lists:reverse(lists:map(fun(FDI) ->
- fdi_to_term(Db, FDI)
- end, FDIs)).
-
-
-db_local_docs_as_term(Db, Type) ->
- FoldFun = fun(Doc, Acc) ->
- case Doc#doc.id of
- <<?LOCAL_DOC_PREFIX, "purge-mem3", _/binary>>
- when Type == replication ->
- {ok, Acc};
- _ ->
- {ok, [Doc | Acc]}
- end
- end,
- {ok, LDocs} = couch_db:fold_local_docs(Db, FoldFun, [], []),
- lists:reverse(LDocs).
-
-
-db_changes_as_term(Db) ->
- FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
- {ok, Changes} = couch_db:fold_changes(Db, 0, FoldFun, [], []),
- lists:reverse(lists:map(fun(FDI) ->
- fdi_to_term(Db, FDI)
- end, Changes)).
-
-
-db_purged_docs_as_term(Db) ->
- InitPSeq = couch_db_engine:get_oldest_purge_seq(Db) - 1,
- FoldFun = fun({PSeq, UUID, Id, Revs}, Acc) ->
- {ok, [{PSeq, UUID, Id, Revs} | Acc]}
- end,
- {ok, PDocs} = couch_db_engine:fold_purge_infos(
- Db, InitPSeq, FoldFun, [], []),
- lists:reverse(PDocs).
-
-
-fdi_to_term(Db, FDI) ->
- #full_doc_info{
- id = DocId,
- rev_tree = OldTree
- } = FDI,
- {NewRevTree, _} = couch_key_tree:mapfold(fun(Rev, Node, Type, Acc) ->
- tree_to_term(Rev, Node, Type, Acc, DocId)
- end, Db, OldTree),
- FDI#full_doc_info{
- rev_tree = NewRevTree,
- % Blank out sizes because we allow storage
- % engines to handle this with their own
- % definition until further notice.
- sizes = #size_info{
- active = -1,
- external = -1
- }
- }.
-
-
-tree_to_term(_Rev, _Leaf, branch, Acc, _DocId) ->
- {?REV_MISSING, Acc};
-
-tree_to_term({Pos, RevId}, #leaf{} = Leaf, leaf, Db, DocId) ->
- #leaf{
- deleted = Deleted,
- ptr = Ptr
- } = Leaf,
-
- Doc0 = #doc{
- id = DocId,
- revs = {Pos, [RevId]},
- deleted = Deleted,
- body = Ptr
- },
-
- Doc1 = couch_db_engine:read_doc_body(Db, Doc0),
-
- Body = if not is_binary(Doc1#doc.body) -> Doc1#doc.body; true ->
- couch_compress:decompress(Doc1#doc.body)
- end,
-
- Atts1 = if not is_binary(Doc1#doc.atts) -> Doc1#doc.atts; true ->
- couch_compress:decompress(Doc1#doc.atts)
- end,
-
- StreamSrc = fun(Sp) -> couch_db:open_read_stream(Db, Sp) end,
- Atts2 = [couch_att:from_disk_term(StreamSrc, Att) || Att <- Atts1],
- Atts = [att_to_term(Att) || Att <- Atts2],
-
- NewLeaf = Leaf#leaf{
- ptr = Body,
- sizes = #size_info{active = -1, external = -1},
- atts = Atts
- },
- {NewLeaf, Db}.
-
-
-att_to_term(Att) ->
- Bin = couch_att:to_binary(Att),
- couch_att:store(data, Bin, Att).
-
-
-term_diff(T1, T2) when is_tuple(T1), is_tuple(T2) ->
- tuple_diff(tuple_to_list(T1), tuple_to_list(T2));
-
-term_diff(L1, L2) when is_list(L1), is_list(L2) ->
- list_diff(L1, L2);
-
-term_diff(V1, V2) when V1 == V2 ->
- nodiff;
-
-term_diff(V1, V2) ->
- {V1, V2}.
-
-
-tuple_diff([], []) ->
- nodiff;
-
-tuple_diff([T1 | _], []) ->
- {longer, T1};
-
-tuple_diff([], [T2 | _]) ->
- {shorter, T2};
-
-tuple_diff([T1 | R1], [T2 | R2]) ->
- case term_diff(T1, T2) of
- nodiff ->
- tuple_diff(R1, R2);
- Else ->
- {T1, Else}
- end.
-
-
-list_diff([], []) ->
- nodiff;
-
-list_diff([T1 | _], []) ->
- {longer, T1};
-
-list_diff([], [T2 | _]) ->
- {shorter, T2};
-
-list_diff([T1 | R1], [T2 | R2]) ->
- case term_diff(T1, T2) of
- nodiff ->
- list_diff(R1, R2);
- Else ->
- {T1, Else}
- end.
-
-
-compact(Db) ->
- {ok, Pid} = couch_db:start_compact(Db),
- Ref = erlang:monitor(process, Pid),
-
- % Ideally I'd assert that Pid is linked to us
-    % at this point but it's technically possible
-    % that it could have finished compacting by
-    % the time we check... Quite the quandary.
-
- receive
- {'DOWN', Ref, _, _, normal} ->
- ok;
- {'DOWN', Ref, _, _, noproc} ->
- ok;
- {'DOWN', Ref, _, _, Reason} ->
- erlang:error({compactor_died, Reason})
- after ?COMPACTOR_TIMEOUT ->
- erlang:error(compactor_timed_out)
- end,
-
- test_util:wait(fun() ->
- {ok, Db2} = couch_db:open_int(couch_db:name(Db), []),
- try
- CPid = couch_db:get_compactor_pid(Db2),
- case is_pid(CPid) of
- true -> wait;
- false -> ok
- end
- after
- couch_db:close(Db2)
- end
- end).
-
-
-with_config(Config, Fun) ->
- OldConfig = apply_config(Config),
- try
- Fun()
- after
- apply_config(OldConfig)
- end.
-
-
-apply_config([]) ->
- [];
-
-apply_config([{Section, Key, Value} | Rest]) ->
- Orig = config:get(Section, Key),
- case Value of
- undefined -> config:delete(Section, Key, false);
- _ -> config:set(Section, Key, Value, false)
- end,
- [{Section, Key, Orig} | apply_config(Rest)].
diff --git a/src/couch_replicator/README.md b/src/couch_replicator/README.md
index 4eced760f..5fe55ac82 100644
--- a/src/couch_replicator/README.md
+++ b/src/couch_replicator/README.md
@@ -7,9 +7,9 @@ everything is connected together. A higher level overview is available in the
[RFC](https://github.com/apache/couchdb-documentation/pull/581). This
 documentation assumes the audience is familiar with that description as well as
with the [Couch Jobs
-RFC](https://github.com/apache/couchdb-documentation/blob/master/rfcs/007-background-jobs.md)
+RFC](https://github.com/apache/couchdb-documentation/blob/main/rfcs/007-background-jobs.md)
as well as with the [Node Types
-RFC](https://github.com/apache/couchdb-documentation/blob/master/rfcs/013-node-types.md).
+RFC](https://github.com/apache/couchdb-documentation/blob/main/rfcs/013-node-types.md).
A natural place to start is the top application supervisor:
`couch_replicator_sup`. The set of children in the supervisor is split into
diff --git a/src/couch_replicator/src/couch_replicator.erl b/src/couch_replicator/src/couch_replicator.erl
index a690d37c3..a0c6d2b8a 100644
--- a/src/couch_replicator/src/couch_replicator.erl
+++ b/src/couch_replicator/src/couch_replicator.erl
@@ -38,6 +38,7 @@
-include_lib("ibrowse/include/ibrowse.hrl").
-include_lib("couch/include/couch_db.hrl").
-include("couch_replicator.hrl").
+-include_lib("kernel/include/logger.hrl").
-spec replicate({[_]}, any()) ->
@@ -287,6 +288,7 @@ cancel_replication(JobId) when is_binary(JobId) ->
_ ->
JobId
end,
+ ?LOG_NOTICE(#{what => cancel_replication, in => replicator, id => Id}),
couch_log:notice("Canceling replication '~s'", [Id]),
case couch_replicator_jobs:remove_job(JTx, JobId) of
{error, not_found} ->
@@ -327,6 +329,13 @@ process_change(#{} = Db, #doc{deleted = false} = Doc) ->
?ST_PENDING, null, DocState)
end,
+ ?LOG_NOTICE(#{
+ what => replication_update,
+ db => DbName,
+ docid => DocId,
+ job_id => JobId,
+ job_state => DocState
+ }),
LogMsg = "~p : replication doc update db:~s doc:~s job_id:~s doc_state:~s",
couch_log:notice(LogMsg, [?MODULE, DbName, DocId, JobId, DocState]),
diff --git a/src/couch_replicator/src/couch_replicator_api_wrap.erl b/src/couch_replicator/src/couch_replicator_api_wrap.erl
index 1df8ee0c7..fd592a87f 100644
--- a/src/couch_replicator/src/couch_replicator_api_wrap.erl
+++ b/src/couch_replicator/src/couch_replicator_api_wrap.erl
@@ -19,8 +19,9 @@
 % Many options and APIs aren't yet supported here; they are added as needed.
-include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
-include("couch_replicator_api_wrap.hrl").
+-include_lib("kernel/include/logger.hrl").
-export([
db_open/1,
@@ -178,6 +179,12 @@ open_doc_revs(#httpdb{retries = 0} = HttpDb, Id, Revs, Options, _Fun, _Acc) ->
Url = couch_util:url_strip_password(
couch_replicator_httpc:full_url(HttpDb, [{path,Path}, {qs,QS}])
),
+ ?LOG_ERROR(#{
+ what => permanent_request_failure,
+ in => replicator,
+ url => Url,
+ retries_remaining => 0
+ }),
couch_log:error("Replication crashing because GET ~s failed", [Url]),
exit(kaboom);
open_doc_revs(#httpdb{} = HttpDb, Id, Revs, Options, Fun, Acc) ->
@@ -244,6 +251,13 @@ open_doc_revs(#httpdb{} = HttpDb, Id, Revs, Options, Fun, Acc) ->
true ->
throw(request_uri_too_long);
false ->
+ ?LOG_INFO(#{
+ what => request_uri_too_long,
+ in => replicator,
+ docid => Id,
+ new_max_length => NewMaxLen,
+ details => "reducing url length because of 414 response"
+ }),
couch_log:info("Reducing url length to ~B because of"
" 414 response", [NewMaxLen]),
Options1 = lists:keystore(max_url_len, 1, Options,
@@ -256,6 +270,13 @@ open_doc_revs(#httpdb{} = HttpDb, Id, Revs, Options, Fun, Acc) ->
),
#httpdb{retries = Retries, wait = Wait0} = HttpDb,
Wait = 2 * erlang:min(Wait0 * 2, ?MAX_WAIT),
+ ?LOG_NOTICE(#{
+ what => retry_request,
+ in => replicator,
+ url => Url,
+ delay_sec => Wait / 1000,
+ details => error_reason(Else)
+ }),
couch_log:notice("Retrying GET to ~s in ~p seconds due to error ~w",
[Url, Wait / 1000, error_reason(Else)]
),
diff --git a/src/couch_replicator/src/couch_replicator_auth_session.erl b/src/couch_replicator/src/couch_replicator_auth_session.erl
index a59c770b4..6ca30c806 100644
--- a/src/couch_replicator/src/couch_replicator_auth_session.erl
+++ b/src/couch_replicator/src/couch_replicator_auth_session.erl
@@ -74,6 +74,7 @@
-include_lib("ibrowse/include/ibrowse.hrl").
-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
+-include_lib("kernel/include/logger.hrl").
-type headers() :: [{string(), string()}].
@@ -156,6 +157,11 @@ handle_call({update_headers, Headers, _Epoch}, _From, State) ->
Headers1 = [{"Cookie", Cookie} | Headers],
{reply, {Headers1, State1#state.epoch}, State1};
{error, Error} ->
+ ?LOG_ERROR(#{
+ what => terminate_session_auth_plugin,
+ in => replicator,
+ details => Error
+ }),
LogMsg = "~p: Stopping session auth plugin because of error ~p",
couch_log:error(LogMsg, [?MODULE, Error]),
{stop, Error, State}
@@ -170,11 +176,13 @@ handle_call(stop, _From, State) ->
handle_cast(Msg, State) ->
+ ?LOG_ERROR(#{what => unexpected_cast, in => replicator, msg => Msg}),
couch_log:error("~p: Received un-expected cast ~p", [?MODULE, Msg]),
{noreply, State}.
handle_info(Msg, State) ->
+ ?LOG_ERROR(#{what => unexpected_message, in => replicator, msg => Msg}),
couch_log:error("~p : Received un-expected message ~p", [?MODULE, Msg]),
{noreply, State}.
@@ -288,7 +296,9 @@ extract_creds_from_url(Url) ->
Prefix = lists:concat([Proto, "://", User, ":", Pass, "@"]),
Suffix = lists:sublist(Url, length(Prefix) + 1, length(Url) + 1),
NoCreds = lists:concat([Proto, "://", Suffix]),
- {ok, User, Pass, NoCreds}
+ User1 = chttpd:unquote(User),
+ Pass1 = chttpd:unquote(Pass),
+ {ok, User1, Pass1, NoCreds}
end.
@@ -307,6 +317,11 @@ process_response(Code, Headers, _Epoch, State) when Code >= 200, Code < 300 ->
{error, cookie_not_found} ->
State;
{error, Other} ->
+ ?LOG_ERROR(#{
+ what => cookie_parse_error,
+ in => replicator,
+ details => Other
+ }),
LogMsg = "~p : Could not parse cookie from response headers ~p",
couch_log:error(LogMsg, [?MODULE, Other]),
State
@@ -576,14 +591,38 @@ extract_creds_success_test_() ->
{"u", "p", #httpdb{url = "http://x.y/db"}}
},
{
+ #httpdb{url = "http://u%40:p%40@x.y/db"},
+ {"u@", "p@", #httpdb{url = "http://x.y/db"}}
+ },
+ {
+ #httpdb{url = "http://u%40u:p%40p@x.y/db"},
+ {"u@u", "p@p", #httpdb{url = "http://x.y/db"}}
+ },
+ {
+ #httpdb{url = "http://u%40%401:p%40%401@x.y/db"},
+ {"u@@1", "p@@1", #httpdb{url = "http://x.y/db"}}
+ },
+ {
+ #httpdb{url = "http://u%40%2540:p%40%2540@x.y/db"},
+ {"u@%40", "p@%40", #httpdb{url = "http://x.y/db"}}
+ },
+ {
#httpdb{url = "http://u:p@h:80/db"},
{"u", "p", #httpdb{url = "http://h:80/db"}}
},
{
+ #httpdb{url = "http://u%3A:p%3A@h:80/db"},
+ {"u:", "p:", #httpdb{url = "http://h:80/db"}}
+ },
+ {
#httpdb{url = "https://u:p@h/db"},
{"u", "p", #httpdb{url = "https://h/db"}}
},
{
+ #httpdb{url = "https://u%2F:p%2F@h/db"},
+ {"u/", "p/", #httpdb{url = "https://h/db"}}
+ },
+ {
#httpdb{url = "http://u:p@127.0.0.1:5984/db"},
{"u", "p", #httpdb{url = "http://127.0.0.1:5984/db"}}
},
@@ -596,10 +635,18 @@ extract_creds_success_test_() ->
{"u", "p", #httpdb{url = "http://[2001:db8:a1b:12f9::1]:81/db"}}
},
{
+ #httpdb{url = "http://u:p%3A%2F%5B%5D%40@[2001:db8:a1b:12f9::1]:81/db"},
+ {"u", "p:/[]@", #httpdb{url = "http://[2001:db8:a1b:12f9::1]:81/db"}}
+ },
+ {
#httpdb{url = "http://u:p@x.y/db/other?query=Z&query=w"},
{"u", "p", #httpdb{url = "http://x.y/db/other?query=Z&query=w"}}
},
{
+ #httpdb{url = "http://u:p%3F@x.y/db/other?query=Z&query=w"},
+ {"u", "p?", #httpdb{url = "http://x.y/db/other?query=Z&query=w"}}
+ },
+ {
#httpdb{
url = "http://h/db",
headers = DefaultHeaders ++ [
@@ -612,6 +659,24 @@ extract_creds_success_test_() ->
#httpdb{
url = "http://h/db",
headers = DefaultHeaders ++ [
+ {"Authorization", "Basic " ++ b64creds("u", "p@")}
+ ]
+ },
+ {"u", "p@", #httpdb{url = "http://h/db"}}
+ },
+ {
+ #httpdb{
+ url = "http://h/db",
+ headers = DefaultHeaders ++ [
+ {"Authorization", "Basic " ++ b64creds("u", "p@%40")}
+ ]
+ },
+ {"u", "p@%40", #httpdb{url = "http://h/db"}}
+ },
+ {
+ #httpdb{
+ url = "http://h/db",
+ headers = DefaultHeaders ++ [
{"aUthoriZation", "bASIC " ++ b64creds("U", "p")}
]
},
diff --git a/src/couch_replicator/src/couch_replicator_changes_reader.erl b/src/couch_replicator/src/couch_replicator_changes_reader.erl
index 6adf1af5e..97c728971 100644
--- a/src/couch_replicator/src/couch_replicator_changes_reader.erl
+++ b/src/couch_replicator/src/couch_replicator_changes_reader.erl
@@ -21,6 +21,7 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-include("couch_replicator.hrl").
+-include_lib("kernel/include/logger.hrl").
start_link(StartSeq, #httpdb{} = Db, ChangesQueue, #{} = Options) ->
@@ -60,14 +61,23 @@ read_changes(Parent, StartSeq, Db, ChangesQueue, Options) ->
N when N > 0 ->
put(retries_left, N - 1),
LastSeq = get(last_seq),
+ LogMsg = #{
+ what => retry_changes_feed,
+ in => replicator,
+ source => couch_replicator_api_wrap:db_uri(Db),
+ sequence => LastSeq,
+ retries_remaining => N
+ },
Db2 = case LastSeq of
StartSeq ->
+ ?LOG_NOTICE(LogMsg#{delay_sec => Db#httpdb.wait / 1000}),
couch_log:notice("Retrying _changes request to source database ~s"
" with since=~p in ~p seconds",
[couch_replicator_api_wrap:db_uri(Db), LastSeq, Db#httpdb.wait / 1000]),
ok = timer:sleep(Db#httpdb.wait),
Db#httpdb{wait = 2 * Db#httpdb.wait};
_ ->
+ ?LOG_NOTICE(LogMsg),
couch_log:notice("Retrying _changes request to source database ~s"
" with since=~p", [couch_replicator_api_wrap:db_uri(Db), LastSeq]),
Db
@@ -82,6 +92,12 @@ read_changes(Parent, StartSeq, Db, ChangesQueue, Options) ->
process_change(#doc_info{id = <<>>} = DocInfo, {_, Db, _, _}) ->
% Previous CouchDB releases had a bug which allowed a doc with an empty ID
 % to be inserted into databases. Such a doc is impossible to GET.
+ ?LOG_ERROR(#{
+ what => ignore_empty_docid,
+ in => replicator,
+ source => couch_replicator_api_wrap:db_uri(Db),
+ sequence => DocInfo#doc_info.high_seq
+ }),
couch_log:error("Replicator: ignoring document with empty ID in "
"source database `~s` (_changes sequence ~p)",
[couch_replicator_api_wrap:db_uri(Db), DocInfo#doc_info.high_seq]);
@@ -90,6 +106,13 @@ process_change(#doc_info{id = Id} = DocInfo, {Parent, Db, ChangesQueue, _}) ->
case is_doc_id_too_long(byte_size(Id)) of
true ->
SourceDb = couch_replicator_api_wrap:db_uri(Db),
+ ?LOG_ERROR(#{
+ what => doc_write_failure,
+ in => replicator,
+ source => SourceDb,
+ docid => Id,
+ details => "document ID too long"
+ }),
couch_log:error("Replicator: document id `~s...` from source db "
" `~64s` is too long, ignoring.", [Id, SourceDb]),
Stats = couch_replicator_stats:new([{doc_write_failures, 1}]),
diff --git a/src/couch_replicator/src/couch_replicator_connection.erl b/src/couch_replicator/src/couch_replicator_connection.erl
index f31baf41d..ace93d545 100644
--- a/src/couch_replicator/src/couch_replicator_connection.erl
+++ b/src/couch_replicator/src/couch_replicator_connection.erl
@@ -40,6 +40,7 @@
]).
-include_lib("ibrowse/include/ibrowse.hrl").
+-include_lib("kernel/include/logger.hrl").
-define(DEFAULT_CLOSE_INTERVAL, 90000).
-define(RELISTEN_DELAY, 5000).
@@ -232,6 +233,13 @@ maybe_log_worker_death(_Host, _Port, normal) ->
ok;
maybe_log_worker_death(Host, Port, Reason) ->
+ ?LOG_INFO(#{
+ what => connection_failed,
+ in => replicator,
+ host => Host,
+ port => Port,
+ details => Reason
+ }),
ErrMsg = "Replication connection to: ~p:~p died with reason ~p",
couch_log:info(ErrMsg, [Host, Port, Reason]).
diff --git a/src/couch_replicator/src/couch_replicator_docs.erl b/src/couch_replicator/src/couch_replicator_docs.erl
index f84d1299a..d70ad1cc0 100644
--- a/src/couch_replicator/src/couch_replicator_docs.erl
+++ b/src/couch_replicator/src/couch_replicator_docs.erl
@@ -24,6 +24,7 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-include("couch_replicator.hrl").
+-include_lib("kernel/include/logger.hrl").
-define(OWNER, <<"owner">>).
@@ -67,6 +68,13 @@ update_failed(null, null, null, _) ->
update_failed(DbName, DbUUID, DocId, Error) ->
Reason = error_reason(Error),
+ ?LOG_ERROR(#{
+ what => replication_failed,
+ in => replicator,
+ replicator_db => DbName,
+ replicator_doc => DocId,
+ details => Reason
+ }),
couch_log:error("Error processing replication doc `~s` from `~s`: ~s",
[DocId, DbName, Reason]),
update_rep_doc(DbName, DbUUID, DocId, [
@@ -154,9 +162,17 @@ update_rep_doc(RepDbName, RepDbUUID, RepDocId, KVs, Wait)
end
catch
throw:conflict ->
+ Delay = couch_rand:uniform(erlang:min(128, Wait)) * 100,
+ ?LOG_ERROR(#{
+ what => replication_doc_conflict,
+ in => replicator,
+ replication_db => RepDbName,
+ replication_doc => RepDocId,
+                retry_delay_sec => Delay / 1000
+ }),
Msg = "Conflict when updating replication doc `~s`. Retrying.",
couch_log:error(Msg, [RepDocId]),
- ok = timer:sleep(couch_rand:uniform(erlang:min(128, Wait)) * 100),
+ ok = timer:sleep(Delay),
update_rep_doc(RepDbName, RepDbUUID, RepDocId, KVs, Wait * 2)
end;
@@ -213,6 +229,13 @@ save_rep_doc(DbName, DbUUID, Doc) when is_binary(DbName), is_binary(DbUUID) ->
% updating replication documents. Avoid crashing replicator and thus
% preventing all other replication jobs on the node from running.
throw:{forbidden, Reason} ->
+ ?LOG_ERROR(#{
+ what => replication_doc_update_forbidden,
+ in => replicator,
+ replication_db => DbName,
+ replication_doc => Doc#doc.id,
+ details => Reason
+ }),
Msg = "~p VDU or BDU function preventing doc update to ~s ~s ~p",
couch_log:error(Msg, [?MODULE, DbName, Doc#doc.id, Reason]),
{ok, forbidden}
diff --git a/src/couch_replicator/src/couch_replicator_httpc.erl b/src/couch_replicator/src/couch_replicator_httpc.erl
index f11d1895d..28b0f3811 100644
--- a/src/couch_replicator/src/couch_replicator_httpc.erl
+++ b/src/couch_replicator/src/couch_replicator_httpc.erl
@@ -15,6 +15,7 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("ibrowse/include/ibrowse.hrl").
-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
+-include_lib("kernel/include/logger.hrl").
-export([setup/1]).
-export([send_req/3]).
@@ -55,6 +56,12 @@ setup(Db) ->
{error, Error} ->
LogMsg = "~p: auth plugin initialization failed ~p ~p",
LogUrl = couch_util:url_strip_password(Url),
+ ?LOG_ERROR(#{
+ what => auth_plugin_initialization_failure,
+ in => replicator,
+ db => LogUrl,
+ details => Error
+ }),
couch_log:error(LogMsg, [?MODULE, LogUrl, Error]),
throw({replication_auth_error, Error})
end.
@@ -97,8 +104,8 @@ send_req(HttpDb, Params1, Callback) ->
send_ibrowse_req(#httpdb{headers = BaseHeaders} = HttpDb0, Params) ->
Method = get_value(method, Params, get),
- UserHeaders = lists:keysort(1, get_value(headers, Params, [])),
- Headers1 = lists:ukeymerge(1, UserHeaders, BaseHeaders),
+ UserHeaders = get_value(headers, Params, []),
+ Headers1 = merge_headers(BaseHeaders, UserHeaders),
{Headers2, HttpDb} = couch_replicator_auth:update_headers(HttpDb0, Headers1),
Url = full_url(HttpDb, Params),
Body = get_value(body, Params, []),
@@ -155,9 +162,15 @@ process_response({ibrowse_req_id, ReqId}, Worker, HttpDb, Params, Callback) ->
process_response({ok, Code, Headers, Body}, Worker, HttpDb, Params, Callback) ->
case list_to_integer(Code) of
+ R when R =:= 301 ; R =:= 302 ; R =:= 303 ->
+ backoff_success(HttpDb, Params),
+ do_redirect(Worker, R, Headers, HttpDb, Params, Callback);
429 ->
backoff(HttpDb, Params);
- Ok when (Ok >= 200 andalso Ok < 300) ; (Ok >= 400 andalso Ok < 500) ->
+ Error when Error =:= 408 ; Error >= 500 ->
+ couch_stats:increment_counter([couch_replicator, responses, failure]),
+ maybe_retry({code, Error}, Worker, HttpDb, Params);
+ Ok when Ok >= 200 , Ok < 500 ->
backoff_success(HttpDb, Params),
couch_stats:increment_counter([couch_replicator, responses, success]),
EJson = case Body of
@@ -168,13 +181,7 @@ process_response({ok, Code, Headers, Body}, Worker, HttpDb, Params, Callback) ->
end,
process_auth_response(HttpDb, Ok, Headers, Params),
if Ok =:= 413 -> put(?STOP_HTTP_WORKER, stop); true -> ok end,
- Callback(Ok, Headers, EJson);
- R when R =:= 301 ; R =:= 302 ; R =:= 303 ->
- backoff_success(HttpDb, Params),
- do_redirect(Worker, R, Headers, HttpDb, Params, Callback);
- Error ->
- couch_stats:increment_counter([couch_replicator, responses, failure]),
- maybe_retry({code, Error}, Worker, HttpDb, Params)
+ Callback(Ok, Headers, EJson)
end;
process_response(Error, Worker, HttpDb, Params, _Callback) ->
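A minimal sketch of the classification the reordered clauses above implement (classify_status/1 is a hypothetical helper, not part of the module): 301/302/303 follow the redirect, 429 backs off via the rate limiter, 408 and 5xx are counted as failures and retried, and any remaining 2xx-4xx code reaches the callback.

    %% Illustrative only; mirrors the clause order above.
    classify_status(Code) when Code =:= 301; Code =:= 302; Code =:= 303 -> redirect;
    classify_status(429) -> backoff;
    classify_status(Code) when Code =:= 408; Code >= 500 -> retry_as_failure;
    classify_status(Code) when Code >= 200, Code < 500 -> success.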
@@ -185,10 +192,18 @@ process_stream_response(ReqId, Worker, HttpDb, Params, Callback) ->
receive
{ibrowse_async_headers, ReqId, Code, Headers} ->
case list_to_integer(Code) of
+ R when R =:= 301 ; R =:= 302 ; R =:= 303 ->
+ backoff_success(HttpDb, Params),
+ do_redirect(Worker, R, Headers, HttpDb, Params, Callback);
429 ->
Timeout = couch_replicator_rate_limiter:max_interval(),
backoff(HttpDb#httpdb{timeout = Timeout}, Params);
- Ok when (Ok >= 200 andalso Ok < 300) ; (Ok >= 400 andalso Ok < 500) ->
+ Error when Error =:= 408 ; Error >= 500 ->
+ couch_stats:increment_counter(
+ [couch_replicator, stream_responses, failure]
+ ),
+ report_error(Worker, HttpDb, Params, {code, Error});
+ Ok when Ok >= 200 , Ok < 500 ->
backoff_success(HttpDb, Params),
HttpDb1 = process_auth_response(HttpDb, Ok, Headers, Params),
StreamDataFun = fun() ->
@@ -206,15 +221,7 @@ process_stream_response(ReqId, Worker, HttpDb, Params, Callback) ->
Worker, HttpDb1, Params);
throw:{maybe_retry_req, Err} ->
maybe_retry(Err, Worker, HttpDb1, Params)
- end;
- R when R =:= 301 ; R =:= 302 ; R =:= 303 ->
- backoff_success(HttpDb, Params),
- do_redirect(Worker, R, Headers, HttpDb, Params, Callback);
- Error ->
- couch_stats:increment_counter(
- [couch_replicator, stream_responses, failure]
- ),
- report_error(Worker, HttpDb, Params, {code, Error})
+ end
end;
{ibrowse_async_response, ReqId, {error, _} = Error} ->
couch_stats:increment_counter(
@@ -348,6 +355,14 @@ update_first_error_timestamp(HttpDb) ->
log_retry_error(Params, HttpDb, Wait, Error) ->
Method = string:to_upper(atom_to_list(get_value(method, Params, get))),
Url = couch_util:url_strip_password(full_url(HttpDb, Params)),
+ ?LOG_NOTICE(#{
+ what => retry_request,
+ in => replicator,
+ method => Method,
+ url => Url,
+ retry_delay_sec => Wait / 1000,
+ details => error_cause(Error)
+ }),
couch_log:notice("Retrying ~s request to ~s in ~p seconds due to error ~s",
[Method, Url, Wait / 1000, error_cause(Error)]).
@@ -360,10 +375,24 @@ report_error(_Worker, HttpDb, Params, Error) ->
do_report_error(Url, Method, {code, Code}) ->
+ ?LOG_ERROR(#{
+ what => failed_request,
+ in => replicator,
+ method => Method,
+ url => Url,
+ status_code => Code
+ }),
couch_log:error("Replicator, request ~s to ~p failed. The received "
"HTTP error code is ~p", [Method, Url, Code]);
do_report_error(FullUrl, Method, Error) ->
+ ?LOG_ERROR(#{
+ what => failed_request,
+ in => replicator,
+ method => Method,
+ url => FullUrl,
+ details => error_cause(Error)
+ }),
couch_log:error("Replicator, request ~s to ~p failed due to error ~s",
[Method, FullUrl, error_cause(Error)]).
@@ -493,3 +522,27 @@ backoff_before_request(Worker, HttpDb, Params) ->
Sleep when Sleep == 0 ->
ok
end.
+
+
+merge_headers(Headers1, Headers2) when is_list(Headers1), is_list(Headers2) ->
+ Empty = mochiweb_headers:empty(),
+ Merged = mochiweb_headers:enter_from_list(Headers1 ++ Headers2, Empty),
+ mochiweb_headers:to_list(Merged).
+
+
+-ifdef(TEST).
+
+-include_lib("couch/include/couch_eunit.hrl").
+
+
+merge_headers_test() ->
+ ?assertEqual([], merge_headers([], [])),
+ ?assertEqual([{"a", "x"}], merge_headers([], [{"a", "x"}])),
+ ?assertEqual([{"a", "x"}], merge_headers([{"a", "x"}], [])),
+ ?assertEqual([{"a", "y"}], merge_headers([{"A", "x"}], [{"a", "y"}])),
+ ?assertEqual([{"a", "y"}, {"B", "x"}], merge_headers([{"B", "x"}],
+ [{"a", "y"}])),
+ ?assertEqual([{"a", "y"}], merge_headers([{"A", "z"}, {"a", "y"}], [])),
+ ?assertEqual([{"a", "y"}], merge_headers([], [{"A", "z"}, {"a", "y"}])).
+
+-endif.
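A brief usage sketch for merge_headers/2 (header values invented): names are compared case-insensitively through mochiweb_headers, an entry from the second list replaces a matching entry from the first, and the later entry's casing is kept, as the tests above assert.

    %% Illustrative only.
    Base = [{"Accept", "application/json"}, {"User-Agent", "CouchDB Replicator"}],
    User = [{"accept", "multipart/mixed"}],
    Merged = merge_headers(Base, User),
    {"accept", "multipart/mixed"} = lists:keyfind("accept", 1, Merged),
    {"User-Agent", "CouchDB Replicator"} = lists:keyfind("User-Agent", 1, Merged).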
diff --git a/src/couch_replicator/src/couch_replicator_httpd.erl b/src/couch_replicator/src/couch_replicator_httpd.erl
index 196fcf203..0934ffe66 100644
--- a/src/couch_replicator/src/couch_replicator_httpd.erl
+++ b/src/couch_replicator/src/couch_replicator_httpd.erl
@@ -17,7 +17,7 @@
handle_scheduler_req/1
]).
--import(couch_httpd, [
+-import(chttpd, [
send_json/2,
send_json/3,
send_method_not_allowed/2
diff --git a/src/couch_replicator/src/couch_replicator_ids.erl b/src/couch_replicator/src/couch_replicator_ids.erl
index d1cbe571c..44b9e47e6 100644
--- a/src/couch_replicator/src/couch_replicator_ids.erl
+++ b/src/couch_replicator/src/couch_replicator_ids.erl
@@ -58,7 +58,7 @@ base_id(#{?SOURCE := Src0, ?TARGET := Tgt0} = Rep, 3) ->
base_id(#{?SOURCE := Src0, ?TARGET := Tgt0} = Rep, 2) ->
{ok, HostName} = inet:gethostname(),
- Port = case (catch mochiweb_socket_server:get(couch_httpd, port)) of
+ Port = case (catch mochiweb_socket_server:get(chttpd, port)) of
P when is_number(P) ->
P;
_ ->
diff --git a/src/couch_replicator/src/couch_replicator_job.erl b/src/couch_replicator/src/couch_replicator_job.erl
index c8c143a58..951471a14 100644
--- a/src/couch_replicator/src/couch_replicator_job.erl
+++ b/src/couch_replicator/src/couch_replicator_job.erl
@@ -39,6 +39,7 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-include("couch_replicator.hrl").
+-include_lib("kernel/include/logger.hrl").
-define(LOWEST_SEQ, 0).
@@ -116,6 +117,12 @@ terminate(shutdown, #rep_state{} = State0) ->
{ok, State2} ->
State2;
Error ->
+ ?LOG_ERROR(#{
+ what => checkpoint_failure,
+ in => replicator,
+ jobid => State1#rep_state.id,
+ details => Error
+ }),
Msg = "~p : Failed last checkpoint. Job: ~p Error: ~p",
couch_log:error(Msg, [?MODULE, State1#rep_state.id, Error]),
State1
@@ -127,9 +134,20 @@ terminate(shutdown, #rep_state{} = State0) ->
terminate({shutdown, Error}, {init_error, Stack}) ->
% Termination in init, before the job had initialized
case Error of
- max_backoff -> couch_log:warning("~p job backed off", [?MODULE]);
- finished -> couch_log:notice("~p job finished in init", [?MODULE]);
- _ -> couch_log:error("~p job failed ~p ~p", [?MODULE, Error, Stack])
+ max_backoff ->
+ ?LOG_WARNING(#{what => job_backoff, in => replicator}),
+ couch_log:warning("~p job backed off", [?MODULE]);
+ finished ->
+ ?LOG_NOTICE(#{what => job_finished_during_init, in => replicator}),
+ couch_log:notice("~p job finished in init", [?MODULE]);
+ _ ->
+ ?LOG_ERROR(#{
+ what => job_failure,
+ in => replicator,
+ details => Error,
+ stacktrace => Stack
+ }),
+ couch_log:error("~p job failed ~p ~p", [?MODULE, Error, Stack])
end,
ok;
@@ -139,6 +157,11 @@ terminate({shutdown, finished}, #rep_state{} = State) ->
terminate({shutdown, halt}, #rep_state{} = State) ->
% Job is re-enqueued and possibly already running somewhere else
+ ?LOG_ERROR(#{
+ what => job_halted,
+ in => replicator,
+ jobid => State#rep_state.id
+ }),
couch_log:error("~p job ~p halted", [?MODULE, State#rep_state.id]),
ok = close_endpoints(State);
@@ -155,6 +178,14 @@ terminate(Reason0, #rep_state{} = State0) ->
source_name = Source,
target_name = Target
} = State,
+ ?LOG_ERROR(#{
+ what => job_failure,
+ in => replicator,
+ replication_id => RepId,
+ source => Source,
+ target => Target,
+ details => Reason
+ }),
couch_log:error("Replication `~s` (`~s` -> `~s`) failed: ~p",
[RepId, Source, Target, Reason]),
ok = reschedule_on_error(undefined, Job, JobData, Reason),
@@ -189,6 +220,21 @@ handle_call({report_seq_done, Seq, StatsInc}, From, #rep_state{} = State) ->
_ ->
NewThroughSeq0
end,
+ ?LOG_DEBUG(#{
+ what => progress_report,
+ in => replicator,
+ old => #{
+ highest_seq_done => HighestDone,
+ current_through_seq => ThroughSeq,
+ seqs_in_progress => SeqsInProgress
+ },
+ new => #{
+ highest_seq_done => NewHighestDone,
+ current_through_seq => NewThroughSeq,
+ seqs_in_progress => NewSeqsInProgress
+ },
+ worker_reported_seq => Seq
+ }),
couch_log:debug("Worker reported seq ~p, through seq was ~p, "
"new through seq is ~p, highest seq done was ~p, "
"new highest seq done is ~p~n"
@@ -221,12 +267,10 @@ handle_info(timeout, delayed_init) ->
{ok, State} -> {noreply, State};
{stop, Reason, State} -> {stop, Reason, State}
catch
- exit:{shutdown, Exit} when Exit =:= finished orelse Exit =:= halt ->
- Stack = erlang:get_stacktrace(),
+ exit:{shutdown, Exit}:Stack when Exit =:= finished orelse Exit =:= halt ->
{stop, {shutdown, Exit}, {init_error, Stack}};
- _Tag:Error ->
+ _Tag:Error:Stack ->
ShutdownReason = {error, replication_start_error(Error)},
- Stack = erlang:get_stacktrace(),
{stop, {shutdown, ShutdownReason}, {init_error, Stack}}
end;
@@ -253,10 +297,12 @@ handle_info(shutdown, St) ->
{stop, shutdown, St};
handle_info({'EXIT', Pid, max_backoff}, State) ->
+ ?LOG_ERROR(#{what => max_backoff, in => replicator, pid => Pid}),
couch_log:error("Max backoff reached child process ~p", [Pid]),
{stop, {shutdown, max_backoff}, State};
handle_info({'EXIT', Pid, {shutdown, max_backoff}}, State) ->
+ ?LOG_ERROR(#{what => max_backoff, in => replicator, pid => Pid}),
couch_log:error("Max backoff reached child process ~p", [Pid]),
{stop, {shutdown, max_backoff}, State};
@@ -275,6 +321,7 @@ handle_info({'EXIT', Pid, Reason0}, #rep_state{changes_reader=Pid} = State) ->
Other ->
{changes_reader_died, Other}
end,
+ ?LOG_ERROR(#{what => changes_reader_crash, in => replicator, details => Reason}),
couch_log:error("ChangesReader process died with reason: ~p", [Reason]),
{stop, {shutdown, Reason}, cancel_timers(State)};
@@ -283,6 +330,7 @@ handle_info({'EXIT', Pid, normal}, #rep_state{changes_manager=Pid} = State) ->
handle_info({'EXIT', Pid, Reason}, #rep_state{changes_manager=Pid} = State) ->
couch_stats:increment_counter([couch_replicator, changes_manager_deaths]),
+ ?LOG_ERROR(#{what => changes_manager_crash, in => replicator, details => Reason}),
couch_log:error("ChangesManager process died with reason: ~p", [Reason]),
{stop, {shutdown, {changes_manager_died, Reason}}, cancel_timers(State)};
@@ -291,6 +339,7 @@ handle_info({'EXIT', Pid, normal}, #rep_state{changes_queue=Pid} = State) ->
handle_info({'EXIT', Pid, Reason}, #rep_state{changes_queue=Pid} = State) ->
couch_stats:increment_counter([couch_replicator, changes_queue_deaths]),
+ ?LOG_ERROR(#{what => changes_queue_crash, in => replicator, details => Reason}),
couch_log:error("ChangesQueue process died with reason: ~p", [Reason]),
{stop, {shutdown, {changes_queue_died, Reason}}, cancel_timers(State)};
@@ -299,6 +348,12 @@ handle_info({'EXIT', Pid, normal}, #rep_state{workers = Workers} = State) ->
Workers ->
%% Processes might be linked by replicator's auth plugins so
%% we tolerate them exiting `normal` here and don't crash
+ ?LOG_WARNING(#{
+ what => linked_process_exit,
+ in => replicator,
+ pid => Pid,
+ reason => normal
+ }),
LogMsg = "~p: unknown pid exited `normal` ~p",
couch_log:error(LogMsg, [?MODULE, Pid]),
{noreply, State#rep_state{workers = Workers}};
@@ -321,6 +376,12 @@ handle_info({'EXIT', Pid, Reason}, #rep_state{workers = Workers} = State) ->
{shutdown, _} = Err ->
Err;
Other ->
+ ?LOG_ERROR(#{
+ what => worker_crash,
+ in => replicator,
+ pid => Pid,
+ details => Reason
+ }),
ErrLog = "Worker ~p died with reason: ~p",
couch_log:error(ErrLog, [Pid, Reason]),
{worker_died, Pid, Other}
@@ -329,6 +390,11 @@ handle_info({'EXIT', Pid, Reason}, #rep_state{workers = Workers} = State) ->
end;
handle_info({Ref, ready}, St) when is_reference(Ref) ->
+ ?LOG_NOTICE(#{
+ what => spurious_future_ready_message,
+ in => replicator,
+ ref => Ref
+ }),
LogMsg = "~p : spurious erlfdb future ready message ~p",
couch_log:notice(LogMsg, [?MODULE, Ref]),
{noreply, St};
@@ -406,16 +472,19 @@ delayed_init() ->
try do_init(Job, JobData) of
State = #rep_state{} -> {ok, State}
catch
- exit:{http_request_failed, _, _, max_backoff} ->
- Stack = erlang:get_stacktrace(),
+ exit:{http_request_failed, _, _, max_backoff}:Stack ->
reschedule_on_error(undefined, Job, JobData, max_backoff),
{stop, {shutdown, max_backoff}, {init_error, Stack}};
- exit:{shutdown, Exit} when Exit =:= finished orelse Exit =:= halt ->
- Stack = erlang:get_stacktrace(),
+ exit:{shutdown, Exit}:Stack when Exit =:= finished orelse Exit =:= halt ->
{stop, {shutdown, Exit}, {init_error, Stack}};
- _Tag:Error ->
+ _Tag:Error:Stack ->
Reason = {error, replication_start_error(Error)},
- Stack = erlang:get_stacktrace(),
+ ?LOG_ERROR(#{
+ what => job_failure_during_init,
+ job => Job,
+ details => Reason,
+ stacktrace => Stack
+ }),
ErrMsg = "~p : job ~p failed during startup ~p stack:~p",
couch_log:error(ErrMsg, [?MODULE, Job, Reason, Stack]),
reschedule_on_error(undefined, Job, JobData, Reason),
@@ -576,12 +645,25 @@ check_ownership(#{jtx := true} = JTx, Job, JobData) ->
fail_job(JTx, Job, JobData, Error),
not_owner;
{ok, #{}} ->
+ ?LOG_WARNING(#{
+ what => duplicate_job_detected,
+ in => replicator,
+ jobid => JobId,
+ other_jobid => OtherJobId,
+ replication_id => RepId
+ }),
LogMsg = "~p : Job ~p usurping job ~p for replication ~p",
couch_log:warning(LogMsg, [?MODULE, JobId, OtherJobId,
RepId]),
couch_replicator_jobs:update_rep_id(JTx, JobId, RepId),
owner;
{error, not_found} ->
+ ?LOG_ERROR(#{
+ what => orphaned_job_mapping,
+ in => replicator,
+ replication_id => RepId,
+ jobid => OtherJobId
+ }),
LogMsg = "~p : Orphan replication job reference ~p -> ~p",
couch_log:error(LogMsg, [?MODULE, RepId, OtherJobId]),
couch_replicator_jobs:update_rep_id(JTx, JobId, RepId),
@@ -866,6 +948,12 @@ state_strip_creds(#rep_state{source = Source, target = Target} = State) ->
adjust_maxconn(Src = #{<<"http_connections">> := 1}, RepId) ->
+ ?LOG_NOTICE(#{
+ what => minimum_source_connections_override,
+ in => replicator,
+ replication_id => RepId,
+ details => "adjusting minimum source connections to 2"
+ }),
Msg = "Adjusting minimum number of HTTP source connections to 2 for ~p",
couch_log:notice(Msg, [RepId]),
Src#{<<"http_connections">> := 2};
@@ -965,7 +1053,21 @@ init_state(#{} = Job, #{} = JobData) ->
[SourceLog, TargetLog] = find_and_migrate_logs([Source, Target], Rep,
BaseId),
- {StartSeq0, History} = compare_replication_logs(SourceLog, TargetLog),
+ {StartSeq0, History, MatchedSessionIds} = compare_replication_logs(SourceLog, TargetLog),
+
+ if not MatchedSessionIds ->
+ ?LOG_NOTICE(#{
+ what => session_history_mismatch,
+ in => replicator,
+ calculated_start_seq => StartSeq0,
+ source => couch_replicator_api_wrap:db_uri(Source),
+ target => couch_replicator_api_wrap:db_uri(Target),
+ replication_id => Id,
+ details => "scanned histories to find common ancestor"
+ });
+ true ->
+ ok
+ end,
#{?REP_STATS := Stats0} = JobData,
Stats1 = couch_replicator_stats:new(Stats0),
@@ -1048,6 +1150,13 @@ maybe_save_migrated_log(#{?OPTIONS := Options}, Db, #doc{} = Doc, OldId) ->
case maps:get(<<"use_checkpoints">>, Options) of
true ->
update_checkpoint(Db, Doc),
+ ?LOG_NOTICE(#{
+ what => migrated_checkpoint,
+ in => replicator,
+ db => httpdb_strip_creds(Db),
+ old_id => OldId,
+ new_id => Doc#doc.id
+ }),
Msg = "Migrated replication checkpoint. Db:~p ~p -> ~p",
couch_log:notice(Msg, [httpdb_strip_creds(Db), OldId, Doc#doc.id]);
false ->
@@ -1116,6 +1225,13 @@ do_checkpoint(State) ->
{checkpoint_commit_failure, <<"Failure on target commit: ",
(couch_util:to_binary(Reason))/binary>>};
{SrcInstanceStartTime, TgtInstanceStartTime} ->
+ ?LOG_NOTICE(#{
+ what => checkpoint,
+ in => replicator,
+ source => SourceName,
+ target => TargetName,
+ sequence => NewSeq
+ }),
couch_log:notice("recording a checkpoint for `~s` -> `~s` at "
"source update_seq ~p", [SourceName, TargetName, NewSeq]),
StartTime = couch_replicator_utils:rfc1123_local(RepStartTime),
@@ -1276,7 +1392,7 @@ compare_replication_logs(SrcDoc, TgtDoc) ->
OldSeqNum = get_value(<<"source_last_seq">>, RepRecProps,
?LOWEST_SEQ),
OldHistory = get_value(<<"history">>, RepRecProps, []),
- {OldSeqNum, OldHistory};
+ {OldSeqNum, OldHistory, true};
false ->
SourceHistory = get_value(<<"history">>, RepRecProps, []),
TargetHistory = get_value(<<"history">>, RepRecPropsTgt, []),
@@ -1284,7 +1400,8 @@ compare_replication_logs(SrcDoc, TgtDoc) ->
"Scanning histories to find a common ancestor.", []),
couch_log:debug("Record on source:~p~nRecord on target:~p~n",
[RepRecProps, RepRecPropsTgt]),
- compare_rep_history(SourceHistory, TargetHistory)
+ {StartSeq, History} = compare_rep_history(SourceHistory, TargetHistory),
+ {StartSeq, History, false}
end.
@@ -1431,6 +1548,17 @@ log_replication_start(#rep_state{} = RepState) ->
_ ->
"from _replicate endpoint"
end,
+ ?LOG_NOTICE(#{
+ what => starting_replication,
+ in => replicator,
+ source => Source,
+ target => Target,
+ replication_db => DbName,
+ replication_doc => DocId,
+ session_id => Sid,
+ worker_processes => Workers,
+ worker_batch_size => BatchSize
+ }),
Msg = "Starting replication ~s (~s -> ~s) ~s worker_procesess:~p"
" worker_batch_size:~p session_id:~s",
couch_log:notice(Msg, [Id, Source, Target, From, Workers, BatchSize, Sid]).
@@ -1447,6 +1575,13 @@ check_user_filter(#rep_state{} = State) ->
{RepId, BaseId} ->
ok;
{NewId, NewBaseId} when is_binary(NewId), is_binary(NewBaseId) ->
+ ?LOG_ERROR(#{
+ what => replication_id_updated,
+ in => replicator,
+ old_id => RepId,
+ new_id => NewId,
+ details => "replication job shutting down"
+ }),
LogMsg = "~p : Replication id was updated ~p -> ~p",
couch_log:error(LogMsg, [?MODULE, RepId, NewId]),
reschedule(undefined, Job, JobData),
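The hunks above (and the matching ones in couch_replicator_parse.erl further down) move from erlang:get_stacktrace/0 to the OTP 21+ pattern that binds the stacktrace directly in the catch clause. A minimal standalone sketch of that syntax, with do_work/0 as a hypothetical function:

    try
        do_work()
    catch
        _Tag:Error:Stack ->
            %% Stack is bound in the pattern; erlang:get_stacktrace/0 is
            %% no longer available on newer OTP releases.
            {error, Error, Stack}
    end.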
diff --git a/src/couch_replicator/src/couch_replicator_job_server.erl b/src/couch_replicator/src/couch_replicator_job_server.erl
index a2e90b061..2452a222d 100644
--- a/src/couch_replicator/src/couch_replicator_job_server.erl
+++ b/src/couch_replicator/src/couch_replicator_job_server.erl
@@ -38,6 +38,7 @@
-include("couch_replicator.hrl").
+-include_lib("kernel/include/logger.hrl").
-define(MAX_ACCEPTORS, 2).
@@ -95,6 +96,11 @@ handle_call({accepted, Pid, Normal}, _From, #{} = St) ->
},
{reply, ok, spawn_acceptors(St1)};
false ->
+ ?LOG_ERROR(#{
+ what => unknown_acceptor,
+ in => replicator,
+ pid => Pid
+ }),
LogMsg = "~p : unknown acceptor processs ~p",
couch_log:error(LogMsg, [?MODULE, Pid]),
{stop, {unknown_acceptor_pid, Pid}, St}
@@ -234,6 +240,11 @@ transient_job_cleanup(#{} = St) ->
case State =:= finished andalso IsTransient andalso IsOld of
true ->
ok = couch_replicator_jobs:remove_job(undefined, JobId),
+ ?LOG_INFO(#{
+ what => removing_old_job,
+ in => replicator,
+ jobid => JobId
+ }),
couch_log:info("~p : Removed old job ~p", [?MODULE, JobId]),
ok;
false ->
@@ -301,6 +312,11 @@ wait_jobs_exit(#{} = Jobs, Timeout) ->
wait_jobs_exit(maps:remove(Pid, Jobs), Timeout)
after
Timeout ->
+ ?LOG_ERROR(#{
+ what => unclean_job_termination,
+ in => replicator,
+ job_count => map_size(Jobs)
+ }),
LogMsg = "~p : ~p jobs didn't terminate cleanly",
couch_log:error(LogMsg, [?MODULE, map_size(Jobs)]),
ok
@@ -329,6 +345,12 @@ spawn_acceptors(St) ->
handle_acceptor_exit(#{acceptors := Acceptors} = St, Pid, Reason) ->
St1 = St#{acceptors := maps:remove(Pid, Acceptors)},
+ ?LOG_ERROR(#{
+ what => acceptor_crash,
+ in => replicator,
+ pid => Pid,
+ details => Reason
+ }),
LogMsg = "~p : acceptor process ~p exited with ~p",
couch_log:error(LogMsg, [?MODULE, Pid, Reason]),
{noreply, spawn_acceptors(St1)}.
@@ -344,6 +366,12 @@ handle_worker_exit(#{workers := Workers} = St, Pid, Reason) ->
{shutdown, _} ->
ok;
_ ->
+ ?LOG_ERROR(#{
+ what => worker_crash,
+ in => replicator,
+ pid => Pid,
+ details => Reason
+ }),
LogMsg = "~p : replicator job process ~p exited with ~p",
couch_log:error(LogMsg, [?MODULE, Pid, Reason])
end,
@@ -351,6 +379,11 @@ handle_worker_exit(#{workers := Workers} = St, Pid, Reason) ->
handle_unknown_exit(St, Pid, Reason) ->
+ ?LOG_ERROR(#{
+ what => unknown_process_crash,
+ in => replicator,
+ pid => Pid
+ }),
LogMsg = "~p : unknown process ~p exited with ~p",
couch_log:error(LogMsg, [?MODULE, Pid, Reason]),
{stop, {unknown_pid_exit, Pid}, St}.
diff --git a/src/couch_replicator/src/couch_replicator_parse.erl b/src/couch_replicator/src/couch_replicator_parse.erl
index 5996ec507..ac25bee41 100644
--- a/src/couch_replicator/src/couch_replicator_parse.erl
+++ b/src/couch_replicator/src/couch_replicator_parse.erl
@@ -23,6 +23,7 @@
-include_lib("ibrowse/include/ibrowse.hrl").
-include("couch_replicator.hrl").
+-include_lib("kernel/include/logger.hrl").
-define(DEFAULT_SOCK_OPTS, "[{keepalive, true}, {nodelay, false}]").
@@ -59,13 +60,24 @@ parse_rep_doc(RepDoc) ->
{ok, Rep} = try
parse_rep(RepDoc, null)
catch
- throw:{error, Reason} ->
- Stack = erlang:get_stacktrace(),
+ throw:{error, Reason}:Stack ->
+ ?LOG_ERROR(#{
+ what => replication_doc_parse_error,
+ in => replicator,
+ details => Reason,
+ stacktrace => Stack
+ }),
LogErr1 = "~p parse_rep_doc fail ~p ~p",
couch_log:error(LogErr1, [?MODULE, Reason, Stack]),
throw({bad_rep_doc, Reason});
- Tag:Err ->
- Stack = erlang:get_stacktrace(),
+ Tag:Err:Stack ->
+ ?LOG_ERROR(#{
+ what => replication_doc_parse_error,
+ in => replicator,
+ tag => Tag,
+ details => Err,
+ stacktrace => Stack
+ }),
LogErr2 = "~p parse_rep_doc fail ~p:~p ~p",
couch_log:error(LogErr2, [?MODULE, Tag, Err, Stack]),
throw({bad_rep_doc, couch_util:to_binary({Tag, Err})})
@@ -83,13 +95,24 @@ parse_transient_rep(#{} = Body, UserName) ->
{ok, Rep} = try
parse_rep(Body, UserName)
catch
- throw:{error, Reason} ->
- Stack = erlang:get_stacktrace(),
+ throw:{error, Reason}:Stack ->
+ ?LOG_ERROR(#{
+ what => transient_replication_parse_error,
+ in => replicator,
+ details => Reason,
+ stacktrace => Stack
+ }),
LogErr1 = "~p parse_transient_rep fail ~p ~p",
couch_log:error(LogErr1, [?MODULE, Reason, Stack]),
throw({bad_request, Reason});
- Tag:Err ->
- Stack = erlang:get_stacktrace(),
+ Tag:Err:Stack ->
+ ?LOG_ERROR(#{
+ what => transient_replication_parse_error,
+ in => replicator,
+ tag => Tag,
+ details => Err,
+ stacktrace => Stack
+ }),
LogErr2 = "~p parse_transient_rep fail ~p ~p",
couch_log:error(LogErr2, [?MODULE, Tag, Err, Stack]),
throw({bad_request, couch_util:to_binary({Tag, Err})})
diff --git a/src/couch_replicator/src/couch_replicator_worker.erl b/src/couch_replicator/src/couch_replicator_worker.erl
index 4cd984c1a..b57727f98 100644
--- a/src/couch_replicator/src/couch_replicator_worker.erl
+++ b/src/couch_replicator/src/couch_replicator_worker.erl
@@ -25,6 +25,7 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-include("couch_replicator.hrl").
+-include_lib("kernel/include/logger.hrl").
% TODO: maybe make both buffer max sizes configurable
-define(DOC_BUFFER_BYTE_SIZE, 512 * 1024). % for remote targets
@@ -225,6 +226,11 @@ queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager) ->
{ok, Stats} = gen_server:call(Parent, flush, infinity),
ok = gen_server:call(Cp, {report_seq_done, ReportSeq, Stats}, infinity),
erlang:put(last_stats_report, os:timestamp()),
+ ?LOG_DEBUG(#{
+ what => worker_progress_report,
+ in => replicator,
+ seq => ReportSeq
+ }),
couch_log:debug("Worker reported completion of seq ~p", [ReportSeq]),
queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager)
end.
@@ -265,6 +271,14 @@ fetch_doc(Source, {Id, Revs, PAs}, DocHandler, Acc) ->
[Id, couch_doc:revs_to_strs(Revs)]),
WaitMSec = config:get_integer("replicator", "missing_doc_retry_msec",
?MISSING_DOC_RETRY_MSEC),
+ ?LOG_ERROR(#{
+ what => missing_document,
+ in => replicator,
+ source => couch_replicator_api_wrap:db_uri(Source),
+ docid => Id,
+ revisions => couch_doc:revs_to_strs(Revs),
+ retry_delay_sec => WaitMSec / 1000
+ }),
timer:sleep(WaitMSec),
couch_replicator_api_wrap:open_doc_revs(Source, Id, Revs, [latest], DocHandler, Acc);
throw:{missing_stub, _} ->
@@ -273,6 +287,14 @@ fetch_doc(Source, {Id, Revs, PAs}, DocHandler, Acc) ->
[Id, couch_doc:revs_to_strs(Revs)]),
WaitMSec = config:get_integer("replicator", "missing_doc_retry_msec",
?MISSING_DOC_RETRY_MSEC),
+ ?LOG_ERROR(#{
+ what => missing_attachment_stub,
+ in => replicator,
+ source => couch_replicator_api_wrap:db_uri(Source),
+ docid => Id,
+ revisions => couch_doc:revs_to_strs(Revs),
+ retry_delay_sec => WaitMSec / 1000
+ }),
timer:sleep(WaitMSec),
couch_replicator_api_wrap:open_doc_revs(Source, Id, Revs, [latest], DocHandler, Acc)
end.
@@ -282,6 +304,7 @@ remote_doc_handler({ok, #doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc},
Acc) ->
% Flush design docs in their own PUT requests to correctly process
% authorization failures for design doc updates.
+ ?LOG_DEBUG(#{what => flush_ddoc, in => replicator}),
couch_log:debug("Worker flushing design doc", []),
doc_handler_flush_doc(Doc, Acc);
remote_doc_handler({ok, #doc{atts = [_ | _]} = Doc}, Acc) ->
@@ -289,6 +312,7 @@ remote_doc_handler({ok, #doc{atts = [_ | _]} = Doc}, Acc) ->
% source. The data property of each attachment is a function that starts
% streaming the attachment data from the remote source, therefore it's
% convenient to call it ASAP to avoid ibrowse inactivity timeouts.
+ ?LOG_DEBUG(#{what => flush_doc_with_attachments, in => replicator}),
couch_log:debug("Worker flushing doc with attachments", []),
doc_handler_flush_doc(Doc, Acc);
remote_doc_handler({ok, #doc{atts = []} = Doc}, {Parent, _} = Acc) ->
@@ -314,6 +338,11 @@ doc_handler_flush_doc(#doc{} = Doc, {Parent, Target} = Acc) ->
spawn_writer(Target, #batch{docs = DocList, size = Size}) ->
case {Target, Size > 0} of
{#httpdb{}, true} ->
+ ?LOG_DEBUG(#{
+ what => flush_doc_batch,
+ in => replicator,
+ batch_size_bytes => Size
+ }),
couch_log:debug("Worker flushing doc batch of size ~p bytes", [Size]);
_ ->
ok
@@ -354,6 +383,11 @@ maybe_flush_docs(#httpdb{} = Target, Batch, Doc) ->
JsonDoc = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, [revs, attachments])),
case SizeAcc + iolist_size(JsonDoc) of
SizeAcc2 when SizeAcc2 > ?DOC_BUFFER_BYTE_SIZE ->
+ ?LOG_DEBUG(#{
+ what => flush_doc_batch,
+ in => replicator,
+ batch_size_bytes => SizeAcc2
+ }),
couch_log:debug("Worker flushing doc batch of size ~p bytes", [SizeAcc2]),
Stats = flush_docs(Target, [JsonDoc | DocAcc]),
{#batch{}, Stats};
@@ -371,12 +405,27 @@ flush_docs(Target, DocList) ->
handle_flush_docs_result(FlushResult, Target, DocList).
-handle_flush_docs_result({error, request_body_too_large}, _Target, [Doc]) ->
+handle_flush_docs_result({error, request_body_too_large}, Target, [Doc]) ->
+ ?LOG_ERROR(#{
+ what => doc_write_failure,
+ in => replicator,
+ target => couch_replicator_api_wrap:db_uri(Target),
+ reason => request_body_too_large,
+ docid => extract_value(<<"_id">>, Doc)
+ }),
couch_log:error("Replicator: failed to write doc ~p. Too large", [Doc]),
couch_replicator_stats:new([{doc_write_failures, 1}]);
handle_flush_docs_result({error, request_body_too_large}, Target, DocList) ->
Len = length(DocList),
{DocList1, DocList2} = lists:split(Len div 2, DocList),
+ ?LOG_NOTICE(#{
+ what => split_large_batch,
+ in => replicator,
+ target => couch_replicator_api_wrap:db_uri(Target),
+ reason => request_body_too_large,
+ original_batch_size => Len,
+ details => "splitting into two smaller batches and retrying"
+ }),
couch_log:notice("Replicator: couldn't write batch of size ~p to ~p because"
" request body is too large. Splitting batch into 2 separate batches of"
" sizes ~p and ~p", [Len, couch_replicator_api_wrap:db_uri(Target),
@@ -388,6 +437,15 @@ handle_flush_docs_result({ok, Errors}, Target, DocList) ->
DbUri = couch_replicator_api_wrap:db_uri(Target),
lists:foreach(
fun({Props}) ->
+ ?LOG_ERROR(#{
+ what => doc_write_failure,
+ in => replicator,
+ target => couch_replicator_api_wrap:db_uri(Target),
+ docid => get_value(id, Props, undefined),
+ revision => get_value(rev, Props, undefined),
+ error => get_value(error, Props, undefined),
+ details => get_value(reason, Props, undefined)
+ }),
couch_log:error("Replicator: couldn't write document `~s`, revision"
" `~s`, to target database `~s`. Error: `~s`, reason: `~s`.", [
get_value(id, Props, ""), get_value(rev, Props, ""), DbUri,
@@ -400,12 +458,29 @@ handle_flush_docs_result({ok, Errors}, Target, DocList) ->
handle_flush_docs_result({error, {bulk_docs_failed, _, _} = Err}, _, _) ->
exit(Err).
+extract_value(Prop, Json) when is_binary(Json) ->
+ try
+ {Props} = ?JSON_DECODE(Json),
+ get_value(Prop, Props, undefined)
+ catch _:_ ->
+ undefined
+ end;
+extract_value(_, _) ->
+ undefined.
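+% Illustrative behaviour sketch (document id invented): when the oversized
+% batch held a single raw JSON binary, extract_value/2 recovers its id for
+% the structured log entry above and returns `undefined` for anything it
+% cannot decode:
+%
+%   <<"doc-1">> = extract_value(<<"_id">>, <<"{\"_id\":\"doc-1\"}">>),
+%   undefined   = extract_value(<<"_id">>, not_a_binary).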
flush_doc(Target, #doc{id = Id, revs = {Pos, [RevId | _]}} = Doc) ->
try couch_replicator_api_wrap:update_doc(Target, Doc, [], replicated_changes) of
{ok, _} ->
ok;
Error ->
+ ?LOG_ERROR(#{
+ what => doc_write_failure,
+ in => replicator,
+ target => couch_replicator_api_wrap:db_uri(Target),
+ docid => Id,
+ revision => couch_doc:rev_to_str({Pos, RevId}),
+ details => Error
+ }),
couch_log:error("Replicator: error writing document `~s` to `~s`: ~s",
[Id, couch_replicator_api_wrap:db_uri(Target), couch_util:to_binary(Error)]),
Error
@@ -413,12 +488,29 @@ flush_doc(Target, #doc{id = Id, revs = {Pos, [RevId | _]}} = Doc) ->
throw:{missing_stub, _} = MissingStub ->
throw(MissingStub);
throw:{Error, Reason} ->
+ ?LOG_ERROR(#{
+ what => doc_write_failure,
+ in => replicator,
+ target => couch_replicator_api_wrap:db_uri(Target),
+ docid => Id,
+ revision => couch_doc:rev_to_str({Pos, RevId}),
+ error => Error,
+ details => Reason
+ }),
couch_log:error("Replicator: couldn't write document `~s`, revision `~s`,"
" to target database `~s`. Error: `~s`, reason: `~s`.",
[Id, couch_doc:rev_to_str({Pos, RevId}),
couch_replicator_api_wrap:db_uri(Target), to_binary(Error), to_binary(Reason)]),
{error, Error};
throw:Err ->
+ ?LOG_ERROR(#{
+ what => doc_write_failure,
+ in => replicator,
+ target => couch_replicator_api_wrap:db_uri(Target),
+ docid => Id,
+ revision => couch_doc:rev_to_str({Pos, RevId}),
+ details => Err
+ }),
couch_log:error("Replicator: couldn't write document `~s`, revision `~s`,"
" to target database `~s`. Error: `~s`.",
[Id, couch_doc:rev_to_str({Pos, RevId}),
diff --git a/src/couch_replicator/test/eunit/couch_replicator_db_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_db_tests.erl
index 053441007..46133e2a8 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_db_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_db_tests.erl
@@ -36,6 +36,7 @@ couch_replicator_db_test_() ->
?TDEF_FE(replicator_db_deleted, 15),
?TDEF_FE(replicator_db_recreated, 15),
?TDEF_FE(invalid_replication_docs),
+ ?TDEF_FE(scheduler_default_headers_returned),
?TDEF_FE(duplicate_persistent_replication, 15),
?TDEF_FE(duplicate_transient_replication, 30)
]
@@ -250,6 +251,14 @@ duplicate_transient_replication({Source, Target, RepDb}) ->
wait_scheduler_docs_not_found(RepDb, DocId).
+scheduler_default_headers_returned({_, _, _}) ->
+ SUrl = couch_replicator_test_helper:server_url(),
+ Url = lists:flatten(io_lib:format("~s/_scheduler/jobs", [SUrl])),
+ {ok, _, Headers, _} = test_request:get(Url, []),
+ ?assertEqual(true, lists:keymember("X-Couch-Request-ID", 1, Headers)),
+ ?assertEqual(true, lists:keymember("X-CouchDB-Body-Time", 1, Headers)).
+
+
scheduler_jobs(Id) ->
SUrl = couch_replicator_test_helper:server_url(),
Url = lists:flatten(io_lib:format("~s/_scheduler/jobs/~s", [SUrl, Id])),
diff --git a/src/couch_replicator/test/eunit/couch_replicator_many_leaves_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_many_leaves_tests.erl
index 3dbfa6aba..a6623eb04 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_many_leaves_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_many_leaves_tests.erl
@@ -20,7 +20,7 @@
-define(DOCS_CONFLICTS, [
{<<"doc1">>, 10},
% use some _design docs as well to test the special handling for them
- {<<"_design/doc2">>, 100},
+ {<<"_design/doc2">>, 10},
% a number > MaxURLlength (7000) / length(DocRevisionString)
{<<"doc3">>, 210}
]).
@@ -41,7 +41,7 @@ docs_with_many_leaves_test_() ->
fun setup/0,
fun teardown/1,
[
- ?TDEF_FE(should_replicate_doc_with_many_leaves, 180)
+ ?TDEF_FE(should_replicate_doc_with_many_leaves, 240)
]
}
}
@@ -132,7 +132,7 @@ add_attachments(#{} = SourceDb, NumAtts,
NewDocs = lists:foldl(fun
(#doc{atts = Atts, revs = {Pos, [Rev | _]}} = Doc, Acc) ->
NewAtts = lists:foldl(fun(I, AttAcc) ->
- [att(I, {Pos, Rev}, 100) | AttAcc]
+ [att(I, {Pos, Rev}, 10) | AttAcc]
end, [], lists:seq(1, NumAtts)),
[Doc#doc{atts = Atts ++ NewAtts} | Acc]
end, [], SourceDocs),
diff --git a/src/couch_replicator/test/eunit/couch_replicator_test_helper.erl b/src/couch_replicator/test/eunit/couch_replicator_test_helper.erl
index 2ac447eb3..39717dd0d 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_test_helper.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_test_helper.erl
@@ -56,7 +56,7 @@
start_couch() ->
- Ctx = test_util:start_couch([fabric, chttpd, couch_replicator]),
+ Ctx = test_util:start_couch([fabric, chttpd, couch_replicator, couch_js]),
Hashed = couch_passwords:hash_admin_password(?PASSWORD),
ok = config:set("admins", ?USERNAME, ?b2l(Hashed), _Persist = false),
Ctx.
diff --git a/src/couch_views/include/couch_views.hrl b/src/couch_views/include/couch_views.hrl
index 92b8f46fb..86f73a325 100644
--- a/src/couch_views/include/couch_views.hrl
+++ b/src/couch_views/include/couch_views.hrl
@@ -40,3 +40,102 @@
% indexing progress
-define(INDEX_BUILDING, <<"building">>).
-define(INDEX_READY, <<"ready">>).
+
+% Views/db marker to indicate that the current (latest) FDB GRV version should
+% be used. Use `null` so it can be round-tripped through json serialization
+% with couch_jobs.
+-define(VIEW_CURRENT_VSN, null).
+
+
+-record(mrst, {
+ sig=nil,
+ fd=nil,
+ fd_monitor,
+ db_name,
+ idx_name,
+ language,
+ design_opts=[],
+ partitioned=false,
+ lib,
+ views,
+ id_btree=nil,
+ update_seq=0,
+ purge_seq=0,
+ first_build,
+ partial_resp_pid,
+ doc_acc,
+ doc_queue,
+ write_queue,
+ qserver=nil
+}).
+
+
+-record(mrview, {
+ id_num,
+ update_seq=0,
+ purge_seq=0,
+ map_names=[],
+ reduce_funs=[],
+ def,
+ btree=nil,
+ options=[]
+}).
+
+
+-define(MAX_VIEW_LIMIT, 16#10000000).
+
+-record(mrargs, {
+ view_type,
+ reduce,
+
+ preflight_fun,
+
+ start_key,
+ start_key_docid,
+ end_key,
+ end_key_docid,
+ keys,
+
+ direction = fwd,
+ limit = ?MAX_VIEW_LIMIT,
+ skip = 0,
+ group_level = 0,
+ group = undefined,
+ stable = false,
+ update = true,
+ multi_get = false,
+ inclusive_end = true,
+ include_docs = false,
+ doc_options = [],
+ update_seq=false,
+ conflicts,
+ callback,
+ sorted = true,
+ extra = [],
+ page_size = undefined,
+ bookmark=nil
+}).
+
+-record(vacc, {
+ db,
+ req,
+ resp,
+ prepend,
+ etag,
+ should_close = false,
+ buffer = [],
+ bufsize = 0,
+ threshold = 1490,
+ row_sent = false,
+ meta_sent = false,
+ paginated = false,
+ meta = #{}
+}).
+
+
+-record(view_row, {
+ key,
+ id,
+ value,
+ doc
+}).
diff --git a/src/couch_views/src/couch_views.app.src b/src/couch_views/src/couch_views.app.src
index 985c503cd..8ec3a3243 100644
--- a/src/couch_views/src/couch_views.app.src
+++ b/src/couch_views/src/couch_views.app.src
@@ -23,6 +23,7 @@
stdlib,
erlfdb,
couch_epi,
+ couch_lib,
couch_log,
config,
couch_stats,
diff --git a/src/couch_views/src/couch_views.erl b/src/couch_views/src/couch_views.erl
index 2d916314f..5804db092 100644
--- a/src/couch_views/src/couch_views.erl
+++ b/src/couch_views/src/couch_views.erl
@@ -26,7 +26,6 @@
]).
-include("couch_views.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
query(Db, DDoc, ViewName, Callback, Acc0, Args0) ->
@@ -46,18 +45,19 @@ query(Db, DDoc, ViewName, Callback, Acc0, Args0) ->
} = Mrst,
Args1 = to_mrargs(Args0),
- Args2 = couch_mrview_util:set_view_type(Args1, ViewName, Views),
- Args3 = couch_mrview_util:validate_args(Args2),
+ Args2 = couch_views_util:set_view_type(Args1, ViewName, Views),
+ Args3 = couch_views_validate:validate_args(Args2),
ok = check_range(Mrst, ViewName, Args3),
try
fabric2_fdb:transactional(Db, fun(TxDb) ->
ok = maybe_update_view(TxDb, Mrst, IsInteractive, Args3),
- read_view(TxDb, Mrst, ViewName, Callback, Acc0, Args3)
+ IdxVStamps = {?VIEW_CURRENT_VSN, ?VIEW_CURRENT_VSN},
+ read_view(TxDb, Mrst, ViewName, Callback, Acc0, Args3, IdxVStamps)
end)
catch throw:{build_view, WaitSeq} ->
- couch_views_jobs:build_view(Db, Mrst, WaitSeq),
- read_view(Db, Mrst, ViewName, Callback, Acc0, Args3)
+ {ok, IdxVStamps} = couch_views_jobs:build_view(Db, Mrst, WaitSeq),
+ read_view(Db, Mrst, ViewName, Callback, Acc0, Args3, IdxVStamps)
end.
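A hedged reading of the new IdxVStamps plumbing: the tuple is {DbReadVsn, ViewReadVsn}. ?VIEW_CURRENT_VSN (null) on both sides means the reader keeps the transaction's own GRV, while integer versions handed back by couch_views_jobs:build_view/3 pin the read to the point at which the indexer committed. The two shapes, with illustrative version numbers:

    %% View already built: read at the current version.
    IdxVStamps0 = {?VIEW_CURRENT_VSN, ?VIEW_CURRENT_VSN},
    %% View just built by the indexer: pin to its commit (values invented).
    IdxVStamps1 = {123456789, 123456790}.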
@@ -126,14 +126,32 @@ get_total_view_size(TxDb, Mrst) ->
end, 0, Mrst#mrst.views).
-read_view(Db, Mrst, ViewName, Callback, Acc0, Args) ->
+read_view(Db, Mrst, ViewName, Callback, Acc0, Args, {_, _} = IdxVStamps) ->
+ {DbReadVsn, ViewReadVsn} = IdxVStamps,
fabric2_fdb:transactional(Db, fun(TxDb) ->
+ case ViewReadVsn of
+ ?VIEW_CURRENT_VSN ->
+ ok;
+ _ when is_integer(ViewReadVsn) ->
+ % Set the GRV of the transaction to the committed
+ % version of the indexer. That is the version at which
+ % the indexer has committed the view data.
+ erlfdb:set_read_version(maps:get(tx, TxDb), ViewReadVsn)
+ end,
try
- couch_views_reader:read(TxDb, Mrst, ViewName, Callback, Acc0, Args)
+ couch_views_reader:read(TxDb, Mrst, ViewName, Callback, Acc0, Args,
+ DbReadVsn)
after
UpdateAfter = Args#mrargs.update == lazy,
if UpdateAfter == false -> ok; true ->
- couch_views_jobs:build_view_async(TxDb, Mrst)
+ % Make sure to use a separate transaction if we are
+ % reading from a stale snapshot
+ case ViewReadVsn of
+ ?VIEW_CURRENT_VSN ->
+ couch_views_jobs:build_view_async(TxDb, Mrst);
+ _ ->
+ couch_views_jobs:build_view_async(Db, Mrst)
+ end
end
end
end).
@@ -180,7 +198,7 @@ check_range(Mrst, ViewName, Args) ->
language = Lang,
views = Views
} = Mrst,
- View = case couch_mrview_util:extract_view(Lang, Args, ViewName, Views) of
+ View = case couch_views_util:extract_view(Lang, Args, ViewName, Views) of
{map, V, _} -> V;
{red, {_, _, V}, _} -> V
end,
diff --git a/src/couch_views/src/couch_views_batch.erl b/src/couch_views/src/couch_views_batch.erl
index ba2a22782..555eac9ed 100644
--- a/src/couch_views/src/couch_views_batch.erl
+++ b/src/couch_views/src/couch_views_batch.erl
@@ -20,7 +20,7 @@
]).
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
-type update_stats() :: #{
docs_read => non_neg_integer(),
diff --git a/src/couch_views/src/couch_views_batch_impl.erl b/src/couch_views/src/couch_views_batch_impl.erl
index d315a3bf6..d17b5b1ec 100644
--- a/src/couch_views/src/couch_views_batch_impl.erl
+++ b/src/couch_views/src/couch_views_batch_impl.erl
@@ -22,7 +22,7 @@
]).
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
-record(batch_st, {
@@ -48,7 +48,7 @@ start(Mrst, undefined) ->
search_incr = get_config(batch_search_increment, "500"),
sense_incr = get_config(batch_sense_increment, "100"),
max_tx_size_bytes = get_config(batch_max_tx_size_bytes, "9000000"),
- max_tx_time_msec = get_config(batch_max_tx_time_msec, "4500"),
+ max_tx_time_msec = get_config(batch_max_tx_time_msec, "1500"),
threshold_penalty = get_config(
batch_threshold_penalty,
"0.2",
diff --git a/src/couch_views/src/couch_views_fdb.erl b/src/couch_views/src/couch_views_fdb.erl
index b0fb82e85..d8c981300 100644
--- a/src/couch_views/src/couch_views_fdb.erl
+++ b/src/couch_views/src/couch_views_fdb.erl
@@ -38,7 +38,6 @@
-include("couch_views.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-include_lib("fabric/include/fabric2.hrl").
diff --git a/src/couch_views/src/couch_views_http.erl b/src/couch_views/src/couch_views_http.erl
index e21acfb9f..67e2a7708 100644
--- a/src/couch_views/src/couch_views_http.erl
+++ b/src/couch_views/src/couch_views_http.erl
@@ -13,7 +13,7 @@
-module(couch_views_http).
-include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
-export([
parse_body_and_query/2,
@@ -24,7 +24,8 @@
row_to_obj/2,
view_cb/2,
paginated/5,
- paginated/6
+ paginated/6,
+ transform_row/1
]).
-define(BOOKMARK_VSN, 1).
@@ -63,7 +64,7 @@ parse_params(Props, Keys, #mrargs{}=Args, Options) ->
_ ->
throw({bad_request, "Cannot use `bookmark` with other options"})
end,
- couch_mrview_http:parse_params(Props, Keys, Args, Options).
+ couch_views_http_util:parse_params(Props, Keys, Args, Options).
row_to_obj(Row) ->
@@ -72,11 +73,11 @@ row_to_obj(Row) ->
row_to_obj(Id, Row) ->
- couch_mrview_http:row_to_obj(Id, Row).
+ couch_views_http_util:row_to_obj(Id, Row).
view_cb(Msg, #vacc{paginated = false}=Acc) ->
- couch_mrview_http:view_cb(Msg, Acc);
+ couch_views_http_util:view_cb(Msg, Acc);
view_cb(Msg, #vacc{paginated = true}=Acc) ->
paginated_cb(Msg, Acc).
@@ -95,9 +96,8 @@ paginated_cb({meta, Meta}, #vacc{}=VAcc) ->
case MetaData of
{_Key, undefined} ->
Acc;
- {total, _Value} ->
- %% We set total_rows elsewere
- Acc;
+ {total, Value} ->
+ maps:put(total_rows, Value, Acc);
{Key, Value} ->
maps:put(list_to_binary(atom_to_list(Key)), Value, Acc)
end
@@ -129,14 +129,12 @@ do_paginated(PageSize, QueriesArgs, KeyFun, Fun) when is_list(QueriesArgs) ->
Result0 = maybe_add_next_bookmark(
OriginalLimit, PageSize, Args, Meta, Items, KeyFun),
Result = maybe_add_previous_bookmark(Args, Result0, KeyFun),
- #{total_rows := Total} = Result,
- {Limit - Total, [Result | Acc]};
+ {Limit - length(maps:get(rows, Result)), [Result | Acc]};
false ->
Bookmark = bookmark_encode(Args0),
Result = #{
rows => [],
- next => Bookmark,
- total_rows => 0
+ next => Bookmark
},
{Limit, [Result | Acc]}
end
@@ -152,8 +150,7 @@ maybe_add_next_bookmark(OriginalLimit, PageSize, Args0, Response, Items, KeyFun)
case check_completion(OriginalLimit, RequestedLimit, Items) of
{Rows, nil} ->
maps:merge(Response, #{
- rows => Rows,
- total_rows => length(Rows)
+ rows => Rows
});
{Rows, Next} ->
{FirstId, FirstKey} = first_key(KeyFun, Rows),
@@ -169,8 +166,7 @@ maybe_add_next_bookmark(OriginalLimit, PageSize, Args0, Response, Items, KeyFun)
Bookmark = bookmark_encode(Args),
maps:merge(Response, #{
rows => Rows,
- next => Bookmark,
- total_rows => length(Rows)
+ next => Bookmark
})
end.
@@ -284,6 +280,25 @@ mask_to_index(Mask, Pos, Acc) when is_integer(Mask), Mask > 0 ->
mask_to_index(Mask bsr 1, Pos + 1, NewAcc).
+transform_row(#view_row{value={[{reduce_overflow_error, Msg}]}}) ->
+ {row, [{key,null}, {id,error}, {value,reduce_overflow_error}, {reason,Msg}]};
+
+transform_row(#view_row{key=Key, id=reduced, value=Value}) ->
+ {row, [{key,Key}, {value,Value}]};
+
+transform_row(#view_row{key=Key, id=undefined}) ->
+ {row, [{key,Key}, {id,error}, {value,not_found}]};
+
+transform_row(#view_row{key=Key, id=Id, value=Value, doc=undefined}) ->
+ {row, [{id,Id}, {key,Key}, {value,Value}]};
+
+transform_row(#view_row{key=Key, id=_Id, value=_Value, doc={error,Reason}}) ->
+ {row, [{id,error}, {key,Key}, {value,Reason}]};
+
+transform_row(#view_row{key=Key, id=Id, value=Value, doc=Doc}) ->
+ {row, [{id,Id}, {key,Key}, {value,Value}, {doc,Doc}]}.
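+% Worked example (values invented): a plain map row with an included doc
+% takes the final clause above,
+%
+%   transform_row(#view_row{key = <<"k">>, id = <<"doc1">>, value = 1,
+%       doc = {[{<<"_id">>, <<"doc1">>}]}})
+%   => {row, [{id, <<"doc1">>}, {key, <<"k">>}, {value, 1},
+%          {doc, {[{<<"_id">>, <<"doc1">>}]}}]}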
+
+
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -356,4 +371,4 @@ check_completion_test() ->
check_completion(2, 3, [1, 2, 3, 4, 5])
),
ok.
--endif.
\ No newline at end of file
+-endif.
diff --git a/src/couch_views/src/couch_views_http_util.erl b/src/couch_views/src/couch_views_http_util.erl
new file mode 100644
index 000000000..b3fd7efc5
--- /dev/null
+++ b/src/couch_views/src/couch_views_http_util.erl
@@ -0,0 +1,337 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+% This module exists alongside couch_views_http because the two have
+% functions with the same names that do slightly different things. The
+% general pattern is that chttpd code calls into couch_views_http and those
+% functions in turn call into this module.
+
+-module(couch_views_http_util).
+
+-export([
+ prepend_val/1,
+ parse_body_and_query/2,
+ parse_body_and_query/3,
+ parse_params/2,
+ parse_params/3,
+ parse_params/4,
+ view_cb/2,
+ row_to_obj/1,
+ row_to_obj/2,
+ row_to_json/1,
+ row_to_json/2
+]).
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
+
+%% these clauses start (and possibly end) the response
+view_cb({error, Reason}, #vacc{resp=undefined}=Acc) ->
+ {ok, Resp} = chttpd:send_error(Acc#vacc.req, Reason),
+ {ok, Acc#vacc{resp=Resp}};
+
+view_cb(complete, #vacc{resp=undefined}=Acc) ->
+ % Nothing in view
+ {ok, Resp} = chttpd:send_json(Acc#vacc.req, 200, {[{rows, []}]}),
+ {ok, Acc#vacc{resp=Resp}};
+
+view_cb(Msg, #vacc{resp=undefined}=Acc) ->
+ %% Start response
+ Headers = [],
+ {ok, Resp} = chttpd:start_delayed_json_response(Acc#vacc.req, 200, Headers),
+ view_cb(Msg, Acc#vacc{resp=Resp, should_close=true});
+
+%% ---------------------------------------------------
+
+%% From here on down, the response has been started.
+
+view_cb({error, Reason}, #vacc{resp=Resp}=Acc) ->
+ {ok, Resp1} = chttpd:send_delayed_error(Resp, Reason),
+ {ok, Acc#vacc{resp=Resp1}};
+
+view_cb(complete, #vacc{resp=Resp, buffer=Buf, threshold=Max}=Acc) ->
+ % Finish view output and possibly end the response
+ {ok, Resp1} = chttpd:close_delayed_json_object(Resp, Buf, "\r\n]}", Max),
+ case Acc#vacc.should_close of
+ true ->
+ {ok, Resp2} = chttpd:end_delayed_json_response(Resp1),
+ {ok, Acc#vacc{resp=Resp2}};
+ _ ->
+ {ok, Acc#vacc{resp=Resp1, meta_sent=false, row_sent=false,
+ prepend=",\r\n", buffer=[], bufsize=0}}
+ end;
+
+view_cb({meta, Meta}, #vacc{meta_sent=false, row_sent=false}=Acc) ->
+ % Sending metadata as we've not sent it or any row yet
+ Parts = case couch_util:get_value(total, Meta) of
+ undefined -> [];
+ Total -> [io_lib:format("\"total_rows\":~p", [Total])]
+ end ++ case couch_util:get_value(offset, Meta) of
+ undefined -> [];
+ Offset -> [io_lib:format("\"offset\":~p", [Offset])]
+ end ++ case couch_util:get_value(update_seq, Meta) of
+ undefined -> [];
+ null ->
+ ["\"update_seq\":null"];
+ UpdateSeq when is_integer(UpdateSeq) ->
+ [io_lib:format("\"update_seq\":~B", [UpdateSeq])];
+ UpdateSeq when is_binary(UpdateSeq) ->
+ [io_lib:format("\"update_seq\":\"~s\"", [UpdateSeq])]
+ end ++ ["\"rows\":["],
+ Chunk = [prepend_val(Acc), "{", string:join(Parts, ","), "\r\n"],
+ {ok, AccOut} = maybe_flush_response(Acc, Chunk, iolist_size(Chunk)),
+ {ok, AccOut#vacc{prepend="", meta_sent=true}};
+
+view_cb({meta, _Meta}, #vacc{}=Acc) ->
+ %% ignore metadata
+ {ok, Acc};
+
+view_cb({row, Row}, #vacc{meta_sent=false}=Acc) ->
+ %% sorted=false and row arrived before meta
+ % Adding another row
+ Chunk = [prepend_val(Acc), "{\"rows\":[\r\n", row_to_json(Row)],
+ maybe_flush_response(Acc#vacc{meta_sent=true, row_sent=true}, Chunk, iolist_size(Chunk));
+
+view_cb({row, Row}, #vacc{meta_sent=true}=Acc) ->
+ % Adding another row
+ Chunk = [prepend_val(Acc), row_to_json(Row)],
+ maybe_flush_response(Acc#vacc{row_sent=true}, Chunk, iolist_size(Chunk)).
+
+
+maybe_flush_response(#vacc{bufsize=Size, threshold=Max} = Acc, Data, Len)
+ when Size > 0 andalso (Size + Len) > Max ->
+ #vacc{buffer = Buffer, resp = Resp} = Acc,
+ {ok, R1} = chttpd:send_delayed_chunk(Resp, Buffer),
+ {ok, Acc#vacc{prepend = ",\r\n", buffer = Data, bufsize = Len, resp = R1}};
+maybe_flush_response(Acc0, Data, Len) ->
+ #vacc{buffer = Buf, bufsize = Size} = Acc0,
+ Acc = Acc0#vacc{
+ prepend = ",\r\n",
+ buffer = [Buf | Data],
+ bufsize = Size + Len
+ },
+ {ok, Acc}.
+
+prepend_val(#vacc{prepend=Prepend}) ->
+ case Prepend of
+ undefined ->
+ "";
+ _ ->
+ Prepend
+ end.
+
+
+row_to_json(Row) ->
+ ?JSON_ENCODE(row_to_obj(Row)).
+
+
+row_to_json(Kind, Row) ->
+ ?JSON_ENCODE(row_to_obj(Kind, Row)).
+
+
+row_to_obj(Row) ->
+ Id = couch_util:get_value(id, Row),
+ row_to_obj(Id, Row).
+
+
+row_to_obj(error, Row) ->
+ % Special case for _all_docs request with KEYS to
+ % match prior behavior.
+ Key = couch_util:get_value(key, Row),
+ Val = couch_util:get_value(value, Row),
+ Reason = couch_util:get_value(reason, Row),
+ ReasonProp = if Reason == undefined -> []; true ->
+ [{reason, Reason}]
+ end,
+ {[{key, Key}, {error, Val}] ++ ReasonProp};
+row_to_obj(Id0, Row) ->
+ Id = case Id0 of
+ undefined -> [];
+ Id0 -> [{id, Id0}]
+ end,
+ Key = couch_util:get_value(key, Row, null),
+ Val = couch_util:get_value(value, Row),
+ Doc = case couch_util:get_value(doc, Row) of
+ undefined -> [];
+ Doc0 -> [{doc, Doc0}]
+ end,
+ {Id ++ [{key, Key}, {value, Val}] ++ Doc}.
+
+
+parse_params(#httpd{}=Req, Keys) ->
+ parse_params(chttpd:qs(Req), Keys);
+parse_params(Props, Keys) ->
+ Args = #mrargs{},
+ parse_params(Props, Keys, Args).
+
+
+parse_params(Props, Keys, Args) ->
+ parse_params(Props, Keys, Args, []).
+
+parse_params(Props, Keys, #mrargs{}=Args0, Options) ->
+ IsDecoded = lists:member(decoded, Options),
+ Args1 = case lists:member(keep_group_level, Options) of
+ true ->
+ Args0;
+ _ ->
+ % group_level set to undefined to detect if explicitly set by user
+ Args0#mrargs{keys=Keys, group=undefined, group_level=undefined}
+ end,
+ lists:foldl(fun({K, V}, Acc) ->
+ parse_param(K, V, Acc, IsDecoded)
+ end, Args1, Props).
+
+
+parse_body_and_query(#httpd{method='POST'} = Req, Keys) ->
+ Props = chttpd:json_body_obj(Req),
+ parse_body_and_query(Req, Props, Keys);
+
+parse_body_and_query(Req, Keys) ->
+ parse_params(chttpd:qs(Req), Keys, #mrargs{keys=Keys, group=undefined,
+ group_level=undefined}, [keep_group_level]).
+
+parse_body_and_query(Req, {Props}, Keys) ->
+ Args = #mrargs{keys=Keys, group=undefined, group_level=undefined},
+ BodyArgs = parse_params(Props, Keys, Args, [decoded]),
+ parse_params(chttpd:qs(Req), Keys, BodyArgs, [keep_group_level]).
+
+parse_param(Key, Val, Args, IsDecoded) when is_binary(Key) ->
+ parse_param(binary_to_list(Key), Val, Args, IsDecoded);
+parse_param(Key, Val, Args, IsDecoded) ->
+ case Key of
+ "" ->
+ Args;
+ "reduce" ->
+ Args#mrargs{reduce=parse_boolean(Val)};
+ "key" when IsDecoded ->
+ Args#mrargs{start_key=Val, end_key=Val};
+ "key" ->
+ JsonKey = ?JSON_DECODE(Val),
+ Args#mrargs{start_key=JsonKey, end_key=JsonKey};
+ "keys" when IsDecoded ->
+ Args#mrargs{keys=Val};
+ "keys" ->
+ Args#mrargs{keys=?JSON_DECODE(Val)};
+ "startkey" when IsDecoded ->
+ Args#mrargs{start_key=Val};
+ "start_key" when IsDecoded ->
+ Args#mrargs{start_key=Val};
+ "startkey" ->
+ Args#mrargs{start_key=?JSON_DECODE(Val)};
+ "start_key" ->
+ Args#mrargs{start_key=?JSON_DECODE(Val)};
+ "startkey_docid" ->
+ Args#mrargs{start_key_docid=couch_util:to_binary(Val)};
+ "start_key_doc_id" ->
+ Args#mrargs{start_key_docid=couch_util:to_binary(Val)};
+ "endkey" when IsDecoded ->
+ Args#mrargs{end_key=Val};
+ "end_key" when IsDecoded ->
+ Args#mrargs{end_key=Val};
+ "endkey" ->
+ Args#mrargs{end_key=?JSON_DECODE(Val)};
+ "end_key" ->
+ Args#mrargs{end_key=?JSON_DECODE(Val)};
+ "endkey_docid" ->
+ Args#mrargs{end_key_docid=couch_util:to_binary(Val)};
+ "end_key_doc_id" ->
+ Args#mrargs{end_key_docid=couch_util:to_binary(Val)};
+ "limit" ->
+ Args#mrargs{limit=parse_pos_int(Val)};
+ "page_size" ->
+ Args#mrargs{page_size=parse_pos_int(Val)};
+ "stale" when Val == "ok" orelse Val == <<"ok">> ->
+ Args#mrargs{stable=true, update=false};
+ "stale" when Val == "update_after" orelse Val == <<"update_after">> ->
+ Args#mrargs{stable=true, update=lazy};
+ "stale" ->
+ throw({query_parse_error, <<"Invalid value for `stale`.">>});
+ "stable" when Val == "true" orelse Val == <<"true">> orelse Val == true ->
+ Args#mrargs{stable=true};
+ "stable" when Val == "false" orelse Val == <<"false">> orelse Val == false ->
+ Args#mrargs{stable=false};
+ "stable" ->
+ throw({query_parse_error, <<"Invalid value for `stable`.">>});
+ "update" when Val == "true" orelse Val == <<"true">> orelse Val == true ->
+ Args#mrargs{update=true};
+ "update" when Val == "false" orelse Val == <<"false">> orelse Val == false ->
+ Args#mrargs{update=false};
+ "update" when Val == "lazy" orelse Val == <<"lazy">> ->
+ Args#mrargs{update=lazy};
+ "update" ->
+ throw({query_parse_error, <<"Invalid value for `update`.">>});
+ "descending" ->
+ case parse_boolean(Val) of
+ true -> Args#mrargs{direction=rev};
+ _ -> Args#mrargs{direction=fwd}
+ end;
+ "skip" ->
+ Args#mrargs{skip=parse_pos_int(Val)};
+ "group" ->
+ Args#mrargs{group=parse_boolean(Val)};
+ "group_level" ->
+ Args#mrargs{group_level=parse_pos_int(Val)};
+ "inclusive_end" ->
+ Args#mrargs{inclusive_end=parse_boolean(Val)};
+ "include_docs" ->
+ Args#mrargs{include_docs=parse_boolean(Val)};
+ "attachments" ->
+ case parse_boolean(Val) of
+ true ->
+ Opts = Args#mrargs.doc_options,
+ Args#mrargs{doc_options=[attachments|Opts]};
+ false ->
+ Args
+ end;
+ "att_encoding_info" ->
+ case parse_boolean(Val) of
+ true ->
+ Opts = Args#mrargs.doc_options,
+ Args#mrargs{doc_options=[att_encoding_info|Opts]};
+ false ->
+ Args
+ end;
+ "update_seq" ->
+ Args#mrargs{update_seq=parse_boolean(Val)};
+ "conflicts" ->
+ Args#mrargs{conflicts=parse_boolean(Val)};
+ "callback" ->
+ Args#mrargs{callback=couch_util:to_binary(Val)};
+ "sorted" ->
+ Args#mrargs{sorted=parse_boolean(Val)};
+ "partition" ->
+ Partition = couch_util:to_binary(Val),
+ couch_partition:validate_partition(Partition),
+ couch_views_util:set_extra(Args, partition, Partition);
+ _ ->
+ BKey = couch_util:to_binary(Key),
+ BVal = couch_util:to_binary(Val),
+ Args#mrargs{extra=[{BKey, BVal} | Args#mrargs.extra]}
+ end.
+
+
+parse_boolean(Val) ->
+ case couch_lib_parse:parse_boolean(Val) of
+ {error, Reason} ->
+ throw({query_parse_error, Reason});
+ Boolean ->
+ Boolean
+ end.
+
+
+parse_pos_int(Val) ->
+ case couch_lib_parse:parse_non_neg_integer(Val) of
+ {error, Reason} ->
+ throw({query_parse_error, Reason});
+ Int ->
+ Int
+ end.
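A short usage sketch for the query-string parser in this new module (parameter values invented): pairs are folded into an #mrargs{} record, with numbers and booleans going through couch_lib_parse.

    %% Illustrative only.
    Args = couch_views_http_util:parse_params(
        [{"limit", "10"}, {"include_docs", "true"}, {"descending", "true"}],
        undefined),
    10 = Args#mrargs.limit,
    true = Args#mrargs.include_docs,
    rev = Args#mrargs.direction.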
diff --git a/src/couch_views/src/couch_views_indexer.erl b/src/couch_views/src/couch_views_indexer.erl
index 2735f66b7..7f0e2d5f2 100644
--- a/src/couch_views/src/couch_views_indexer.erl
+++ b/src/couch_views/src/couch_views_indexer.erl
@@ -30,20 +30,23 @@
-include("couch_views.hrl").
-include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-include_lib("fabric/include/fabric2.hrl").
+-include_lib("kernel/include/logger.hrl").
-define(KEY_SIZE_LIMIT, 8000).
-define(VALUE_SIZE_LIMIT, 64000).
+-define(DEFAULT_TX_RETRY_LIMIT, 5).
+
+
% These are all of the errors that we can fix by using
% a smaller batch size.
-define(IS_RECOVERABLE_ERROR(Code), (
- (Code == 1004) % timed_out
- orelse (Code == 1007) % transaction_too_old
- orelse (Code == 1031) % transaction_timed_out
- orelse (Code == 2101) % transaction_too_large
+ (Code == ?ERLFDB_TIMED_OUT) orelse
+ (Code == ?ERLFDB_TRANSACTION_TOO_OLD) orelse
+ (Code == ?ERLFDB_TRANSACTION_TIMED_OUT) orelse
+ (Code == ?ERLFDB_TRANSACTION_TOO_LARGE)
)).
@@ -86,20 +89,30 @@ init() ->
fail_job(Job, Data, sig_changed, "Design document was modified")
end,
+ DbSeq = fabric2_fdb:transactional(Db, fun(TxDb) ->
+ fabric2_fdb:with_snapshot(TxDb, fun(SSDb) ->
+ fabric2_db:get_update_seq(SSDb)
+ end)
+ end),
+
State = #{
tx_db => undefined,
db_uuid => DbUUID,
- db_seq => undefined,
+ db_seq => DbSeq,
view_seq => undefined,
last_seq => undefined,
view_vs => undefined,
job => Job,
job_data => Data,
+ rows_processed => 0,
count => 0,
changes_done => 0,
doc_acc => [],
design_opts => Mrst#mrst.design_opts,
- update_stats => #{}
+ update_stats => #{},
+ tx_retry_limit => tx_retry_limit(),
+ db_read_vsn => ?VIEW_CURRENT_VSN,
+ view_read_vsn => ?VIEW_CURRENT_VSN
},
try
@@ -109,8 +122,15 @@ init() ->
ok;
error:database_does_not_exist ->
fail_job(Job, Data, db_deleted, "Database was deleted");
- Error:Reason ->
- Stack = erlang:get_stacktrace(),
+ Error:Reason:Stack ->
+ ?LOG_ERROR(#{
+ what => view_update_failure,
+ db => DbName,
+ ddoc => DDocId,
+ tag => Error,
+ details => Reason,
+ stacktrace => Stack
+ }),
Fmt = "Error building view for ddoc ~s in ~s: ~p:~p ~p",
couch_log:error(Fmt, [DbName, DDocId, Error, Reason, Stack]),
@@ -148,7 +168,7 @@ upgrade_data(Data) ->
% Transaction limit exceeded don't retry
-should_retry(_, _, {erlfdb_error, 2101}) ->
+should_retry(_, _, {erlfdb_error, ?ERLFDB_TRANSACTION_TOO_LARGE}) ->
false;
should_retry(Retries, RetryLimit, _) when Retries < RetryLimit ->
@@ -175,17 +195,19 @@ add_error(Error, Reason, Data) ->
update(#{} = Db, Mrst0, State0) ->
Limit = couch_views_batch:start(Mrst0),
- {Mrst1, State1} = try
+ Result = try
do_update(Db, Mrst0, State0#{limit => Limit})
catch
error:{erlfdb_error, Error} when ?IS_RECOVERABLE_ERROR(Error) ->
couch_views_batch:failure(Mrst0),
update(Db, Mrst0, State0)
end,
- case State1 of
- finished ->
+ case Result of
+ ok ->
+ ok; % Already finished and released map context
+ {Mrst1, finished} ->
couch_eval:release_map_context(Mrst1#mrst.qserver);
- _ ->
+ {Mrst1, State1} ->
#{
update_stats := UpdateStats
} = State1,
@@ -195,27 +217,27 @@ update(#{} = Db, Mrst0, State0) ->
do_update(Db, Mrst0, State0) ->
- fabric2_fdb:transactional(Db, fun(TxDb) ->
+ TxOpts = #{retry_limit => maps:get(tx_retry_limit, State0)},
+ TxResult = fabric2_fdb:transactional(Db, TxOpts, fun(TxDb) ->
#{
tx := Tx
} = TxDb,
+ Snapshot = TxDb#{ tx := erlfdb:snapshot(Tx) },
+
State1 = get_update_start_state(TxDb, Mrst0, State0),
Mrst1 = couch_views_trees:open(TxDb, Mrst0),
- {ok, State2} = fold_changes(State1),
+ {ok, State2} = fold_changes(Snapshot, State1),
#{
- count := Count,
- limit := Limit,
doc_acc := DocAcc,
last_seq := LastSeq,
- view_vs := ViewVS,
changes_done := ChangesDone0,
design_opts := DesignOpts
} = State2,
- DocAcc1 = fetch_docs(TxDb, DesignOpts, DocAcc),
+ DocAcc1 = fetch_docs(Snapshot, DesignOpts, DocAcc),
{Mrst2, MappedDocs} = map_docs(Mrst0, DocAcc1),
TotalKVs = write_docs(TxDb, Mrst1, MappedDocs, State2),
@@ -228,16 +250,20 @@ do_update(Db, Mrst0, State0) ->
total_kvs => TotalKVs
},
- case Count < Limit of
+ case is_update_finished(State2) of
true ->
- maybe_set_build_status(TxDb, Mrst2, ViewVS,
- ?INDEX_READY),
- report_progress(State2#{changes_done := ChangesDone},
- finished),
- {Mrst2, finished};
+ State3 = State2#{changes_done := ChangesDone},
+ % We must call report_progress/2 (which, in turn, calls
+ % couch_jobs:update/3) in every transaction where indexing data
+ % is updated; otherwise we risk another indexer taking over and
+ % clobbering the indexing data.
+ State4 = report_progress(State3, update),
+ {Mrst2, finished, State4#{
+ db_read_vsn := erlfdb:wait(erlfdb:get_read_version(Tx))
+ }};
false ->
State3 = report_progress(State2, update),
- {Mrst2, State3#{
+ {Mrst2, continue, State3#{
tx_db := undefined,
count := 0,
doc_acc := [],
@@ -246,9 +272,54 @@ do_update(Db, Mrst0, State0) ->
update_stats := UpdateStats
}}
end
+ end),
+ case TxResult of
+ {Mrst, continue, State} ->
+ {Mrst, State};
+ {Mrst, finished, State} ->
+ do_finalize(Mrst, State),
+ {Mrst, finished}
+ end.
+
+
+do_finalize(Mrst, State) ->
+ #{tx_db := OldDb} = State,
+ ViewReadVsn = erlfdb:get_committed_version(maps:get(tx, OldDb)),
+ fabric2_fdb:transactional(OldDb#{tx := undefined}, fun(TxDb) ->
+ % Use the most recently committed version as the read
+ % version. However, if the transaction retries due to an error,
+ % let it acquire its own version to avoid spinning
+ % continuously due to conflicts or other errors.
+ case erlfdb:get_last_error() of
+ undefined ->
+ erlfdb:set_read_version(maps:get(tx, TxDb), ViewReadVsn);
+ ErrorCode when is_integer(ErrorCode) ->
+ ok
+ end,
+ State1 = State#{
+ tx_db := TxDb,
+ view_read_vsn := ViewReadVsn
+ },
+ ViewVS = maps:get(view_vs, State1),
+ maybe_set_build_status(TxDb, Mrst, ViewVS, ?INDEX_READY),
+ report_progress(State1, finished)
end).
+is_update_finished(State) ->
+ #{
+ db_seq := DbSeq,
+ last_seq := LastSeq,
+ view_vs := ViewVs
+ } = State,
+ AtDbSeq = LastSeq == DbSeq,
+ AtViewVs = case ViewVs of
+ not_found -> false;
+ _ -> LastSeq == fabric2_fdb:vs_to_seq(ViewVs)
+ end,
+ AtDbSeq orelse AtViewVs.
+
+
maybe_set_build_status(_TxDb, _Mrst1, not_found, _State) ->
ok;
@@ -258,7 +329,7 @@ maybe_set_build_status(TxDb, Mrst1, _ViewVS, State) ->
% In the first iteration of update we need
% to populate our db and view sequences
-get_update_start_state(TxDb, Mrst, #{db_seq := undefined} = State) ->
+get_update_start_state(TxDb, Mrst, #{view_seq := undefined} = State) ->
#{
view_vs := ViewVS,
view_seq := ViewSeq
@@ -266,7 +337,6 @@ get_update_start_state(TxDb, Mrst, #{db_seq := undefined} = State) ->
State#{
tx_db := TxDb,
- db_seq := fabric2_db:get_update_seq(TxDb),
view_vs := ViewVS,
view_seq := ViewSeq,
last_seq := ViewSeq
@@ -278,21 +348,39 @@ get_update_start_state(TxDb, _Idx, State) ->
}.
-fold_changes(State) ->
+fold_changes(Snapshot, State) ->
#{
view_seq := SinceSeq,
- limit := Limit,
- tx_db := TxDb
+ db_seq := DbSeq,
+ limit := Limit
} = State,
+ FoldState = State#{
+ rows_processed := 0
+ },
+
Fun = fun process_changes/2,
- Opts = [{limit, Limit}, {restart_tx, false}],
- fabric2_db:fold_changes(TxDb, SinceSeq, Fun, State, Opts).
+ Opts = [
+ {end_key, fabric2_fdb:seq_to_vs(DbSeq)},
+ {limit, Limit},
+ {restart_tx, false}
+ ],
+
+ case fabric2_db:fold_changes(Snapshot, SinceSeq, Fun, FoldState, Opts) of
+ {ok, #{rows_processed := 0} = FinalState} when Limit > 0 ->
+ % If we read zero rows with a non-zero limit
+ % it means we've caught up to the DbSeq as our
+ % last_seq.
+ {ok, FinalState#{last_seq := DbSeq}};
+ Result ->
+ Result
+ end.
process_changes(Change, Acc) ->
#{
doc_acc := DocAcc,
+ rows_processed := RowsProcessed,
count := Count,
design_opts := DesignOpts,
view_vs := ViewVS
@@ -308,12 +396,14 @@ process_changes(Change, Acc) ->
Acc1 = case {Id, IncludeDesign} of
{<<?DESIGN_DOC_PREFIX, _/binary>>, false} ->
maps:merge(Acc, #{
+ rows_processed => RowsProcessed + 1,
count => Count + 1,
last_seq => LastSeq
});
_ ->
Acc#{
doc_acc := DocAcc ++ [Change],
+ rows_processed := RowsProcessed + 1,
count := Count + 1,
last_seq := LastSeq
}
@@ -511,6 +601,12 @@ check_kv_size_limit(Mrst, Doc, KeyLimit, ValLimit) ->
Doc
catch throw:{size_error, Type} ->
#{id := DocId} = Doc,
+ ?LOG_ERROR(#{
+ what => lists:concat(["oversized_", Type]),
+ db => DbName,
+ docid => DocId,
+ index => IdxName
+ }),
Fmt = "View ~s size error for docid `~s`, excluded from indexing "
"in db `~s` for design doc `~s`",
couch_log:error(Fmt, [Type, DocId, DbName, IdxName]),
@@ -538,7 +634,9 @@ report_progress(State, UpdateType) ->
job_data := JobData,
last_seq := LastSeq,
db_seq := DBSeq,
- changes_done := ChangesDone
+ changes_done := ChangesDone,
+ db_read_vsn := DbReadVsn,
+ view_read_vsn := ViewReadVsn
} = State,
#{
@@ -566,7 +664,9 @@ report_progress(State, UpdateType) ->
<<"ddoc_id">> => DDocId,
<<"sig">> => Sig,
<<"view_seq">> => LastSeq,
- <<"retries">> => Retries
+ <<"retries">> => Retries,
+ <<"db_read_vsn">> => DbReadVsn,
+ <<"view_read_vsn">> => ViewReadVsn
},
NewData = fabric2_active_tasks:update_active_task_info(NewData0,
NewActiveTasks),
@@ -577,6 +677,7 @@ report_progress(State, UpdateType) ->
{ok, Job2} ->
State#{job := Job2};
{error, halt} ->
+ ?LOG_ERROR(#{what => job_halted, job => Job1}),
couch_log:error("~s job halted :: ~w", [?MODULE, Job1]),
exit(normal)
end;
@@ -585,6 +686,7 @@ report_progress(State, UpdateType) ->
ok ->
State;
{error, halt} ->
+ ?LOG_ERROR(#{what => job_halted, job => Job1}),
couch_log:error("~s job halted :: ~w", [?MODULE, Job1]),
exit(normal)
end
@@ -607,3 +709,8 @@ key_size_limit() ->
value_size_limit() ->
config:get_integer("couch_views", "value_size_limit", ?VALUE_SIZE_LIMIT).
+
+
+tx_retry_limit() ->
+ config:get_integer("couch_views", "indexer_tx_retry_limit",
+ ?DEFAULT_TX_RETRY_LIMIT).
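
As a minimal sketch of the snapshot-fold pattern introduced above (process_change/2 and write_rows/2 are hypothetical stand-ins; only the fabric2_fdb, fabric2_db and erlfdb calls already used in this patch are assumed): reads issued through the snapshot handle register no read conflict ranges, so the long changes fold cannot conflict with concurrent writers, while all writes still go through the real transaction handle.

    % Sketch only, not part of the patch.
    update_batch(Db, SinceSeq, Acc0) ->
        fabric2_fdb:transactional(Db, fun(TxDb) ->
            #{tx := Tx} = TxDb,
            % Reads through the snapshot handle add no read conflict ranges
            Snapshot = TxDb#{tx := erlfdb:snapshot(Tx)},
            Opts = [{limit, 100}, {restart_tx, false}],
            {ok, Acc1} = fabric2_db:fold_changes(Snapshot, SinceSeq,
                fun process_change/2, Acc0, Opts),
            % Writes still go through TxDb, the real transaction handle
            write_rows(TxDb, Acc1)
        end).
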
diff --git a/src/couch_views/src/couch_views_jobs.erl b/src/couch_views/src/couch_views_jobs.erl
index 4b0aa2660..17f0118b4 100644
--- a/src/couch_views/src/couch_views_jobs.erl
+++ b/src/couch_views/src/couch_views_jobs.erl
@@ -26,7 +26,6 @@
-endif.
--include_lib("couch_mrview/include/couch_mrview.hrl").
-include("couch_views.hrl").
@@ -37,7 +36,7 @@ set_timeout() ->
build_view(TxDb, Mrst, UpdateSeq) ->
{ok, JobId} = build_view_async(TxDb, Mrst),
case wait_for_job(JobId, Mrst#mrst.idx_name, UpdateSeq) of
- ok -> ok;
+ {ok, IdxVStamps} -> {ok, IdxVStamps};
retry -> build_view(TxDb, Mrst, UpdateSeq)
end.
@@ -90,7 +89,7 @@ wait_for_job(JobId, DDocId, UpdateSeq) ->
{ok, finished, Data} ->
case Data of
#{<<"view_seq">> := ViewSeq} when ViewSeq >= UpdateSeq ->
- ok;
+ {ok, idx_vstamps(Data)};
_ ->
retry
end
@@ -117,18 +116,24 @@ wait_for_job(JobId, Subscription, DDocId, UpdateSeq) ->
{finished, #{<<"error">> := Error, <<"reason">> := Reason}} ->
couch_jobs:remove(undefined, ?INDEX_JOB_TYPE, JobId),
erlang:error({binary_to_existing_atom(Error, latin1), Reason});
- {finished, #{<<"view_seq">> := ViewSeq}} when ViewSeq >= UpdateSeq ->
- ok;
+ {finished, #{<<"view_seq">> := ViewSeq} = JobData}
+ when ViewSeq >= UpdateSeq ->
+ {ok, idx_vstamps(JobData)};
{finished, _} ->
wait_for_job(JobId, DDocId, UpdateSeq);
- {_State, #{<<"view_seq">> := ViewSeq}} when ViewSeq >= UpdateSeq ->
- couch_jobs:unsubscribe(Subscription),
- ok;
{_, _} ->
wait_for_job(JobId, Subscription, DDocId, UpdateSeq)
end.
+idx_vstamps(#{} = JobData) ->
+ #{
+ <<"db_read_vsn">> := DbReadVsn,
+ <<"view_read_vsn">> := ViewReadVsn
+ } = JobData,
+ {DbReadVsn, ViewReadVsn}.
+
+
job_id(#{name := DbName}, #mrst{sig = Sig}) ->
job_id(DbName, Sig);
diff --git a/src/couch_views/src/couch_views_reader.erl b/src/couch_views/src/couch_views_reader.erl
index 3c5862749..ae7a3c393 100644
--- a/src/couch_views/src/couch_views_reader.erl
+++ b/src/couch_views/src/couch_views_reader.erl
@@ -13,25 +13,28 @@
-module(couch_views_reader).
-export([
- read/6
+ read/7
]).
-include("couch_views.hrl").
-include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-include_lib("fabric/include/fabric2.hrl").
-read(Db, Mrst, ViewName, UserCallback, UserAcc, Args) ->
+-define(LOAD_DOC_TIMEOUT_MSEC, 10000).
+
+
+read(Db, Mrst, ViewName, UserCallback, UserAcc, Args, DbReadVsn) ->
ReadFun = case Args of
- #mrargs{view_type = map} -> fun read_map_view/6;
- #mrargs{view_type = red} -> fun read_red_view/6
+ #mrargs{view_type = map} -> fun read_map_view/7;
+ #mrargs{view_type = red} -> fun read_red_view/7
end,
- ReadFun(Db, Mrst, ViewName, UserCallback, UserAcc, Args).
+ ReadFun(Db, Mrst, ViewName, UserCallback, UserAcc, Args, DbReadVsn).
-read_map_view(Db, Mrst0, ViewName, UserCallback, UserAcc0, Args) ->
+read_map_view(Db, Mrst0, ViewName, UserCallback, UserAcc0, Args, DbReadVsn) ->
+ DocLoader = maybe_start_doc_loader(Db, DbReadVsn),
try
fabric2_fdb:transactional(Db, fun(TxDb) ->
#mrst{
@@ -51,7 +54,8 @@ read_map_view(Db, Mrst0, ViewName, UserCallback, UserAcc0, Args) ->
limit => Args#mrargs.limit,
mrargs => undefined,
callback => UserCallback,
- acc => UserAcc1
+ acc => UserAcc1,
+ doc_loader => DocLoader
},
Acc1 = lists:foldl(fun(KeyArgs, KeyAcc0) ->
@@ -73,10 +77,12 @@ read_map_view(Db, Mrst0, ViewName, UserCallback, UserAcc0, Args) ->
{ok, Final};
throw:{done, Out} ->
{ok, Out}
+ after
+ stop_doc_loader(DocLoader)
end.
-read_red_view(Db, Mrst0, ViewName, UserCallback, UserAcc0, Args) ->
+read_red_view(Db, Mrst0, ViewName, UserCallback, UserAcc0, Args, _DbReadVsn) ->
#mrst{
language = Lang,
views = Views
@@ -179,7 +185,8 @@ handle_map_row(DocId, Key, Value, Acc) ->
limit := Limit,
mrargs := Args,
callback := UserCallback,
- acc := UserAcc0
+ acc := UserAcc0,
+ doc_loader := DocLoader
} = Acc,
BaseRow = [
@@ -196,7 +203,7 @@ handle_map_row(DocId, Key, Value, Acc) ->
end,
{TargetDocId, Rev} = get_doc_id(DocId, Value),
- DocObj = load_doc(TxDb, TargetDocId, Rev, DocOpts1),
+ DocObj = load_doc(TxDb, TargetDocId, Rev, DocOpts1, DocLoader),
[{doc, DocObj}]
end,
@@ -237,19 +244,27 @@ maybe_finalize(Finalizer, Red) ->
get_map_view(Lang, Args, ViewName, Views) ->
- case couch_mrview_util:extract_view(Lang, Args, ViewName, Views) of
+ case couch_views_util:extract_view(Lang, Args, ViewName, Views) of
{map, View, _Args} -> View;
{red, {_Idx, _Lang, View}, _} -> View
end.
get_red_view(Lang, Args, ViewName, Views) ->
- case couch_mrview_util:extract_view(Lang, Args, ViewName, Views) of
- {red, {Idx, Lang, View}, _} -> {Idx, Lang, View};
+ case couch_views_util:extract_view(Lang, Args, ViewName, Views) of
+ {red, {Idx, Lang, View}, _} -> check_red_enabled({Idx, Lang, View});
_ -> throw({not_found, missing_named_view})
end.
+check_red_enabled({Idx, _Lang, View} = Resp) ->
+ case lists:nth(Idx, View#mrview.reduce_funs) of
+ {_, disabled} ->
+ throw({disabled, <<"Custom reduce functions are disabled.">>});
+ _ ->
+ Resp
+ end.
+
expand_keys_args(#mrargs{keys = undefined} = Args) ->
[Args];
@@ -332,6 +347,19 @@ get_doc_id(Id, _Value) ->
{Id, null}.
+load_doc(TxDb, Id, Rev, DocOpts, undefined) ->
+ load_doc(TxDb, Id, Rev, DocOpts);
+
+load_doc(_TxDb, Id, Rev, DocOpts, DocLoader) when is_pid(DocLoader) ->
+ DocLoader ! {load_doc, Id, Rev, DocOpts},
+ receive
+ {load_doc_res, Result} -> Result
+ after
+ ?LOAD_DOC_TIMEOUT_MSEC ->
+ error(load_doc_timeout)
+ end.
+
+
load_doc(TxDb, Id, null, DocOpts) ->
case fabric2_db:open_doc(TxDb, Id, DocOpts) of
{ok, Doc} -> couch_doc:to_json_obj(Doc, DocOpts);
@@ -344,3 +372,38 @@ load_doc(TxDb, Id, Rev, DocOpts) ->
{ok, [{ok, Doc}]} -> couch_doc:to_json_obj(Doc, DocOpts);
{ok, [_Else]} -> null
end.
+
+
+
+% When reading doc bodies at the db version at which the indexer
+% observed them, we need to use a separate process, since the process
+% dictionary is used to hold some of the transaction metadata.
+%
+maybe_start_doc_loader(_Db, ?VIEW_CURRENT_VSN) ->
+ undefined;
+
+maybe_start_doc_loader(Db0, DbReadVsn) ->
+ Parent = self(),
+ Db = Db0#{tx := undefined},
+ spawn_link(fun() ->
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ erlfdb:set_read_version(maps:get(tx, TxDb), DbReadVsn),
+ doc_loader_loop(TxDb, Parent)
+ end)
+ end).
+
+
+stop_doc_loader(undefined) ->
+ ok;
+
+stop_doc_loader(Pid) when is_pid(Pid) ->
+ unlink(Pid),
+ exit(Pid, kill).
+
+
+doc_loader_loop(TxDb, Parent) ->
+ receive
+ {load_doc, Id, Rev, DocOpts} ->
+ Parent ! {load_doc_res, load_doc(TxDb, Id, Rev, DocOpts)},
+ doc_loader_loop(TxDb, Parent)
+ end.
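
Putting the couch_views_jobs and couch_views_reader changes together, a snapshot read can be sketched roughly as follows (a sketch; snapshot_query/6 is hypothetical and assumes the caller has a #mrst{} and #mrargs{}, while every module call appears elsewhere in this patch): the versionstamps returned by wait_for_job/3 pin the view read to the version the indexer committed at, and DbReadVsn lets the separate doc-loader process read include_docs bodies at the version the indexer observed them.

    % Rough sketch, not part of the patch.
    snapshot_query(Db, Mrst, ViewName, Callback, Acc0, Args) ->
        JobId = couch_views_jobs:job_id(Db, Mrst),
        UpdateSeq = fabric2_db:get_update_seq(Db),
        {ok, {DbReadVsn, ViewReadVsn}} =
            couch_views_jobs:wait_for_job(JobId, Mrst#mrst.idx_name, UpdateSeq),
        fabric2_fdb:transactional(Db, fun(#{tx := Tx} = TxDb) ->
            % Pin the view read at the version the indexer committed
            erlfdb:set_read_version(Tx, ViewReadVsn),
            couch_views_reader:read(TxDb, Mrst, ViewName, Callback, Acc0,
                Args, DbReadVsn)
        end).
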
diff --git a/src/couch_views/src/couch_views_server.erl b/src/couch_views/src/couch_views_server.erl
index 71a4abb8d..e94eaf170 100644
--- a/src/couch_views/src/couch_views_server.erl
+++ b/src/couch_views/src/couch_views_server.erl
@@ -15,6 +15,7 @@
-behaviour(gen_server).
+-include_lib("kernel/include/logger.hrl").
-export([
start_link/0
@@ -75,6 +76,7 @@ handle_call({accepted, Pid}, _From, St) ->
},
{reply, ok, spawn_acceptors(St1)};
false ->
+ ?LOG_ERROR(#{what => unknown_acceptor, pid => Pid}),
LogMsg = "~p : unknown acceptor processs ~p",
couch_log:error(LogMsg, [?MODULE, Pid]),
{stop, {unknown_acceptor_pid, Pid}, St}
@@ -127,6 +129,7 @@ format_status(_Opt, [_PDict, State]) ->
handle_acceptor_exit(#{acceptors := Acceptors} = St, Pid, Reason) ->
St1 = St#{acceptors := maps:remove(Pid, Acceptors)},
+ ?LOG_ERROR(#{what => acceptor_crash, pid => Pid, reason => Reason}),
LogMsg = "~p : acceptor process ~p exited with ~p",
couch_log:error(LogMsg, [?MODULE, Pid, Reason]),
{noreply, spawn_acceptors(St1)}.
@@ -138,12 +141,14 @@ handle_worker_exit(#{workers := Workers} = St, Pid, normal) ->
handle_worker_exit(#{workers := Workers} = St, Pid, Reason) ->
St1 = St#{workers := maps:remove(Pid, Workers)},
+ ?LOG_ERROR(#{what => indexer_crash, pid => Pid, reason => Reason}),
LogMsg = "~p : indexer process ~p exited with ~p",
couch_log:error(LogMsg, [?MODULE, Pid, Reason]),
{noreply, spawn_acceptors(St1)}.
handle_unknown_exit(St, Pid, Reason) ->
+ ?LOG_ERROR(#{what => unknown_process_crash, pid => Pid, reason => Reason}),
LogMsg = "~p : unknown process ~p exited with ~p",
couch_log:error(LogMsg, [?MODULE, Pid, Reason]),
{stop, {unknown_pid_exit, Pid}, St}.
diff --git a/src/couch_views/src/couch_views_sup.erl b/src/couch_views/src/couch_views_sup.erl
index 94531893d..ee32d4e9f 100644
--- a/src/couch_views/src/couch_views_sup.erl
+++ b/src/couch_views/src/couch_views_sup.erl
@@ -16,6 +16,7 @@
-behaviour(supervisor).
+-include_lib("kernel/include/logger.hrl").
-export([
start_link/0
@@ -46,6 +47,7 @@ init(normal) ->
{ok, {flags(), Children}};
init(builds_disabled) ->
+ ?LOG_NOTICE(#{what => view_indexing_disabled}),
couch_log:notice("~p : view_indexing disabled", [?MODULE]),
couch_views_jobs:set_timeout(),
{ok, {flags(), []}}.
diff --git a/src/couch_views/src/couch_views_trees.erl b/src/couch_views/src/couch_views_trees.erl
index b45750be9..9aafbb276 100644
--- a/src/couch_views/src/couch_views_trees.erl
+++ b/src/couch_views/src/couch_views_trees.erl
@@ -32,7 +32,6 @@
-include("couch_views.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-include_lib("fabric/include/fabric2.hrl").
@@ -144,15 +143,8 @@ fold_red_idx(TxDb, View, Idx, Options, Callback, Acc0) ->
Callback(GroupKey, RedValue, WAcc)
end,
- case {GroupKeyFun, Dir} of
- {group_all, fwd} ->
- EBtreeOpts = [
- {dir, fwd},
- {inclusive_end, InclusiveEnd}
- ],
- Reduction = ebtree:reduce(Tx, Btree, StartKey, EndKey, EBtreeOpts),
- Wrapper({null, Reduction}, Acc0);
- {F, fwd} when is_function(F) ->
+ case Dir of
+ fwd ->
EBtreeOpts = [
{dir, fwd},
{inclusive_end, InclusiveEnd}
@@ -167,16 +159,7 @@ fold_red_idx(TxDb, View, Idx, Options, Callback, Acc0) ->
Acc0,
EBtreeOpts
);
- {group_all, rev} ->
- % Start/End keys swapped on purpose because ebtree. Also
- % inclusive_start for same reason.
- EBtreeOpts = [
- {dir, rev},
- {inclusive_start, InclusiveEnd}
- ],
- Reduction = ebtree:reduce(Tx, Btree, EndKey, StartKey, EBtreeOpts),
- Wrapper({null, Reduction}, Acc0);
- {F, rev} when is_function(F) ->
+ rev ->
% Start/End keys swapped on purpose because ebtree. Also
% inclusive_start for same reason.
EBtreeOpts = [
@@ -245,7 +228,7 @@ open_id_tree(TxDb, Sig) ->
Prefix = id_tree_prefix(DbPrefix, Sig),
TreeOpts = [
{persist_fun, fun couch_views_fdb:persist_chunks/3},
- {cache_fun, create_cache_fun(id_tree)}
+ {encode_fun, create_encode_fun(TxDb)}
],
ebtree:open(Tx, Prefix, get_order(id_btree), TreeOpts).
@@ -261,7 +244,8 @@ open_view_tree(TxDb, Sig, Lang, View, Options) ->
Prefix = view_tree_prefix(DbPrefix, Sig, ViewId),
BaseOpts = [
{collate_fun, couch_views_util:collate_fun(View)},
- {persist_fun, fun couch_views_fdb:persist_chunks/3}
+ {persist_fun, fun couch_views_fdb:persist_chunks/3},
+ {encode_fun, create_encode_fun(TxDb)}
],
ExtraOpts = case lists:keyfind(read_only, 1, Options) of
{read_only, Idx} ->
@@ -269,8 +253,7 @@ open_view_tree(TxDb, Sig, Lang, View, Options) ->
[{reduce_fun, RedFun}];
false ->
[
- {reduce_fun, make_reduce_fun(Lang, View)},
- {cache_fun, create_cache_fun({view, ViewId})}
+ {reduce_fun, make_reduce_fun(Lang, View)}
]
end,
TreeOpts = BaseOpts ++ ExtraOpts,
@@ -295,9 +278,6 @@ min_order(V) ->
make_read_only_reduce_fun(Lang, View, NthRed) ->
RedFuns = [Src || {_, Src} <- View#mrview.reduce_funs],
- if RedFuns /= [] -> ok; true ->
- io:format(standard_error, "~p~n", [process_info(self(), current_stacktrace)])
- end,
LPad = lists:duplicate(NthRed - 1, []),
RPad = lists:duplicate(length(RedFuns) - NthRed, []),
FunSrc = lists:nth(NthRed, RedFuns),
@@ -323,7 +303,7 @@ make_read_only_reduce_fun(Lang, View, NthRed) ->
make_reduce_fun(Lang, #mrview{} = View) ->
- RedFuns = [Src || {_, Src} <- View#mrview.reduce_funs],
+ RedFuns = [Src || {_, Src} <- View#mrview.reduce_funs, Src /= disabled],
fun
(KVs0, _ReReduce = false) ->
KVs1 = expand_dupes(KVs0),
@@ -350,27 +330,14 @@ make_reduce_fun(Lang, #mrview{} = View) ->
end.
-create_cache_fun(TreeId) ->
- CacheTid = case get(TreeId) of
- undefined ->
- Tid = ets:new(?MODULE, [protected, set]),
- put(TreeId, {ebtree_cache, Tid}),
- Tid;
- {ebtree_cache, Tid} ->
- Tid
- end,
+create_encode_fun(TxDb) ->
fun
- (set, [Id, Node]) ->
- true = ets:insert_new(CacheTid, {Id, Node}),
- ok;
- (clear, Id) ->
- ets:delete(CacheTid, Id),
- ok;
- (get, Id) ->
- case ets:lookup(CacheTid, Id) of
- [{Id, Node}] -> Node;
- [] -> undefined
- end
+ (encode, Key, Term) ->
+ Bin = term_to_binary(Term, [compressed, {minor_version, 2}]),
+ aegis:encrypt(TxDb, Key, Bin);
+ (decode, Key, Ciphertext) ->
+ Bin = aegis:decrypt(TxDb, Key, Ciphertext),
+ binary_to_term(Bin, [safe])
end.
@@ -404,8 +371,9 @@ to_red_opts(Options) ->
{Dir, StartKey, EndKey, InclusiveEnd} = to_map_opts(Options),
GroupKeyFun = case lists:keyfind(group_key_fun, 1, Options) of
+ {group_key_fun, group_all} -> fun({_Key, _DocId}) -> null end;
{group_key_fun, GKF} -> GKF;
- false -> fun({_Key, _DocId}) -> global_group end
+ false -> fun({_Key, _DocId}) -> null end
end,
{Dir, StartKey, EndKey, InclusiveEnd, GroupKeyFun}.
diff --git a/src/couch_views/src/couch_views_updater.erl b/src/couch_views/src/couch_views_updater.erl
index 7e5466eb8..f88c5a23d 100644
--- a/src/couch_views/src/couch_views_updater.erl
+++ b/src/couch_views/src/couch_views_updater.erl
@@ -17,7 +17,8 @@
-include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
+-include_lib("kernel/include/logger.hrl").
% If the doc revision doesn't match the NewRevId passed here we can ignore
% the document since it is then a conflict document and it doesn't need
@@ -31,14 +32,27 @@ index(Db, #doc{id = Id, revs = Revs} = Doc, _NewWinner, _OldWinner, NewRevId,
index_int(Db, Doc, Seq)
end
catch
- error:{erlfdb_error, ErrCode} when is_integer(ErrCode) ->
- Stack = erlang:get_stacktrace(),
+ error:{erlfdb_error, ErrCode}:Stack when is_integer(ErrCode) ->
DbName = fabric2_db:name(Db),
+ ?LOG_ERROR(#{
+ what => mango_index_update,
+ status => erlfdb_error,
+ details => ErrCode,
+ db => DbName,
+ docid => Id
+ }),
couch_log:error("Mango index erlfdb error Db ~s Doc ~p ~p",
[DbName, Id, ErrCode]),
erlang:raise(error, {erlfdb_error, ErrCode}, Stack);
Error:Reason ->
DbName = fabric2_db:name(Db),
+ ?LOG_ERROR(#{
+ what => mango_index_update,
+ status => Error,
+ details => Reason,
+ db => DbName,
+ docid => Id
+ }),
couch_log:error("Mango index error for Db ~s Doc ~p ~p ~p",
[DbName, Id, Error, Reason])
end.
@@ -52,7 +66,7 @@ index_int(Db, #doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>,
case couch_views_ddoc:is_interactive(DDoc) of
true ->
- {ok, Mrst} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
+ {ok, Mrst} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
case couch_views_fdb:get_creation_vs(Db, Mrst) of
not_found ->
couch_views_fdb:new_interactive_index(Db, Mrst, Seq),
@@ -87,7 +101,7 @@ write_doc(Db, #doc{deleted = Deleted} = Doc) ->
},
lists:foreach(fun(DDoc) ->
- {ok, Mrst0} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
+ {ok, Mrst0} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
Mrst1 = couch_views_trees:open(Db, Mrst0),
case should_index_doc(Doc, Mrst1) of
diff --git a/src/couch_views/src/couch_views_util.erl b/src/couch_views/src/couch_views_util.erl
index 1e3e4beef..63f5daafc 100644
--- a/src/couch_views/src/couch_views_util.erl
+++ b/src/couch_views/src/couch_views_util.erl
@@ -19,13 +19,18 @@
validate_args/1,
validate_args/2,
is_paginated/1,
- active_tasks_info/5
+ active_tasks_info/5,
+ set_view_type/3,
+ set_extra/3,
+ get_view_queries/1,
+ get_view_keys/1,
+ extract_view/4
]).
-include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-include("couch_views.hrl").
+-include_lib("kernel/include/logger.hrl").
ddoc_to_mrst(DbName, #doc{id=Id, body={Fields}}) ->
@@ -52,6 +57,12 @@ ddoc_to_mrst(DbName, #doc{id=Id, body={Fields}}) ->
DictBySrcAcc
end;
({Name, Else}, DictBySrcAcc) ->
+ ?LOG_ERROR(#{
+ what => invalid_view_definition,
+ db => DbName,
+ ddoc => Id,
+ view => Name
+ }),
couch_log:error("design_doc_to_view_group ~s views ~p",
[Name, Else]),
DictBySrcAcc
@@ -65,7 +76,8 @@ ddoc_to_mrst(DbName, #doc{id=Id, body={Fields}}) ->
NumViews = fun({_, View}, N) ->
{View#mrview{id_num = N}, N+1}
end,
- {Views, _} = lists:mapfoldl(NumViews, 0, lists:sort(dict:to_list(BySrc))),
+ {Views0, _} = lists:mapfoldl(NumViews, 0, lists:sort(dict:to_list(BySrc))),
+ Views1 = maybe_disable_custom_reduce_funs(Views0),
Language = couch_util:get_value(<<"language">>, Fields, <<"javascript">>),
Lib = couch_util:get_value(<<"lib">>, RawViews, {[]}),
@@ -74,15 +86,58 @@ ddoc_to_mrst(DbName, #doc{id=Id, body={Fields}}) ->
db_name=DbName,
idx_name=Id,
lib=Lib,
- views=Views,
+ views=Views1,
language=Language,
design_opts=DesignOpts,
partitioned=Partitioned
},
- SigInfo = {Views, Language, DesignOpts, couch_index_util:sort_lib(Lib)},
+ SigInfo = {Views1, Language, DesignOpts, sort_lib(Lib)},
{ok, IdxState#mrst{sig=couch_hash:md5_hash(term_to_binary(SigInfo))}}.
+set_view_type(_Args, _ViewName, []) ->
+ throw({not_found, missing_named_view});
+
+set_view_type(Args, ViewName, [View | Rest]) ->
+ RedNames = [N || {N, _} <- View#mrview.reduce_funs],
+ case lists:member(ViewName, RedNames) of
+ true ->
+ case Args#mrargs.reduce of
+ false -> Args#mrargs{view_type=map};
+ _ -> Args#mrargs{view_type=red}
+ end;
+ false ->
+ case lists:member(ViewName, View#mrview.map_names) of
+ true -> Args#mrargs{view_type=map};
+ false -> set_view_type(Args, ViewName, Rest)
+ end
+ end.
+
+
+set_extra(#mrargs{} = Args, Key, Value) ->
+ Extra0 = Args#mrargs.extra,
+ Extra1 = lists:ukeysort(1, [{Key, Value} | Extra0]),
+ Args#mrargs{extra = Extra1}.
+
+
+extract_view(_Lang, _Args, _ViewName, []) ->
+ throw({not_found, missing_named_view});
+
+extract_view(Lang, #mrargs{view_type=map}=Args, Name, [View | Rest]) ->
+ Names = View#mrview.map_names ++ [N || {N, _} <- View#mrview.reduce_funs],
+ case lists:member(Name, Names) of
+ true -> {map, View, Args};
+ _ -> extract_view(Lang, Args, Name, Rest)
+ end;
+
+extract_view(Lang, #mrargs{view_type=red}=Args, Name, [View | Rest]) ->
+ RedNames = [N || {N, _} <- View#mrview.reduce_funs],
+ case lists:member(Name, RedNames) of
+ true -> {red, {index_of(Name, RedNames), Lang, View}, Args};
+ false -> extract_view(Lang, Args, Name, Rest)
+ end.
+
+
collate_fun(View) ->
#mrview{
options = Options
@@ -121,7 +176,7 @@ validate_args(Args) ->
validate_args(Args, []).
-% This is mostly a copy of couch_mrview_util:validate_args/1 but it doesn't
+% This is mostly a copy of couch_views_validate:validate_args/1 but it doesn't
% update start / end keys and also throws a not_implemented error for reduce
%
validate_args(#mrargs{} = Args, Opts) ->
@@ -327,6 +382,33 @@ active_tasks_info(ChangesDone, DbName, DDocId, LastSeq, DBSeq) ->
}.
+maybe_disable_custom_reduce_funs(Views) ->
+ case config:get_boolean("couch_views", "custom_reduce_enabled", true) of
+ true ->
+ Views;
+ false ->
+ disable_custom_reduce_funs(Views)
+ end.
+
+
+disable_custom_reduce_funs(Views) ->
+ lists:map(fun(View) ->
+ #mrview{
+ reduce_funs = ReduceFuns
+ } = View,
+ {Builtin, Custom} = lists:partition(fun({_Name, RedSrc}) ->
+ case RedSrc of
+ <<"_", _/binary>> -> true;
+ <<_/binary>> -> false
+ end
+ end, ReduceFuns),
+ DisabledCustom = [{Name, disabled} || {Name, _Src} <- Custom],
+ View#mrview{
+ reduce_funs = Builtin ++ DisabledCustom
+ }
+ end, Views).
+
+
convert_seq_to_stamp(<<"0">>) ->
<<"0-0-0">>;
@@ -338,3 +420,53 @@ convert_seq_to_stamp(Seq) ->
VS = integer_to_list(Stamp) ++ "-" ++ integer_to_list(Batch) ++ "-"
++ integer_to_list(DocNumber),
list_to_binary(VS).
+
+
+get_view_queries({Props}) ->
+ case couch_util:get_value(<<"queries">>, Props) of
+ undefined ->
+ undefined;
+ Queries when is_list(Queries) ->
+ Queries;
+ _ ->
+ throw({bad_request, "`queries` member must be an array."})
+ end.
+
+
+get_view_keys({Props}) ->
+ case couch_util:get_value(<<"keys">>, Props) of
+ undefined ->
+ undefined;
+ Keys when is_list(Keys) ->
+ Keys;
+ _ ->
+ throw({bad_request, "`keys` member must be an array."})
+ end.
+
+
+sort_lib({Lib}) ->
+ sort_lib(Lib, []).
+
+sort_lib([], LAcc) ->
+ lists:keysort(1, LAcc);
+
+sort_lib([{LName, {LObj}}|Rest], LAcc) ->
+ LSorted = sort_lib(LObj, []), % descend into nested object
+ sort_lib(Rest, [{LName, LSorted}|LAcc]);
+
+sort_lib([{LName, LCode}|Rest], LAcc) ->
+ sort_lib(Rest, [{LName, LCode}|LAcc]).
+
+
+index_of(Key, List) ->
+ index_of(Key, List, 1).
+
+
+index_of(_, [], _) ->
+ throw({error, missing_named_view});
+
+index_of(Key, [Key | _], Idx) ->
+ Idx;
+
+index_of(Key, [_ | Rest], Idx) ->
+ index_of(Key, Rest, Idx+1).
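
To illustrate the custom-reduce disabling above (a hypothetical view definition; disabled_reduce_example/0 would have to live in couch_views_util, since disable_custom_reduce_funs/1 is private): builtin reductions keep their source, custom ones are replaced with the atom disabled, which make_reduce_fun/2 skips and check_red_enabled/1 in couch_views_reader rejects at query time.

    % Sketch only, not part of the patch.
    disabled_reduce_example() ->
        View = #mrview{reduce_funs = [
            {<<"count">>, <<"_count">>},
            {<<"silly">>, <<"function(ks, vs) { return 1; }">>}
        ]},
        [#mrview{reduce_funs = Funs}] = disable_custom_reduce_funs([View]),
        % Funs is now [{<<"count">>, <<"_count">>}, {<<"silly">>, disabled}]
        Funs.
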
diff --git a/src/couch_views/src/couch_views_validate.erl b/src/couch_views/src/couch_views_validate.erl
new file mode 100644
index 000000000..558f65d1b
--- /dev/null
+++ b/src/couch_views/src/couch_views_validate.erl
@@ -0,0 +1,460 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_validate).
+
+
+-export([
+ validate_args/1,
+ validate_args/3,
+ validate_ddoc/2
+]).
+
+
+-define(LOWEST_KEY, null).
+-define(HIGHEST_KEY, {<<255, 255, 255, 255>>}).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include("couch_views.hrl").
+
+
+% There is another almost identical validate_args in couch_views_util. They
+% should probably be merged at some point in the future.
+%
+validate_args(Args) ->
+ GroupLevel = determine_group_level(Args),
+ Reduce = Args#mrargs.reduce,
+ case Reduce == undefined orelse is_boolean(Reduce) of
+ true -> ok;
+ _ -> mrverror(<<"Invalid `reduce` value.">>)
+ end,
+
+ case {Args#mrargs.view_type, Reduce} of
+ {map, true} -> mrverror(<<"Reduce is invalid for map-only views.">>);
+ _ -> ok
+ end,
+
+ case {Args#mrargs.view_type, GroupLevel, Args#mrargs.keys} of
+ {red, exact, _} -> ok;
+ {red, _, KeyList} when is_list(KeyList) ->
+ Msg = <<"Multi-key fetches for reduce views must use `group=true`">>,
+ mrverror(Msg);
+ _ -> ok
+ end,
+
+ case Args#mrargs.keys of
+ Keys when is_list(Keys) -> ok;
+ undefined -> ok;
+ _ -> mrverror(<<"`keys` must be an array of strings.">>)
+ end,
+
+ case {Args#mrargs.keys, Args#mrargs.start_key,
+ Args#mrargs.end_key} of
+ {undefined, _, _} -> ok;
+ {[], _, _} -> ok;
+ {[_|_], undefined, undefined} -> ok;
+ _ -> mrverror(<<"`keys` is incompatible with `key`"
+ ", `start_key` and `end_key`">>)
+ end,
+
+ case Args#mrargs.start_key_docid of
+ undefined -> ok;
+ SKDocId0 when is_binary(SKDocId0) -> ok;
+ _ -> mrverror(<<"`start_key_docid` must be a string.">>)
+ end,
+
+ case Args#mrargs.end_key_docid of
+ undefined -> ok;
+ EKDocId0 when is_binary(EKDocId0) -> ok;
+ _ -> mrverror(<<"`end_key_docid` must be a string.">>)
+ end,
+
+ case Args#mrargs.direction of
+ fwd -> ok;
+ rev -> ok;
+ _ -> mrverror(<<"Invalid direction.">>)
+ end,
+
+ case {Args#mrargs.limit >= 0, Args#mrargs.limit == undefined} of
+ {true, _} -> ok;
+ {_, true} -> ok;
+ _ -> mrverror(<<"`limit` must be a positive integer.">>)
+ end,
+
+ case Args#mrargs.skip < 0 of
+ true -> mrverror(<<"`skip` must be >= 0">>);
+ _ -> ok
+ end,
+
+ case {Args#mrargs.view_type, GroupLevel} of
+ {red, exact} -> ok;
+ {_, 0} -> ok;
+ {red, Int} when is_integer(Int), Int >= 0 -> ok;
+ {red, _} -> mrverror(<<"`group_level` must be >= 0">>);
+ {map, _} -> mrverror(<<"Invalid use of grouping on a map view.">>)
+ end,
+
+ case Args#mrargs.stable of
+ true -> ok;
+ false -> ok;
+ _ -> mrverror(<<"Invalid value for `stable`.">>)
+ end,
+
+ case Args#mrargs.update of
+ true -> ok;
+ false -> ok;
+ lazy -> ok;
+ _ -> mrverror(<<"Invalid value for `update`.">>)
+ end,
+
+ case is_boolean(Args#mrargs.inclusive_end) of
+ true -> ok;
+ _ -> mrverror(<<"Invalid value for `inclusive_end`.">>)
+ end,
+
+ case {Args#mrargs.view_type, Args#mrargs.include_docs} of
+ {red, true} -> mrverror(<<"`include_docs` is invalid for reduce">>);
+ {_, ID} when is_boolean(ID) -> ok;
+ _ -> mrverror(<<"Invalid value for `include_docs`">>)
+ end,
+
+ case {Args#mrargs.view_type, Args#mrargs.conflicts} of
+ {_, undefined} -> ok;
+ {map, V} when is_boolean(V) -> ok;
+ {red, undefined} -> ok;
+ {map, _} -> mrverror(<<"Invalid value for `conflicts`.">>);
+ {red, _} -> mrverror(<<"`conflicts` is invalid for reduce views.">>)
+ end,
+
+ SKDocId = case {Args#mrargs.direction, Args#mrargs.start_key_docid} of
+ {fwd, undefined} -> <<>>;
+ {rev, undefined} -> <<255>>;
+ {_, SKDocId1} -> SKDocId1
+ end,
+
+ EKDocId = case {Args#mrargs.direction, Args#mrargs.end_key_docid} of
+ {fwd, undefined} -> <<255>>;
+ {rev, undefined} -> <<>>;
+ {_, EKDocId1} -> EKDocId1
+ end,
+
+ case is_boolean(Args#mrargs.sorted) of
+ true -> ok;
+ _ -> mrverror(<<"Invalid value for `sorted`.">>)
+ end,
+
+ Args#mrargs{
+ start_key_docid=SKDocId,
+ end_key_docid=EKDocId,
+ group_level=GroupLevel
+ }.
+
+
+validate_args(Db, DDoc, Args0) ->
+ {ok, State} = couch_views_util:ddoc_to_mrst(fabric2_db:name(Db), DDoc),
+ Args1 = apply_limit(State#mrst.partitioned, Args0),
+ validate_args(State, Args1).
+
+
+validate_ddoc(#{} = Db, DDoc) ->
+ DbName = fabric2_db:name(Db),
+ IsPartitioned = fabric2_db:is_partitioned(Db),
+ validate_ddoc(DbName, IsPartitioned, DDoc).
+
+
+% Private functions
+
+validate_ddoc(DbName, _IsDbPartitioned, DDoc) ->
+ ok = validate_ddoc_fields(DDoc#doc.body),
+ GetName = fun
+ (#mrview{map_names = [Name | _]}) -> Name;
+ (#mrview{reduce_funs = [{Name, _} | _]}) -> Name;
+ (_) -> null
+ end,
+ ValidateView = fun(Ctx, #mrview{def=MapSrc, reduce_funs=Reds}=View) ->
+ couch_eval:try_compile(Ctx, map, GetName(View), MapSrc),
+ lists:foreach(fun
+ ({_RedName, <<"_sum", _/binary>>}) ->
+ ok;
+ ({_RedName, <<"_count", _/binary>>}) ->
+ ok;
+ ({_RedName, <<"_stats", _/binary>>}) ->
+ ok;
+ ({_RedName, <<"_approx_count_distinct", _/binary>>}) ->
+ ok;
+ ({_RedName, <<"_", _/binary>> = Bad}) ->
+ Msg = ["`", Bad, "` is not a supported reduce function."],
+ throw({invalid_design_doc, Msg});
+ ({RedName, RedSrc}) ->
+ couch_eval:try_compile(Ctx, reduce, RedName, RedSrc)
+ end, Reds)
+ end,
+ {ok, #mrst{
+ language = Lang,
+ views = Views
+ }} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
+
+ Views =/= [] andalso couch_eval:with_context(#{language => Lang}, fun (Ctx) ->
+ lists:foreach(fun(V) -> ValidateView(Ctx, V) end, Views)
+ end),
+ ok.
+
+
+validate_args(#mrst{} = State, Args0) ->
+ Args = validate_args(Args0),
+
+ ViewPartitioned = State#mrst.partitioned,
+ Partition = get_extra(Args, partition),
+
+ case {ViewPartitioned, Partition} of
+ {true, undefined} ->
+ Msg1 = <<"`partition` parameter is mandatory "
+ "for queries to this view.">>,
+ mrverror(Msg1);
+ {true, _} ->
+ apply_partition(Args, Partition);
+ {false, undefined} ->
+ Args;
+ {false, Value} when is_binary(Value) ->
+ Msg2 = <<"`partition` parameter is not "
+ "supported in this design doc">>,
+ mrverror(Msg2)
+ end.
+
+
+validate_ddoc_fields(DDoc) ->
+ MapFuncType = map_function_type(DDoc),
+ lists:foreach(fun(Path) ->
+ validate_ddoc_fields(DDoc, Path)
+ end, [
+ [{<<"filters">>, object}, {any, [object, string]}],
+ [{<<"language">>, string}],
+ [{<<"lists">>, object}, {any, [object, string]}],
+ [{<<"options">>, object}],
+ [{<<"options">>, object}, {<<"include_design">>, boolean}],
+ [{<<"options">>, object}, {<<"local_seq">>, boolean}],
+ [{<<"options">>, object}, {<<"partitioned">>, boolean}],
+ [{<<"rewrites">>, [string, array]}],
+ [{<<"shows">>, object}, {any, [object, string]}],
+ [{<<"updates">>, object}, {any, [object, string]}],
+ [{<<"validate_doc_update">>, string}],
+ [{<<"views">>, object}, {<<"lib">>, object}],
+ [{<<"views">>, object}, {any, object}, {<<"map">>, MapFuncType}],
+ [{<<"views">>, object}, {any, object}, {<<"reduce">>, string}]
+ ]),
+ require_map_function_for_views(DDoc),
+ ok.
+
+
+require_map_function_for_views({Props}) ->
+ case couch_util:get_value(<<"views">>, Props) of
+ undefined -> ok;
+ {Views} ->
+ lists:foreach(fun
+ ({<<"lib">>, _}) -> ok;
+ ({Key, {Value}}) ->
+ case couch_util:get_value(<<"map">>, Value) of
+ undefined -> throw({invalid_design_doc,
+ <<"View `", Key/binary, "` must contain map function">>});
+ _ -> ok
+ end
+ end, Views),
+ ok
+ end.
+
+
+validate_ddoc_fields(DDoc, Path) ->
+ case validate_ddoc_fields(DDoc, Path, []) of
+ ok -> ok;
+ {error, {FailedPath0, Type0}} ->
+ FailedPath = iolist_to_binary(join(FailedPath0, <<".">>)),
+ Type = format_type(Type0),
+ throw({invalid_design_doc,
+ <<"`", FailedPath/binary, "` field must have ",
+ Type/binary, " type">>})
+ end.
+
+validate_ddoc_fields(undefined, _, _) ->
+ ok;
+
+validate_ddoc_fields(_, [], _) ->
+ ok;
+
+validate_ddoc_fields({KVS}=Props, [{any, Type} | Rest], Acc) ->
+ lists:foldl(fun
+ ({Key, _}, ok) ->
+ validate_ddoc_fields(Props, [{Key, Type} | Rest], Acc);
+ ({_, _}, {error, _}=Error) ->
+ Error
+ end, ok, KVS);
+
+validate_ddoc_fields({KVS}=Props, [{Key, Type} | Rest], Acc) ->
+ case validate_ddoc_field(Props, {Key, Type}) of
+ ok ->
+ validate_ddoc_fields(couch_util:get_value(Key, KVS),
+ Rest,
+ [Key | Acc]);
+ error ->
+ {error, {[Key | Acc], Type}};
+ {error, Key1} ->
+ {error, {[Key1 | Acc], Type}}
+ end.
+
+
+validate_ddoc_field(undefined, Type) when is_atom(Type) ->
+ ok;
+
+validate_ddoc_field(_, any) ->
+ ok;
+
+validate_ddoc_field(Value, Types) when is_list(Types) ->
+ lists:foldl(fun
+ (_, ok) -> ok;
+ (Type, _) -> validate_ddoc_field(Value, Type)
+ end, error, Types);
+validate_ddoc_field(Value, string) when is_binary(Value) ->
+ ok;
+
+validate_ddoc_field(Value, array) when is_list(Value) ->
+ ok;
+
+validate_ddoc_field({Value}, object) when is_list(Value) ->
+ ok;
+
+validate_ddoc_field(Value, boolean) when is_boolean(Value) ->
+ ok;
+
+validate_ddoc_field({Props}, {any, Type}) ->
+ validate_ddoc_field1(Props, Type);
+
+validate_ddoc_field({Props}, {Key, Type}) ->
+ validate_ddoc_field(couch_util:get_value(Key, Props), Type);
+
+validate_ddoc_field(_, _) ->
+ error.
+
+
+validate_ddoc_field1([], _) ->
+ ok;
+
+validate_ddoc_field1([{Key, Value} | Rest], Type) ->
+ case validate_ddoc_field(Value, Type) of
+ ok ->
+ validate_ddoc_field1(Rest, Type);
+ error ->
+ {error, Key}
+ end.
+
+
+map_function_type({Props}) ->
+ case couch_util:get_value(<<"language">>, Props) of
+ <<"query">> -> object;
+ _ -> string
+ end.
+
+
+format_type(Type) when is_atom(Type) ->
+ ?l2b(atom_to_list(Type));
+
+format_type(Types) when is_list(Types) ->
+ iolist_to_binary(join(lists:map(fun atom_to_list/1, Types), <<" or ">>)).
+
+
+join(L, Sep) ->
+ join(L, Sep, []).
+
+
+join([H|[]], _, Acc) ->
+ [H | Acc];
+
+join([H|T], Sep, Acc) ->
+ join(T, Sep, [Sep, H | Acc]).
+
+
+determine_group_level(#mrargs{group=undefined, group_level=undefined}) ->
+ 0;
+
+determine_group_level(#mrargs{group=false, group_level=undefined}) ->
+ 0;
+
+determine_group_level(#mrargs{group=false, group_level=Level}) when Level > 0 ->
+ mrverror(<<"Can't specify group=false and group_level>0 at the same time">>);
+
+determine_group_level(#mrargs{group=true, group_level=undefined}) ->
+ exact;
+
+determine_group_level(#mrargs{group_level=GroupLevel}) ->
+ GroupLevel.
+
+
+mrverror(Mesg) ->
+ throw({query_parse_error, Mesg}).
+
+
+apply_partition(#mrargs{keys=[{p, _, _} | _]} = Args, _Partition) ->
+ Args; % already applied
+
+apply_partition(#mrargs{keys=Keys} = Args, Partition) when Keys /= undefined ->
+ Args#mrargs{keys=[{p, Partition, K} || K <- Keys]};
+
+apply_partition(#mrargs{start_key={p, _, _}, end_key={p, _, _}} = Args, _Partition) ->
+ Args; % already applied.
+
+apply_partition(Args, Partition) ->
+ #mrargs{
+ direction = Dir,
+ start_key = StartKey,
+ end_key = EndKey
+ } = Args,
+
+ {DefSK, DefEK} = case Dir of
+ fwd -> {?LOWEST_KEY, ?HIGHEST_KEY};
+ rev -> {?HIGHEST_KEY, ?LOWEST_KEY}
+ end,
+
+ SK0 = if StartKey /= undefined -> StartKey; true -> DefSK end,
+ EK0 = if EndKey /= undefined -> EndKey; true -> DefEK end,
+
+ Args#mrargs{
+ start_key = {p, Partition, SK0},
+ end_key = {p, Partition, EK0}
+ }.
+
+
+get_extra(#mrargs{} = Args, Key) ->
+ couch_util:get_value(Key, Args#mrargs.extra).
+
+
+apply_limit(ViewPartitioned, Args) ->
+ Options = Args#mrargs.extra,
+ IgnorePQLimit = lists:keyfind(ignore_partition_query_limit, 1, Options),
+ LimitType = case {ViewPartitioned, IgnorePQLimit} of
+ {true, false} -> "partition_query_limit";
+ {true, _} -> "query_limit";
+ {false, _} -> "query_limit"
+ end,
+
+ MaxLimit = config:get_integer("query_server_config",
+ LimitType, ?MAX_VIEW_LIMIT),
+
+ % Set the highest limit possible if a user has not
+ % specified a limit
+ Args1 = case Args#mrargs.limit == ?MAX_VIEW_LIMIT of
+ true -> Args#mrargs{limit = MaxLimit};
+ false -> Args
+ end,
+
+ if Args1#mrargs.limit =< MaxLimit -> Args1; true ->
+ Fmt = "Limit is too large, must not exceed ~p",
+ mrverror(io_lib:format(Fmt, [MaxLimit]))
+ end.
diff --git a/src/couch_views/test/couch_views_active_tasks_test.erl b/src/couch_views/test/couch_views_active_tasks_test.erl
index b7f36a343..248449359 100644
--- a/src/couch_views/test/couch_views_active_tasks_test.erl
+++ b/src/couch_views/test/couch_views_active_tasks_test.erl
@@ -55,6 +55,9 @@ foreach_setup() ->
foreach_teardown({Db, _}) ->
meck:unload(),
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ couch_views:cleanup_indices(TxDb, [])
+ end),
ok = fabric2_db:delete(fabric2_db:name(Db), []).
diff --git a/src/couch_views/test/couch_views_batch_test.erl b/src/couch_views/test/couch_views_batch_test.erl
index 78e68925e..d4dbb50c5 100644
--- a/src/couch_views/test/couch_views_batch_test.erl
+++ b/src/couch_views/test/couch_views_batch_test.erl
@@ -15,7 +15,7 @@
-include_lib("eunit/include/eunit.hrl").
-include_lib("fabric/test/fabric2_test.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
batch_test_() ->
diff --git a/src/couch_views/test/couch_views_cleanup_test.erl b/src/couch_views/test/couch_views_cleanup_test.erl
index 54048c968..d1b6f2adc 100644
--- a/src/couch_views/test/couch_views_cleanup_test.erl
+++ b/src/couch_views/test/couch_views_cleanup_test.erl
@@ -17,7 +17,6 @@
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("couch_views/include/couch_views.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-include_lib("fabric/include/fabric2.hrl").
-include_lib("fabric/test/fabric2_test.hrl").
diff --git a/src/couch_views/test/couch_views_custom_red_test.erl b/src/couch_views/test/couch_views_custom_red_test.erl
new file mode 100644
index 000000000..e8f8cbc2f
--- /dev/null
+++ b/src/couch_views/test/couch_views_custom_red_test.erl
@@ -0,0 +1,193 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_custom_red_test).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("fabric/test/fabric2_test.hrl").
+-include("couch_views.hrl").
+
+
+-define(NUM_DOCS, 100).
+
+
+custom_reduce_disabled_test_() ->
+ {
+ "Custom Reduce Disabled",
+ {
+ setup,
+ fun setup_disabled/0,
+ fun teardown/1,
+ with([
+ ?TDEF(builtin_reductions_work),
+ ?TDEF(custom_reduces_disabled)
+ ])
+ }
+ }.
+
+
+custom_reduce_enabled_test_() ->
+ {
+ "Custom Reduce Disabled",
+ {
+ setup,
+ fun setup_enabled/0,
+ fun teardown/1,
+ with([
+ ?TDEF(builtin_reductions_work),
+ ?TDEF(custom_reduces_enabled)
+ ])
+ }
+ }.
+
+
+sigs_change_test_() ->
+ {
+ "Sigs Change Test",
+ {
+ setup,
+ fun setup_sigs_change/0,
+ fun teardown_sigs_change/1,
+ with([
+ ?TDEF(sigs_change)
+ ])
+ }
+ }.
+
+setup_disabled() ->
+ setup_common(false).
+
+
+setup_enabled() ->
+ setup_common(true).
+
+
+setup_common(Enabled) ->
+ Ctx = test_util:start_couch([
+ fabric,
+ couch_jobs,
+ couch_js,
+ couch_views
+ ]),
+ config:set_boolean("couch_views", "custom_reduce_enabled", Enabled, false),
+ {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+ fabric2_db:update_docs(Db, [create_ddoc()]),
+ make_docs(Db, ?NUM_DOCS),
+ run_query(Db, <<"builtin">>, #{limit => 0}),
+ {Db, Ctx}.
+
+
+teardown({Db, Ctx}) ->
+ fabric2_db:delete(fabric2_db:name(Db), [{user_ctx, ?ADMIN_USER}]),
+ test_util:stop_couch(Ctx).
+
+
+setup_sigs_change() ->
+ meck:new(config, [passthrough]),
+ meck:expect(config, get, fun(_, _, Default) -> Default end).
+
+
+teardown_sigs_change(_) ->
+ meck:unload().
+
+
+builtin_reductions_work({Db, _}) ->
+ Result = run_query(Db, <<"builtin">>, #{}),
+ Expect = {ok, [row(null, ?NUM_DOCS)]},
+ ?assertEqual(Expect, Result).
+
+
+custom_reduces_disabled({Db, _}) ->
+ ?assertThrow({disabled, _}, run_query(Db, <<"custom">>, #{})).
+
+
+custom_reduces_enabled({Db, _}) ->
+ Result = run_query(Db, <<"custom">>, #{}),
+ Expect = {ok, [row(null, <<"silly_reduce">>)]},
+ ?assertEqual(Expect, Result).
+
+
+sigs_change(_) ->
+ meck:expect(config, get_boolean, fun("couch_views", _, _) -> false end),
+ {ok, Mrst1} = couch_views_util:ddoc_to_mrst(<<"foo">>, create_ddoc()),
+ meck:expect(config, get_boolean, fun("couch_views", _, _) -> true end),
+ {ok, Mrst2} = couch_views_util:ddoc_to_mrst(<<"foo">>, create_ddoc()),
+ ?assertNotEqual(Mrst1#mrst.sig, Mrst2#mrst.sig).
+
+
+run_query(Db, Idx, Args) ->
+ DDoc = create_ddoc(),
+ run_query(Db, DDoc, Idx, Args).
+
+
+run_query(Db, DDoc, Idx, Args) ->
+ couch_views:query(Db, DDoc, Idx, fun default_cb/2, [], Args).
+
+
+default_cb(complete, Acc) ->
+ {ok, lists:reverse(Acc)};
+default_cb({final, Info}, []) ->
+ {ok, [Info]};
+default_cb({final, _}, Acc) ->
+ {ok, Acc};
+default_cb({meta, _}, Acc) ->
+ {ok, Acc};
+default_cb(ok, ddoc_updated) ->
+ {ok, ddoc_updated};
+default_cb(Row, Acc) ->
+ {ok, [Row | Acc]}.
+
+
+row(Key, Value) ->
+ {row, [{key, Key}, {value, Value}]}.
+
+
+create_ddoc() ->
+ couch_doc:from_json_obj({[
+ {<<"_id">>, <<"_design/bar">>},
+ {<<"views">>, {[
+ {<<"custom">>, {[
+ {<<"map">>, <<"function(doc) {emit(doc.val, doc.val);}">>},
+ {<<"reduce">>, <<
+ "function(keys, values, rereduce) {\n"
+ " return \"silly_reduce\";\n"
+ "}\n"
+ >>}
+ ]}},
+ {<<"builtin">>, {[
+ {<<"map">>, <<"function(doc) {emit(doc.val, doc.val);}">>},
+ {<<"reduce">>, <<"_count">>}
+ ]}}
+ ]}}
+ ]}).
+
+
+make_docs(Db, TotalDocs) when TotalDocs > 0 ->
+ make_docs(Db, TotalDocs, 0).
+
+
+make_docs(Db, TotalDocs, DocsMade) when TotalDocs > DocsMade ->
+ DocCount = min(TotalDocs - DocsMade, 500),
+ Docs = [doc(I + DocsMade) || I <- lists:seq(1, DocCount)],
+ fabric2_db:update_docs(Db, Docs),
+ make_docs(Db, TotalDocs, DocsMade + DocCount);
+
+make_docs(_Db, TotalDocs, DocsMade) when TotalDocs =< DocsMade ->
+ ok.
+
+
+doc(Id) ->
+ couch_doc:from_json_obj({[
+ {<<"_id">>, list_to_binary(integer_to_list(Id))},
+ {<<"val">>, Id}
+ ]}).
diff --git a/src/couch_views/test/couch_views_indexer_test.erl b/src/couch_views/test/couch_views_indexer_test.erl
index 75be2459f..c41db3b4f 100644
--- a/src/couch_views/test/couch_views_indexer_test.erl
+++ b/src/couch_views/test/couch_views_indexer_test.erl
@@ -15,7 +15,6 @@
-include_lib("eunit/include/eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-include_lib("couch_views/include/couch_views.hrl").
-include_lib("fabric/test/fabric2_test.hrl").
@@ -42,16 +41,18 @@ indexer_test_() ->
?TDEF_FE(updated_docs_without_changes_are_reindexed),
?TDEF_FE(deleted_docs_not_indexed),
?TDEF_FE(deleted_docs_are_unindexed),
- ?TDEF_FE(multipe_docs_with_same_key),
- ?TDEF_FE(multipe_keys_from_same_doc),
- ?TDEF_FE(multipe_identical_keys_from_same_doc),
- ?TDEF_FE(fewer_multipe_identical_keys_from_same_doc),
+ ?TDEF_FE(multiple_docs_with_same_key),
+ ?TDEF_FE(multiple_keys_from_same_doc),
+ ?TDEF_FE(multiple_identical_keys_from_same_doc),
+ ?TDEF_FE(fewer_multiple_identical_keys_from_same_doc),
?TDEF_FE(multiple_design_docs),
+ ?TDEF_FE(multiple_doc_update_with_existing_rows),
?TDEF_FE(handle_size_key_limits),
?TDEF_FE(handle_size_value_limits),
?TDEF_FE(index_autoupdater_callback),
?TDEF_FE(handle_db_recreated_when_running),
?TDEF_FE(handle_db_recreated_after_finished),
+ ?TDEF_FE(handle_doc_updated_when_running),
?TDEF_FE(index_can_recover_from_crash, 60)
]
}
@@ -215,7 +216,7 @@ deleted_docs_are_unindexed(Db) ->
end).
-multipe_docs_with_same_key(Db) ->
+multiple_docs_with_same_key(Db) ->
DDoc = create_ddoc(),
Doc1 = doc(0, 1),
Doc2 = doc(1, 1),
@@ -231,7 +232,7 @@ multipe_docs_with_same_key(Db) ->
], Out).
-multipe_keys_from_same_doc(Db) ->
+multiple_keys_from_same_doc(Db) ->
DDoc = create_ddoc(multi_emit_different),
Doc = doc(0, 1),
@@ -246,7 +247,7 @@ multipe_keys_from_same_doc(Db) ->
], Out).
-multipe_identical_keys_from_same_doc(Db) ->
+multiple_identical_keys_from_same_doc(Db) ->
DDoc = create_ddoc(multi_emit_same),
Doc = doc(0, 1),
@@ -261,7 +262,7 @@ multipe_identical_keys_from_same_doc(Db) ->
], Out).
-fewer_multipe_identical_keys_from_same_doc(Db) ->
+fewer_multiple_identical_keys_from_same_doc(Db) ->
DDoc = create_ddoc(multi_emit_same),
Doc0 = #doc{
id = <<"0">>,
@@ -371,7 +372,8 @@ index_autoupdater_callback(Db) ->
?assertMatch([{ok, <<_/binary>>}], Result),
[{ok, JobId}] = Result,
- ?assertEqual(ok, couch_views_jobs:wait_for_job(JobId, DDoc#doc.id, DbSeq)).
+ ?assertMatch({ok, {_, _}},
+ couch_views_jobs:wait_for_job(JobId, DDoc#doc.id, DbSeq)).
multiple_design_docs(Db) ->
@@ -423,6 +425,31 @@ multiple_design_docs(Db) ->
?assertError({ddoc_deleted, _}, run_query(Db, DDoc2, ?MAP_FUN1)).
+multiple_doc_update_with_existing_rows(Db) ->
+ DDoc = create_ddoc(),
+ Doc0 = doc(0),
+ Doc1 = doc(1),
+
+ {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+ {ok, {Pos, Rev}} = fabric2_db:update_doc(Db, Doc1, []),
+
+ {ok, Out1} = run_query(Db, DDoc, ?MAP_FUN1),
+
+ ?assertEqual([row(<<"1">>, 1, 1)], Out1),
+
+ Doc2 = Doc1#doc{
+ revs = {Pos, [Rev]},
+ body = {[{<<"val">>, 2}]}
+ },
+ {ok, _} = fabric2_db:update_docs(Db, [Doc0, Doc2], []),
+
+ {ok, Out2} = run_query(Db, DDoc, ?MAP_FUN1),
+
+ ?assertEqual([
+ row(<<"0">>, 0, 0),
+ row(<<"1">>, 2, 2)
+ ], Out2).
+
handle_db_recreated_when_running(Db) ->
DbName = fabric2_db:name(Db),
@@ -504,6 +531,50 @@ handle_db_recreated_after_finished(Db) ->
], Out2).
+handle_doc_updated_when_running(Db) ->
+ DDoc = create_ddoc(),
+ {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+ {ok, _} = fabric2_db:update_doc(Db, doc(0), []),
+ {ok, _} = fabric2_db:update_doc(Db, doc(1), []),
+
+ % To intercept job building while it is running, ensure updates happen
+ % one row at a time.
+ config:set("couch_views", "batch_initial_size", "1", false),
+
+ meck_intercept_job_update(self()),
+
+ [{ok, JobId}] = couch_views:build_indices(Db, [DDoc]),
+
+ {Indexer, _Job, _Data} = wait_indexer_update(10000),
+
+ {ok, State} = couch_jobs:get_job_state(undefined, ?INDEX_JOB_TYPE, JobId),
+ ?assertEqual(running, State),
+
+ {ok, SubId, running, _} = couch_jobs:subscribe(?INDEX_JOB_TYPE, JobId),
+
+ {ok, Doc} = fabric2_db:open_doc(Db, <<"1">>),
+ Doc2 = Doc#doc {
+ body = {[{<<"val">>, 2}]}
+ },
+ {ok, _} = fabric2_db:update_doc(Db, Doc2),
+
+ reset_intercept_job_update(Indexer),
+ Indexer ! continue,
+
+ ?assertMatch({
+ ?INDEX_JOB_TYPE,
+ JobId,
+ finished,
+ #{<<"active_task_info">> := #{<<"changes_done">> := 1}}
+ }, couch_jobs:wait(SubId, finished, infinity)),
+
+ Args = #mrargs{update = false},
+ {ok, Out2} = couch_views:query(Db, DDoc, ?MAP_FUN1, fun fold_fun/2, [], Args),
+ ?assertEqual([
+ row(<<"0">>, 0, 0)
+ ], Out2).
+
+
index_can_recover_from_crash(Db) ->
ok = meck:new(config, [passthrough]),
ok = meck:expect(config, get_integer, fun(Section, Key, Default) ->
diff --git a/src/couch_views/test/couch_views_info_test.erl b/src/couch_views/test/couch_views_info_test.erl
index 993801a0d..18a0a63bb 100644
--- a/src/couch_views/test/couch_views_info_test.erl
+++ b/src/couch_views/test/couch_views_info_test.erl
@@ -15,7 +15,7 @@
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
-include_lib("fabric/test/fabric2_test.hrl").
diff --git a/src/couch_views/test/couch_views_map_test.erl b/src/couch_views/test/couch_views_map_test.erl
index c419546e1..97e35cc9a 100644
--- a/src/couch_views/test/couch_views_map_test.erl
+++ b/src/couch_views/test/couch_views_map_test.erl
@@ -58,6 +58,7 @@ map_views_test_() ->
?TDEF(should_map_with_doc_emit),
?TDEF(should_map_update_is_false),
?TDEF(should_map_update_is_lazy),
+ ?TDEF(should_map_snapshot),
?TDEF(should_map_wait_for_interactive),
?TDEF(should_map_local_seq)
% fun should_give_ext_size_seq_indexed_test/1
@@ -410,7 +411,7 @@ should_map_update_is_lazy() ->
{ok, Mrst} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
JobId = couch_views_jobs:job_id(Db, Mrst),
UpdateSeq = fabric2_db:get_update_seq(Db),
- ok = couch_views_jobs:wait_for_job(JobId, DDoc#doc.id, UpdateSeq),
+ {ok, _} = couch_views_jobs:wait_for_job(JobId, DDoc#doc.id, UpdateSeq),
Args2 = #{
start_key => 8,
@@ -422,6 +423,100 @@ should_map_update_is_lazy() ->
?assertEqual(Expect, Result2).
+should_map_snapshot() ->
+ Idx = <<"baz">>,
+ DbName = ?tempdb(),
+
+ {ok, Db} = fabric2_db:create(DbName, [{user_ctx, ?ADMIN_USER}]),
+
+ DDoc = create_ddoc(),
+ Docs = make_docs(2),
+ fabric2_db:update_docs(Db, [DDoc | Docs]),
+
+ % A lazy query just gets hold of a job and waits for it so we can
+ % get the indexer versionstamps
+ ?assertEqual({ok, []}, couch_views:query(Db, DDoc, Idx, fun default_cb/2,
+ [], #{update => lazy})),
+ {ok, Mrst} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
+ JobId = couch_views_jobs:job_id(Db, Mrst),
+ DbSeq = fabric2_db:get_update_seq(Db),
+ {ok, VStamps} = couch_views_jobs:wait_for_job(JobId, DDoc#doc.id, DbSeq),
+
+ {DbReadVsn, ViewReadVsn} = VStamps,
+ ?assert(is_integer(DbReadVsn)),
+ ?assert(is_integer(ViewReadVsn)),
+ ?assert(DbReadVsn < ViewReadVsn),
+
+ % Update doc 1 and delete doc 2
+ {ok, Doc1Open} = fabric2_db:open_doc(Db, <<"1">>),
+ Doc1Upd = Doc1Open#doc{body = {[{<<"val">>, 42}]}},
+ ?assertMatch({ok, {2, _}}, fabric2_db:update_doc(Db, Doc1Upd)),
+
+ {ok, Doc2Open} = fabric2_db:open_doc(Db, <<"2">>),
+ Doc2Del = Doc2Open#doc{deleted = true},
+ ?assertMatch({ok, {2, _}}, fabric2_db:update_doc(Db, Doc2Del)),
+
+ ReadSnapshot = fun(#{tx := Tx} = TxDb) ->
+ Args = #mrargs{include_docs = true, view_type = map},
+ Callback = fun default_cb/2,
+ erlfdb:set_read_version(Tx, ViewReadVsn),
+ couch_views_reader:read(TxDb, Mrst, Idx, Callback, [], Args, DbReadVsn)
+ end,
+
+ % Perform a stale snapshot read asserting that doc updates
+ % haven't affected the include_docs results
+ ?assertMatch({ok, [
+ {row, [
+ {id, <<"1">>},
+ {key, 1},
+ {value, 1},
+ {doc, {[
+ {<<"_id">>, <<"1">>},
+ {<<"_rev">>, <<_/binary>>},
+ {<<"val">>, 1}
+ ]}}
+ ]},
+ {row, [
+ {id, <<"2">>},
+ {key, 2},
+ {value, 2},
+ {doc, {[
+ {<<"_id">>, <<"2">>},
+ {<<"_rev">>, <<_/binary>>},
+ {<<"val">>, 2}
+ ]}}
+ ]}
+ ]}, fabric2_fdb:transactional(Db, ReadSnapshot)),
+
+ % Update the view
+ ?assertMatch({ok, [{row, [{id, <<"1">>}, {key, 42}, {value, 42}]}]},
+ couch_views:query(Db, DDoc, Idx, fun default_cb/2, [], #{})),
+
+ % After the view was updated, the original snapshot stays the same
+ ?assertMatch({ok, [
+ {row, [
+ {id, <<"1">>},
+ {key, 1},
+ {value, 1},
+ {doc, {[
+ {<<"_id">>, <<"1">>},
+ {<<"_rev">>, <<_/binary>>},
+ {<<"val">>, 1}
+ ]}}
+ ]},
+ {row, [
+ {id, <<"2">>},
+ {key, 2},
+ {value, 2},
+ {doc, {[
+ {<<"_id">>, <<"2">>},
+ {<<"_rev">>, <<_/binary>>},
+ {<<"val">>, 2}
+ ]}}
+ ]}
+ ]}, fabric2_fdb:transactional(Db, ReadSnapshot)).
+
+
should_map_wait_for_interactive() ->
DbName = ?tempdb(),
{ok, Db} = fabric2_db:create(DbName, [{user_ctx, ?ADMIN_USER}]),
@@ -471,27 +566,6 @@ check_local_seq(Val, Expected) ->
?assertEqual(Expected, Result).
-% should_give_ext_size_seq_indexed_test(Db) ->
-% DDoc = couch_doc:from_json_obj({[
-% {<<"_id">>, <<"_design/seqdoc">>},
-% {<<"options">>, {[{<<"seq_indexed">>, true}]}},
-% {<<"views">>, {[
-% {<<"view1">>, {[
-% {<<"map">>, <<"function(doc){emit(doc._id, doc._id);}">>}
-% ]}}
-% ]}
-% }
-% ]}),
-% {ok, _} = couch_db:update_doc(Db, DDoc, []),
-% {ok, Db1} = couch_db:open_int(couch_db:name(Db), []),
-% {ok, DDoc1} = couch_db:open_doc(Db1, <<"_design/seqdoc">>, [ejson_body]),
-% couch_mrview:query_view(Db1, DDoc1, <<"view1">>, [{update, true}]),
-% {ok, Info} = couch_mrview:get_info(Db1, DDoc),
-% Size = couch_util:get_nested_json_value({Info}, [sizes, external]),
-% ok = couch_db:close(Db1),
-% ?assert(is_number(Size)).
-
-
run_query(Idx, Args) ->
run_query(Idx, Args, false).
diff --git a/src/couch_views/test/couch_views_red_test.erl b/src/couch_views/test/couch_views_red_test.erl
index 707611f6e..84c64738d 100644
--- a/src/couch_views/test/couch_views_red_test.erl
+++ b/src/couch_views/test/couch_views_red_test.erl
@@ -213,7 +213,7 @@ should_reduce_empty_range({Db, _}) ->
end_key => 100001
},
Result = run_query(Db, <<"baz_count">>, Args),
- Expect = {ok, [row(null, 0)]},
+ Expect = {ok, []},
?assertEqual(Expect, Result).
@@ -224,7 +224,7 @@ should_reduce_empty_range_rev({Db, _}) ->
end_key => 100000
},
Result = run_query(Db, <<"baz_count">>, Args),
- Expect = {ok, [row(null, 0)]},
+ Expect = {ok, []},
?assertEqual(Expect, Result).
diff --git a/src/couch_views/test/couch_views_server_test.erl b/src/couch_views/test/couch_views_server_test.erl
index 3c0c0a86a..41d7aaf42 100644
--- a/src/couch_views/test/couch_views_server_test.erl
+++ b/src/couch_views/test/couch_views_server_test.erl
@@ -46,7 +46,8 @@ setup() ->
fabric,
couch_jobs,
couch_js,
- couch_eval
+ couch_eval,
+ couch_lib
]),
Ctx.
diff --git a/src/couch_views/test/couch_views_size_test.erl b/src/couch_views/test/couch_views_size_test.erl
index e69b5b292..91684a9ba 100644
--- a/src/couch_views/test/couch_views_size_test.erl
+++ b/src/couch_views/test/couch_views_size_test.erl
@@ -15,7 +15,6 @@
-include_lib("eunit/include/eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-include_lib("couch_views/include/couch_views.hrl").
-include_lib("fabric/test/fabric2_test.hrl").
@@ -353,4 +352,4 @@ fold_fun({meta, _Meta}, Acc) ->
fold_fun({row, _} = Row, Acc) ->
{ok, [Row | Acc]};
fold_fun(complete, Acc) ->
- {ok, lists:reverse(Acc)}.
\ No newline at end of file
+ {ok, lists:reverse(Acc)}.
diff --git a/src/couch_views/test/couch_views_trace_index_test.erl b/src/couch_views/test/couch_views_trace_index_test.erl
index 03c21a34a..346a99c60 100644
--- a/src/couch_views/test/couch_views_trace_index_test.erl
+++ b/src/couch_views/test/couch_views_trace_index_test.erl
@@ -17,7 +17,7 @@
-include_lib("eunit/include/eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
% Steps for this to work
diff --git a/src/couch_views/test/couch_views_updater_test.erl b/src/couch_views/test/couch_views_updater_test.erl
index aadbe940b..1bd637dd6 100644
--- a/src/couch_views/test/couch_views_updater_test.erl
+++ b/src/couch_views/test/couch_views_updater_test.erl
@@ -14,10 +14,9 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
-include_lib("fabric/test/fabric2_test.hrl").
-include_lib("mango/src/mango_idx.hrl").
--include_lib("couch_views/include/couch_views.hrl").
indexer_test_() ->
diff --git a/src/couch_views/test/couch_views_upgrade_test.erl b/src/couch_views/test/couch_views_upgrade_test.erl
index 556a76297..3926db347 100644
--- a/src/couch_views/test/couch_views_upgrade_test.erl
+++ b/src/couch_views/test/couch_views_upgrade_test.erl
@@ -15,7 +15,6 @@
-include_lib("eunit/include/eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-include_lib("couch_views/include/couch_views.hrl").
-include_lib("fabric/include/fabric2.hrl").
-include_lib("fabric/test/fabric2_test.hrl").
@@ -397,4 +396,4 @@ doc(Id, Val) ->
run_query(#{} = Db, DDoc, <<_/binary>> = View) ->
- couch_views:query(Db, DDoc, View, fun fold_fun/2, [], #mrargs{}).
\ No newline at end of file
+ couch_views:query(Db, DDoc, View, fun fold_fun/2, [], #mrargs{}).
diff --git a/src/ctrace/README.md b/src/ctrace/README.md
index 4b0238b14..6c687e8c0 100644
--- a/src/ctrace/README.md
+++ b/src/ctrace/README.md
@@ -109,7 +109,7 @@ Instrumentation guide
- Extend `<app>_httpd_handlers:handler_info/1` as needed to
have operation ids. (We as a community might need to work on
naming conventions)
-- Use [span conventions](https://github.com/apache/couchdb-documentation/blob/master/rfcs/011-opentracing.md#conventions) https://github.com/opentracing/specification/blob/master/semantic_conventions.md
+- Use [span conventions](https://github.com/apache/couchdb-documentation/blob/main/rfcs/011-opentracing.md#conventions) https://github.com/opentracing/specification/blob/master/semantic_conventions.md
- When in doubt consult open tracing spec
- [spec overview](https://github.com/opentracing/specification/blob/master/specification.md)
- [conventions](https://github.com/opentracing/specification/blob/master/semantic_conventions.md#standard-span-tags-and-log-fields)
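Editor's note: as a hedged illustration of the instrumentation guide above, the sketch
below wraps a request handler in ctrace:with_span/3, whose implementation appears in
the ctrace.erl hunk further down. The operation id, the option keys, and the helper
do_db_info/2 are assumptions for illustration only; they are not prescribed by this
patch or by the conventions documents.

    %% Hypothetical usage sketch; names and option keys are assumptions.
    handle_db_info(Req, DbName) ->
        ctrace:with_span('db.info.read', #{tags => #{'db.name' => DbName}}, fun() ->
            %% Real handler work would go here.
            do_db_info(Req, DbName)
        end).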
diff --git a/src/ctrace/src/ctrace.erl b/src/ctrace/src/ctrace.erl
index 5521901fd..2821352bf 100644
--- a/src/ctrace/src/ctrace.erl
+++ b/src/ctrace/src/ctrace.erl
@@ -44,6 +44,7 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("passage/include/opentracing.hrl").
-include("ctrace.hrl").
+-include_lib("kernel/include/logger.hrl").
-type operation()
@@ -109,8 +110,7 @@ with_span(Operation, Options, Fun) ->
try
start_span(Operation, Options),
Fun()
- catch Type:Reason ->
- Stack = erlang:get_stacktrace(),
+ catch Type:Reason:Stack ->
log(#{
?LOG_FIELD_ERROR_KIND => Type,
?LOG_FIELD_MESSAGE => Reason,
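Editor's note: the hunk above replaces the removed erlang:get_stacktrace/0 with the
OTP 21+ catch syntax, which binds the stacktrace directly in the clause head. A minimal
standalone sketch of that syntax (illustrative only, not part of the patch):

    -module(stacktrace_sketch).
    -export([safe_apply/1]).

    safe_apply(Fun) ->
        try
            Fun()
        catch
            Class:Reason:Stack ->
                %% Stack is only bound within this catch clause.
                {error, {Class, Reason, Stack}}
        end.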
diff --git a/src/ctrace/src/ctrace_config.erl b/src/ctrace/src/ctrace_config.erl
index c63c77f1b..e9141d21a 100644
--- a/src/ctrace/src/ctrace_config.erl
+++ b/src/ctrace/src/ctrace_config.erl
@@ -29,6 +29,7 @@
]).
-include("ctrace.hrl").
+-include_lib("kernel/include/logger.hrl").
-spec is_enabled() -> boolean().
@@ -132,10 +133,12 @@ http_client(Endpoint, Method, Headers, Body, _ReporterOptions) ->
compile_filter(OperationId, FilterDef) ->
try
+ ?LOG_INFO(#{what => compile_filter, id => OperationId}),
couch_log:info("Compiling filter : ~s", [OperationId]),
ctrace_dsl:compile(OperationId, FilterDef),
true
catch throw:{error, Reason} ->
+ ?LOG_ERROR(#{what => compile_filter, id => OperationId, details => Reason}),
couch_log:error("Cannot compile ~s :: ~s~n", [OperationId, Reason]),
false
end.
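Editor's note: this hunk layers OTP structured logging (kernel logger report maps) on
top of the existing couch_log calls. A small standalone sketch of the dual-logging
pattern, assuming the what/id/details report keys used above:

    -module(dual_log_sketch).
    -export([log_compile_error/2]).

    -include_lib("kernel/include/logger.hrl").

    log_compile_error(OperationId, Reason) ->
        %% Structured report consumed by the OTP logger handlers.
        ?LOG_ERROR(#{what => compile_filter, id => OperationId, details => Reason}),
        %% Legacy formatted message for existing couch_log consumers.
        couch_log:error("Cannot compile ~s :: ~s~n", [OperationId, Reason]).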
diff --git a/src/ctrace/test/ctrace_config_test.erl b/src/ctrace/test/ctrace_config_test.erl
index 0827013fd..bc108b2a1 100644
--- a/src/ctrace/test/ctrace_config_test.erl
+++ b/src/ctrace/test/ctrace_config_test.erl
@@ -14,6 +14,7 @@
-include_lib("eunit/include/eunit.hrl").
-include_lib("ctrace/src/ctrace.hrl").
+-include_lib("kernel/include/logger.hrl").
-define(TDEF(A), {atom_to_list(A), fun A/0}).
@@ -79,6 +80,7 @@ ensure_all_supported() ->
handle_all_syntax_error_supported() ->
+ ?LOG_ERROR(#{what => xkcd, event => test_start}),
couch_log:error("XKCD: TEST START", []),
config:delete("tracing.filters", "all", false),
test_util:wait_value(fun() ->
@@ -96,6 +98,7 @@ handle_all_syntax_error_supported() ->
% then we default to not generating traces
?assertEqual(false, ctrace:match(bam, #{gee => whiz})),
+ ?LOG_ERROR(#{what => xkcd, event => test_end}),
couch_log:error("XKCD: TEST END", []),
config:delete("tracing.filters", "all", false).
diff --git a/src/ddoc_cache/LICENSE b/src/ddoc_cache/LICENSE
deleted file mode 100644
index f6cd2bc80..000000000
--- a/src/ddoc_cache/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/src/ddoc_cache/README.md b/src/ddoc_cache/README.md
deleted file mode 100644
index 81d600b12..000000000
--- a/src/ddoc_cache/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-Design Doc Cache
-================
-
-Pretty much covers it.
diff --git a/src/ddoc_cache/priv/stats_descriptions.cfg b/src/ddoc_cache/priv/stats_descriptions.cfg
deleted file mode 100644
index f769a979f..000000000
--- a/src/ddoc_cache/priv/stats_descriptions.cfg
+++ /dev/null
@@ -1,12 +0,0 @@
-{[ddoc_cache, hit], [
- {type, counter},
- {desc, <<"number of design doc cache hits">>}
-]}.
-{[ddoc_cache, miss], [
- {type, counter},
- {desc, <<"number of design doc cache misses">>}
-]}.
-{[ddoc_cache, recovery], [
- {type, counter},
- {desc, <<"number of design doc cache recoveries">>}
-]}.
diff --git a/src/ddoc_cache/src/ddoc_cache.app.src b/src/ddoc_cache/src/ddoc_cache.app.src
deleted file mode 100644
index faacdf570..000000000
--- a/src/ddoc_cache/src/ddoc_cache.app.src
+++ /dev/null
@@ -1,32 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, ddoc_cache, [
- {description, "Design Document Cache"},
- {vsn, git},
- {registered, [
- ddoc_cache_lru,
- ddoc_cache_opener
- ]},
- {applications, [
- kernel,
- stdlib,
- crypto,
- config,
- couch_event,
- couch_log,
- couch_stats,
- mem3,
- fabric
- ]},
- {mod, {ddoc_cache_app, []}}
-]}.
diff --git a/src/ddoc_cache/src/ddoc_cache.erl b/src/ddoc_cache/src/ddoc_cache.erl
deleted file mode 100644
index 50cac3039..000000000
--- a/src/ddoc_cache/src/ddoc_cache.erl
+++ /dev/null
@@ -1,60 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache).
-
-
--export([
- open_doc/2,
- open_doc/3,
- open_validation_funs/1,
- open_custom/2,
- refresh/2,
-
- %% deprecated
- open/2
-]).
-
-
-open_doc(DbName, DocId) ->
- Key = {ddoc_cache_entry_ddocid, {DbName, DocId}},
- ddoc_cache_lru:open(Key).
-
-
-open_doc(DbName, DocId, RevId) ->
- Key = {ddoc_cache_entry_ddocid_rev, {DbName, DocId, RevId}},
- ddoc_cache_lru:open(Key).
-
-
-open_validation_funs(DbName) ->
- Key = {ddoc_cache_entry_validation_funs, DbName},
- ddoc_cache_lru:open(Key).
-
-
-open_custom(DbName, Mod) ->
- Key = {ddoc_cache_entry_custom, {DbName, Mod}},
- ddoc_cache_lru:open(Key).
-
-
-refresh(ShardDbName, DDocIds) when is_list(DDocIds) ->
- DbName = mem3:dbname(ShardDbName),
- ddoc_cache_lru:refresh(DbName, DDocIds).
-
-
-open(DbName, validation_funs) ->
- open_validation_funs(DbName);
-open(DbName, Module) when is_atom(Module) ->
- open_custom(DbName, Module);
-open(DbName, <<"_design/", _/binary>>=DDocId) when is_binary(DbName) ->
- open_doc(DbName, DDocId);
-open(DbName, DDocId) when is_binary(DDocId) ->
- open_doc(DbName, <<"_design/", DDocId/binary>>).
diff --git a/src/ddoc_cache/src/ddoc_cache.hrl b/src/ddoc_cache/src/ddoc_cache.hrl
deleted file mode 100644
index dba0d37b2..000000000
--- a/src/ddoc_cache/src/ddoc_cache.hrl
+++ /dev/null
@@ -1,40 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--type dbname() :: iodata().
--type docid() :: iodata().
--type doc_hash() :: <<_:128>>.
--type revision() :: {pos_integer(), doc_hash()}.
-
--define(CACHE, ddoc_cache_entries).
--define(LRU, ddoc_cache_lru).
--define(REFRESH_TIMEOUT, 67000).
--define(SHUTDOWN_TIMEOUT, 1000).
-
--record(entry, {
- key,
- val,
- pid
-}).
-
--record(opener, {
- key,
- pid,
- clients
-}).
-
-
--ifdef(TEST).
--define(EVENT(Name, Arg), ddoc_cache_ev:event(Name, Arg)).
--else.
--define(EVENT(Name, Arg), ignore).
--endif.
diff --git a/src/ddoc_cache/src/ddoc_cache_entry.erl b/src/ddoc_cache/src/ddoc_cache_entry.erl
deleted file mode 100644
index ed0311bbd..000000000
--- a/src/ddoc_cache/src/ddoc_cache_entry.erl
+++ /dev/null
@@ -1,374 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_entry).
--behaviour(gen_server).
--vsn(1).
-
-
--export([
- dbname/1,
- ddocid/1,
- recover/1,
- insert/2,
-
- start_link/2,
- shutdown/1,
- open/2,
- accessed/1,
- refresh/1
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3,
- format_status/2
-]).
-
--export([
- do_open/1
-]).
-
-
--include("ddoc_cache.hrl").
-
-
--ifndef(TEST).
--define(ENTRY_SHUTDOWN_TIMEOUT, 5000).
--else.
--define(ENTRY_SHUTDOWN_TIMEOUT, 500).
--endif.
-
-
--record(st, {
- key,
- val,
- opener,
- waiters,
- ts,
- accessed
-}).
-
-
-dbname({Mod, Arg}) ->
- Mod:dbname(Arg).
-
-
-ddocid({Mod, Arg}) ->
- Mod:ddocid(Arg).
-
-
-recover({Mod, Arg}) ->
- Mod:recover(Arg).
-
-
-insert({Mod, Arg}, Value) ->
- Mod:insert(Arg, Value).
-
-
-start_link(Key, Default) ->
- Pid = proc_lib:spawn_link(?MODULE, init, [{Key, Default}]),
- {ok, Pid}.
-
-
-shutdown(Pid) ->
- Ref = erlang:monitor(process, Pid),
- ok = gen_server:cast(Pid, shutdown),
- receive
- {'DOWN', Ref, process, Pid, normal} ->
- ok;
- {'DOWN', Ref, process, Pid, Reason} ->
- erlang:exit(Reason)
- after ?ENTRY_SHUTDOWN_TIMEOUT ->
- erlang:demonitor(Ref, [flush]),
- erlang:exit({timeout, {entry_shutdown, Pid}})
- end.
-
-
-open(Pid, Key) ->
- try
- Resp = gen_server:call(Pid, open),
- case Resp of
- {open_ok, Val} ->
- Val;
- {open_error, {T, R, S}} ->
- erlang:raise(T, R, S)
- end
- catch
- error:database_does_not_exist ->
- erlang:error(database_does_not_exist);
- exit:_ ->
- % Its possible that this process was evicted just
- % before we tried talking to it. Just fallback
- % to a standard recovery
- recover(Key)
- end.
-
-
-accessed(Pid) ->
- gen_server:cast(Pid, accessed).
-
-
-refresh(Pid) ->
- gen_server:cast(Pid, force_refresh).
-
-
-init({Key, undefined}) ->
- true = ets:update_element(?CACHE, Key, {#entry.pid, self()}),
- St = #st{
- key = Key,
- opener = spawn_opener(Key),
- waiters = [],
- accessed = 1
- },
- ?EVENT(started, Key),
- gen_server:enter_loop(?MODULE, [], St);
-
-init({Key, Wrapped}) ->
- Default = ddoc_cache_value:unwrap(Wrapped),
- Updates = [
- {#entry.val, Default},
- {#entry.pid, self()}
- ],
- NewTs = os:timestamp(),
- true = ets:update_element(?CACHE, Key, Updates),
- true = ets:insert(?LRU, {{NewTs, Key, self()}}),
- St = #st{
- key = Key,
- val = {open_ok, {ok, Default}},
- opener = start_timer(),
- waiters = [],
- ts = NewTs,
- accessed = 1
- },
- ?EVENT(default_started, Key),
- gen_server:enter_loop(?MODULE, [], St, hibernate).
-
-
-terminate(_Reason, St) ->
- #st{
- key = Key,
- opener = Pid,
- ts = Ts
- } = St,
- % We may have already deleted our cache entry
- % during shutdown
- Pattern = #entry{key = Key, pid = self(), _ = '_'},
- CacheMSpec = [{Pattern, [], [true]}],
- true = ets:select_delete(?CACHE, CacheMSpec) < 2,
- % We may have already deleted our LRU entry
- % during shutdown
- if Ts == undefined -> ok; true ->
- LruMSpec = [{{{Ts, Key, self()}}, [], [true]}],
- true = ets:select_delete(?LRU, LruMSpec) < 2
- end,
- % Blow away any current opener if it exists
- if not is_pid(Pid) -> ok; true ->
- catch exit(Pid, kill)
- end,
- ok.
-
-
-handle_call(open, From, #st{opener = Pid} = St) when is_pid(Pid) ->
- NewSt = St#st{
- waiters = [From | St#st.waiters]
- },
- {noreply, NewSt};
-
-handle_call(open, _From, St) ->
- {reply, St#st.val, St};
-
-handle_call(Msg, _From, St) ->
- {stop, {bad_call, Msg}, {bad_call, Msg}, St}.
-
-
-handle_cast(accessed, St) ->
- ?EVENT(accessed, St#st.key),
- drain_accessed(),
- NewSt = St#st{
- accessed = St#st.accessed + 1
- },
- {noreply, update_lru(NewSt)};
-
-handle_cast(force_refresh, St) ->
- % If we had frequent design document updates
- % they could end up racing accessed events and
- % end up prematurely evicting this entry from
- % cache. To prevent this we just make sure that
- % accessed is set to at least 1 before we
- % execute a refresh.
- NewSt = if St#st.accessed > 0 -> St; true ->
- St#st{accessed = 1}
- end,
- % We remove the cache entry value so that any
- % new client comes to us for the refreshed
- % value.
- true = ets:update_element(?CACHE, St#st.key, {#entry.val, undefined}),
- handle_cast(refresh, NewSt);
-
-handle_cast(refresh, #st{accessed = 0} = St) ->
- {stop, normal, St};
-
-handle_cast(refresh, #st{opener = Ref} = St) when is_reference(Ref) ->
- #st{
- key = Key
- } = St,
- erlang:cancel_timer(Ref),
- NewSt = St#st{
- opener = spawn_opener(Key),
- accessed = 0
- },
- {noreply, NewSt};
-
-handle_cast(refresh, #st{opener = Pid} = St) when is_pid(Pid) ->
- catch exit(Pid, kill),
- receive
- {'DOWN', _, _, Pid, _} -> ok
- end,
- NewSt = St#st{
- opener = spawn_opener(St#st.key),
- accessed = 0
- },
- {noreply, NewSt};
-
-handle_cast(shutdown, St) ->
- remove_from_cache(St),
- {stop, normal, St};
-
-handle_cast(Msg, St) ->
- {stop, {bad_cast, Msg}, St}.
-
-
-handle_info({'DOWN', _, _, Pid, Resp}, #st{key = Key, opener = Pid} = St) ->
- case Resp of
- {open_ok, Key, {ok, Val}} ->
- update_cache(St, Val),
- NewSt1 = St#st{
- val = {open_ok, {ok, Val}},
- opener = start_timer(),
- waiters = []
- },
- NewSt2 = update_lru(NewSt1),
- respond(St#st.waiters, {open_ok, {ok, Val}}),
- {noreply, NewSt2};
- {Status, Key, Other} ->
- NewSt = St#st{
- val = {Status, Other},
- opener = undefined,
- waiters = undefined
- },
- remove_from_cache(NewSt),
- respond(St#st.waiters, {Status, Other}),
- {stop, normal, NewSt}
- end;
-
-handle_info(Msg, St) ->
- {stop, {bad_info, Msg}, St}.
-
-
-code_change(_, St, _) ->
- {ok, St}.
-
-
-format_status(_Opt, [_PDict, State]) ->
- #st{
- key = Key,
- val = Val,
- opener = Opener,
- waiters = Waiters,
- ts = TS,
- accessed = Accepted
- } = State,
- [{data, [{"State", [
- {key, Key},
- {val, Val},
- {opener, Opener},
- {waiters, {length, length(Waiters)}},
- {ts, TS},
- {accessed, Accepted}
- ]}]}].
-
-spawn_opener(Key) ->
- {Pid, _} = erlang:spawn_monitor(?MODULE, do_open, [Key]),
- Pid.
-
-
-start_timer() ->
- TimeOut = config:get_integer(
- "ddoc_cache", "refresh_timeout", ?REFRESH_TIMEOUT),
- erlang:send_after(TimeOut, self(), {'$gen_cast', refresh}).
-
-
-do_open(Key) ->
- try recover(Key) of
- Resp ->
- erlang:exit({open_ok, Key, Resp})
- catch T:R ->
- S = erlang:get_stacktrace(),
- erlang:exit({open_error, Key, {T, R, S}})
- end.
-
-
-update_lru(#st{key = Key, ts = Ts} = St) ->
- remove_from_lru(Ts, Key),
- NewTs = os:timestamp(),
- true = ets:insert(?LRU, {{NewTs, Key, self()}}),
- St#st{ts = NewTs}.
-
-
-update_cache(#st{val = undefined} = St, Val) ->
- true = ets:update_element(?CACHE, St#st.key, {#entry.val, Val}),
- ?EVENT(inserted, St#st.key);
-
-update_cache(#st{val = V1} = _St, V2) when {open_ok, {ok, V2}} == V1 ->
- ?EVENT(update_noop, _St#st.key);
-
-update_cache(St, Val) ->
- true = ets:update_element(?CACHE, St#st.key, {#entry.val, Val}),
- ?EVENT(updated, {St#st.key, Val}).
-
-
-remove_from_cache(St) ->
- #st{
- key = Key,
- ts = Ts
- } = St,
- Pattern = #entry{key = Key, pid = self(), _ = '_'},
- CacheMSpec = [{Pattern, [], [true]}],
- 1 = ets:select_delete(?CACHE, CacheMSpec),
- remove_from_lru(Ts, Key),
- ?EVENT(removed, St#st.key),
- ok.
-
-
-remove_from_lru(Ts, Key) ->
- if Ts == undefined -> ok; true ->
- LruMSpec = [{{{Ts, Key, self()}}, [], [true]}],
- 1 = ets:select_delete(?LRU, LruMSpec)
- end.
-
-
-drain_accessed() ->
- receive
- {'$gen_cast', accessed} ->
- drain_accessed()
- after 0 ->
- ok
- end.
-
-
-respond(Waiters, Resp) ->
- [gen_server:reply(W, Resp) || W <- Waiters].
diff --git a/src/ddoc_cache/src/ddoc_cache_entry_custom.erl b/src/ddoc_cache/src/ddoc_cache_entry_custom.erl
deleted file mode 100644
index 9eaf16f34..000000000
--- a/src/ddoc_cache/src/ddoc_cache_entry_custom.erl
+++ /dev/null
@@ -1,37 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_entry_custom).
-
-
--export([
- dbname/1,
- ddocid/1,
- recover/1,
- insert/2
-]).
-
-
-dbname({DbName, _}) ->
- DbName.
-
-
-ddocid(_) ->
- no_ddocid.
-
-
-recover({DbName, Mod}) ->
- Mod:recover(DbName).
-
-
-insert(_, _) ->
- ok.
diff --git a/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl b/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl
deleted file mode 100644
index 7c3dc6787..000000000
--- a/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl
+++ /dev/null
@@ -1,46 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_entry_ddocid).
-
-
--export([
- dbname/1,
- ddocid/1,
- recover/1,
- insert/2
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
-
-
-dbname({DbName, _}) ->
- DbName.
-
-
-ddocid({_, DDocId}) ->
- DDocId.
-
-
-recover({DbName, DDocId}) ->
- fabric2_db:open_doc(DbName, DDocId, [ejson_body, ?ADMIN_CTX]).
-
-
-insert({DbName, DDocId}, {ok, #doc{revs = Revs} = DDoc}) ->
- {Depth, [RevId | _]} = Revs,
- Rev = {Depth, RevId},
- Key = {ddoc_cache_entry_ddocid_rev, {DbName, DDocId, Rev}},
- spawn(fun() -> ddoc_cache_lru:insert(Key, DDoc) end);
-
-insert(_, _) ->
- ok.
diff --git a/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl b/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl
deleted file mode 100644
index 38445af96..000000000
--- a/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl
+++ /dev/null
@@ -1,47 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_entry_ddocid_rev).
-
-
--export([
- dbname/1,
- ddocid/1,
- recover/1,
- insert/2
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
-
-
-dbname({DbName, _, _}) ->
- DbName.
-
-
-ddocid({_, DDocId, _}) ->
- DDocId.
-
-
-recover({DbName, DDocId, Rev}) ->
- Opts = [ejson_body, ?ADMIN_CTX],
- {ok, [Resp]} = fabric2_db:open_doc_revs(DbName, DDocId, [Rev], Opts),
- Resp.
-
-
-insert({DbName, DDocId, _Rev}, {ok, #doc{} = DDoc}) ->
- Key = {ddoc_cache_entry_ddocid, {DbName, DDocId}},
- spawn(fun() -> ddoc_cache_lru:insert(Key, DDoc) end);
-
-insert(_, _) ->
- ok.
-
diff --git a/src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl b/src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl
deleted file mode 100644
index 2182dead6..000000000
--- a/src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl
+++ /dev/null
@@ -1,44 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_entry_validation_funs).
-
-
--export([
- dbname/1,
- ddocid/1,
- recover/1,
- insert/2
-]).
-
-
-dbname(DbName) ->
- DbName.
-
-
-ddocid(_) ->
- no_ddocid.
-
-
-recover(DbName) ->
- {ok, DDocs} = fabric:design_docs(mem3:dbname(DbName)),
- Funs = lists:flatmap(fun(DDoc) ->
- case couch_doc:get_validate_doc_fun(DDoc) of
- nil -> [];
- Fun -> [Fun]
- end
- end, DDocs),
- {ok, Funs}.
-
-
-insert(_, _) ->
- ok.
diff --git a/src/ddoc_cache/src/ddoc_cache_lru.erl b/src/ddoc_cache/src/ddoc_cache_lru.erl
deleted file mode 100644
index 28a8a64c4..000000000
--- a/src/ddoc_cache/src/ddoc_cache_lru.erl
+++ /dev/null
@@ -1,333 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_lru).
--behaviour(gen_server).
--vsn(1).
-
-
--export([
- start_link/0,
- open/1,
- insert/2,
- refresh/2
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3
-]).
-
--export([
- handle_db_event/3
-]).
-
-
--include("ddoc_cache.hrl").
-
-
--define(OPENER, ddoc_cache_opener).
-
-
--record(st, {
- pids, % pid -> key
- dbs, % dbname -> docid -> key -> pid
- evictor
-}).
-
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-
-open(Key) ->
- try ets:lookup(?CACHE, Key) of
- [] ->
- lru_start(Key, true);
- [#entry{pid = undefined}] ->
- lru_start(Key, false);
- [#entry{val = undefined, pid = Pid}] ->
- couch_stats:increment_counter([ddoc_cache, miss]),
- ddoc_cache_entry:open(Pid, Key);
- [#entry{val = Val, pid = Pid}] ->
- couch_stats:increment_counter([ddoc_cache, hit]),
- ddoc_cache_entry:accessed(Pid),
- {ok, Val}
- catch _:_ ->
- couch_stats:increment_counter([ddoc_cache, recovery]),
- ddoc_cache_entry:recover(Key)
- end.
-
-
-insert(Key, Value) ->
- case ets:lookup(?CACHE, Key) of
- [] ->
- Wrapped = ddoc_cache_value:wrap(Value),
- gen_server:call(?MODULE, {start, Key, Wrapped}, infinity);
- [#entry{}] ->
- ok
- end.
-
-
-refresh(DbName, DDocIds) ->
- gen_server:cast(?MODULE, {refresh, DbName, DDocIds}).
-
-
-init(_) ->
- couch_util:set_mqd_off_heap(?MODULE),
- process_flag(trap_exit, true),
- BaseOpts = [public, named_table],
- CacheOpts = [
- set,
- {read_concurrency, true},
- {keypos, #entry.key}
- ] ++ BaseOpts,
- ets:new(?CACHE, CacheOpts),
- ets:new(?LRU, [ordered_set, {write_concurrency, true}] ++ BaseOpts),
- {ok, Pids} = khash:new(),
- {ok, Dbs} = khash:new(),
- {ok, Evictor} = couch_event:link_listener(
- ?MODULE, handle_db_event, nil, [all_dbs]
- ),
- ?EVENT(lru_init, nil),
- {ok, #st{
- pids = Pids,
- dbs = Dbs,
- evictor = Evictor
- }}.
-
-
-terminate(_Reason, St) ->
- case is_pid(St#st.evictor) of
- true -> exit(St#st.evictor, kill);
- false -> ok
- end,
- ok.
-
-
-handle_call({start, Key, Default}, _From, St) ->
- #st{
- pids = Pids,
- dbs = Dbs
- } = St,
- case ets:lookup(?CACHE, Key) of
- [] ->
- MaxSize = config:get_integer("ddoc_cache", "max_size", 104857600),
- case trim(St, max(0, MaxSize)) of
- ok ->
- true = ets:insert_new(?CACHE, #entry{key = Key}),
- {ok, Pid} = ddoc_cache_entry:start_link(Key, Default),
- true = ets:update_element(?CACHE, Key, {#entry.pid, Pid}),
- ok = khash:put(Pids, Pid, Key),
- store_key(Dbs, Key, Pid),
- {reply, {ok, Pid}, St};
- full ->
- ?EVENT(full, Key),
- {reply, full, St}
- end;
- [#entry{pid = Pid}] ->
- {reply, {ok, Pid}, St}
- end;
-
-handle_call(Msg, _From, St) ->
- {stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
-
-
-handle_cast({evict, DbName}, St) ->
- gen_server:abcast(mem3:nodes(), ?OPENER, {do_evict, DbName}),
- {noreply, St};
-
-handle_cast({refresh, DbName, DDocIds}, St) ->
- gen_server:abcast(mem3:nodes(), ?OPENER, {do_evict, DbName, DDocIds}),
- {noreply, St};
-
-handle_cast({do_evict, DbName}, St) ->
- #st{
- dbs = Dbs
- } = St,
- ToRem = case khash:lookup(Dbs, DbName) of
- {value, DDocIds} ->
- AccOut = khash:fold(DDocIds, fun(_, Keys, Acc1) ->
- khash:to_list(Keys) ++ Acc1
- end, []),
- ?EVENT(evicted, DbName),
- AccOut;
- not_found ->
- ?EVENT(evict_noop, DbName),
- []
- end,
- lists:foreach(fun({Key, Pid}) ->
- remove_entry(St, Key, Pid)
- end, ToRem),
- khash:del(Dbs, DbName),
- {noreply, St};
-
-handle_cast({do_refresh, DbName, DDocIdList}, St) ->
- #st{
- dbs = Dbs
- } = St,
- % We prepend no_ddocid to the DDocIdList below
- % so that we refresh all custom and validation
- % function entries which load data from all
- % design documents.
- case khash:lookup(Dbs, DbName) of
- {value, DDocIds} ->
- lists:foreach(fun(DDocId) ->
- case khash:lookup(DDocIds, DDocId) of
- {value, Keys} ->
- khash:fold(Keys, fun(_, Pid, _) ->
- ddoc_cache_entry:refresh(Pid)
- end, nil);
- not_found ->
- ok
- end
- end, [no_ddocid | DDocIdList]);
- not_found ->
- ok
- end,
- {noreply, St};
-
-handle_cast(Msg, St) ->
- {stop, {invalid_cast, Msg}, St}.
-
-
-handle_info({'EXIT', Pid, Reason}, #st{evictor = Pid} = St) ->
- {stop, Reason, St};
-
-handle_info({'EXIT', Pid, normal}, St) ->
- % This clause handles when an entry starts
- % up but encounters an error or uncacheable
- % response from its recover call.
- #st{
- pids = Pids
- } = St,
- {value, Key} = khash:lookup(Pids, Pid),
- khash:del(Pids, Pid),
- remove_key(St, Key),
- {noreply, St};
-
-handle_info(Msg, St) ->
- {stop, {invalid_info, Msg}, St}.
-
-
-code_change(_OldVsn, St, _Extra) ->
- {ok, St}.
-
-
-handle_db_event(ShardDbName, created, St) ->
- gen_server:cast(?MODULE, {evict, mem3:dbname(ShardDbName)}),
- {ok, St};
-
-handle_db_event(ShardDbName, deleted, St) ->
- gen_server:cast(?MODULE, {evict, mem3:dbname(ShardDbName)}),
- {ok, St};
-
-handle_db_event(_DbName, _Event, St) ->
- {ok, St}.
-
-
-lru_start(Key, DoInsert) ->
- case gen_server:call(?MODULE, {start, Key, undefined}, infinity) of
- {ok, Pid} ->
- couch_stats:increment_counter([ddoc_cache, miss]),
- Resp = ddoc_cache_entry:open(Pid, Key),
- if not DoInsert -> ok; true ->
- ddoc_cache_entry:insert(Key, Resp)
- end,
- Resp;
- full ->
- couch_stats:increment_counter([ddoc_cache, recovery]),
- ddoc_cache_entry:recover(Key)
- end.
-
-
-trim(_, 0) ->
- full;
-
-trim(St, MaxSize) ->
- CurSize = ets:info(?CACHE, memory) * erlang:system_info(wordsize),
- if CurSize =< MaxSize -> ok; true ->
- case ets:first(?LRU) of
- {_Ts, Key, Pid} ->
- remove_entry(St, Key, Pid),
- trim(St, MaxSize);
- '$end_of_table' ->
- full
- end
- end.
-
-
-remove_entry(St, Key, Pid) ->
- #st{
- pids = Pids
- } = St,
- unlink_and_flush(Pid),
- ddoc_cache_entry:shutdown(Pid),
- khash:del(Pids, Pid),
- remove_key(St, Key).
-
-
-store_key(Dbs, Key, Pid) ->
- DbName = ddoc_cache_entry:dbname(Key),
- DDocId = ddoc_cache_entry:ddocid(Key),
- case khash:lookup(Dbs, DbName) of
- {value, DDocIds} ->
- case khash:lookup(DDocIds, DDocId) of
- {value, Keys} ->
- khash:put(Keys, Key, Pid);
- not_found ->
- {ok, Keys} = khash:from_list([{Key, Pid}]),
- khash:put(DDocIds, DDocId, Keys)
- end;
- not_found ->
- {ok, Keys} = khash:from_list([{Key, Pid}]),
- {ok, DDocIds} = khash:from_list([{DDocId, Keys}]),
- khash:put(Dbs, DbName, DDocIds)
- end.
-
-
-remove_key(St, Key) ->
- #st{
- dbs = Dbs
- } = St,
- DbName = ddoc_cache_entry:dbname(Key),
- DDocId = ddoc_cache_entry:ddocid(Key),
- {value, DDocIds} = khash:lookup(Dbs, DbName),
- {value, Keys} = khash:lookup(DDocIds, DDocId),
- khash:del(Keys, Key),
- case khash:size(Keys) of
- 0 -> khash:del(DDocIds, DDocId);
- _ -> ok
- end,
- case khash:size(DDocIds) of
- 0 -> khash:del(Dbs, DbName);
- _ -> ok
- end.
-
-
-unlink_and_flush(Pid) ->
- erlang:unlink(Pid),
- % Its possible that the entry process has already exited before
- % we unlink it so we have to flush out a possible 'EXIT'
- % message sitting in our message queue. Notice that we're
- % maintaining the assertion that all entry processes only
- % ever exit normally.
- receive
- {'EXIT', Pid, normal} ->
- ok
- after 0 ->
- ok
- end.
diff --git a/src/ddoc_cache/src/ddoc_cache_opener.erl b/src/ddoc_cache/src/ddoc_cache_opener.erl
deleted file mode 100644
index 52de54217..000000000
--- a/src/ddoc_cache/src/ddoc_cache_opener.erl
+++ /dev/null
@@ -1,66 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_opener).
--behaviour(gen_server).
--vsn(1).
-
-
--export([
- start_link/0
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3
-]).
-
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-
-init(_) ->
- {ok, nil}.
-
-terminate(_Reason, _St) ->
- ok.
-
-
-handle_call(Msg, _From, St) ->
- {stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
-
-
-% The do_evict clauses are upgrades while we're
-% in a rolling reboot.
-handle_cast({do_evict, _} = Msg, St) ->
- gen_server:cast(ddoc_cache_lru, Msg),
- {noreply, St};
-
-handle_cast({do_evict, DbName, DDocIds}, St) ->
- gen_server:cast(ddoc_cache_lru, {do_refresh, DbName, DDocIds}),
- {noreply, St};
-
-handle_cast(Msg, St) ->
- {stop, {invalid_cast, Msg}, St}.
-
-
-handle_info(Msg, St) ->
- {stop, {invalid_info, Msg}, St}.
-
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
diff --git a/src/ddoc_cache/src/ddoc_cache_sup.erl b/src/ddoc_cache/src/ddoc_cache_sup.erl
deleted file mode 100644
index 6fff9ef4f..000000000
--- a/src/ddoc_cache/src/ddoc_cache_sup.erl
+++ /dev/null
@@ -1,46 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_sup).
--behaviour(supervisor).
-
-
--export([
- start_link/0,
- init/1
-]).
-
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-
-init([]) ->
- Children = [
- {
- ddoc_cache_lru,
- {ddoc_cache_lru, start_link, []},
- permanent,
- 5000,
- worker,
- [ddoc_cache_lru]
- },
- {
- ddoc_cache_opener,
- {ddoc_cache_opener, start_link, []},
- permanent,
- 5000,
- worker,
- [ddoc_cache_opener]
- }
- ],
- {ok, {{one_for_one, 25, 1}, Children}}.
diff --git a/src/ddoc_cache/src/ddoc_cache_value.erl b/src/ddoc_cache/src/ddoc_cache_value.erl
deleted file mode 100644
index 21a5bb549..000000000
--- a/src/ddoc_cache/src/ddoc_cache_value.erl
+++ /dev/null
@@ -1,27 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_value).
-
-
--export([
- wrap/1,
- unwrap/1
-]).
-
-
-wrap(Value) ->
- {?MODULE, term_to_binary(Value)}.
-
-
-unwrap({?MODULE, Bin}) when is_binary(Bin) ->
- binary_to_term(Bin).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_basic_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_basic_test.erl
deleted file mode 100644
index b576d88bb..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_basic_test.erl
+++ /dev/null
@@ -1,175 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_basic_test).
-
-
--export([
- recover/1
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
--include("ddoc_cache_test.hrl").
-
-
-recover(DbName) ->
- {ok, {DbName, totes_custom}}.
-
-
-start_couch() ->
- Ctx = ddoc_cache_tutil:start_couch(),
- meck:new(ddoc_cache_ev, [passthrough]),
- Ctx.
-
-
-stop_couch(Ctx) ->
- meck:unload(),
- ddoc_cache_tutil:stop_couch(Ctx).
-
-
-check_basic_test_() ->
- {
- setup,
- fun start_couch/0,
- fun stop_couch/1,
- ddoc_cache_tutil:with([
- {"cache_ddoc", fun cache_ddoc/1},
- {"cache_ddoc_rev", fun cache_ddoc_rev/1},
- {"cache_vdu", fun cache_vdu/1},
- {"cache_custom", fun cache_custom/1},
- {"cache_ddoc_refresher_unchanged", fun cache_ddoc_refresher_unchanged/1},
- {"dont_cache_not_found", fun dont_cache_not_found/1},
- {"deprecated_api_works", fun deprecated_api_works/1}
- ])
- }.
-
-
-check_no_vdu_test_() ->
- {
- setup,
- fun() -> ddoc_cache_tutil:start_couch([{write_ddocs, false}]) end,
- fun ddoc_cache_tutil:stop_couch/1,
- ddoc_cache_tutil:with([
- {"cache_no_vdu_no_ddoc", fun cache_no_vdu_no_ddoc/1},
- {"cache_no_vdu_empty_ddoc", fun cache_no_vdu_empty_ddoc/1}
- ])
- }.
-
-
-cache_ddoc({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- ?assertEqual(0, ets:info(?CACHE, size)),
- Resp1 = ddoc_cache:open_doc(DbName, ?FOOBAR),
- ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp1),
- meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
- meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
- ?assertEqual(2, ets:info(?CACHE, size)),
- Resp2 = ddoc_cache:open_doc(DbName, ?FOOBAR),
- ?assertEqual(Resp1, Resp2),
- ?assertEqual(2, ets:info(?CACHE, size)).
-
-
-cache_ddoc_rev({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR),
- ?assertEqual(0, ets:info(?CACHE, size)),
- Resp1 = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
- ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp1),
- meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
- meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
- ?assertEqual(2, ets:info(?CACHE, size)),
- Resp2 = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
- ?assertEqual(Resp1, Resp2),
- ?assertEqual(2, ets:info(?CACHE, size)),
-
- % Assert that the non-rev cache entry is separate
- Resp3 = ddoc_cache:open_doc(DbName, ?FOOBAR),
- ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp3),
- ?assertEqual(2, ets:info(?CACHE, size)).
-
-
-cache_vdu({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- ?assertEqual(0, ets:info(?CACHE, size)),
- Resp1 = ddoc_cache:open_validation_funs(DbName),
- ?assertMatch({ok, [_]}, Resp1),
- ?assertEqual(1, ets:info(?CACHE, size)),
- Resp2 = ddoc_cache:open_validation_funs(DbName),
- ?assertEqual(Resp1, Resp2),
- ?assertEqual(1, ets:info(?CACHE, size)).
-
-
-cache_custom({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- ?assertEqual(0, ets:info(?CACHE, size)),
- Resp1 = ddoc_cache:open_custom(DbName, ?MODULE),
- ?assertMatch({ok, {DbName, totes_custom}}, Resp1),
- ?assertEqual(1, ets:info(?CACHE, size)),
- Resp2 = ddoc_cache:open_custom(DbName, ?MODULE),
- ?assertEqual(Resp1, Resp2),
- ?assertEqual(1, ets:info(?CACHE, size)).
-
-
-cache_ddoc_refresher_unchanged({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- ?assertEqual(0, ets:info(?CACHE, size)),
- ddoc_cache:open_doc(DbName, ?FOOBAR),
- meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
- meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
- Tab1 = [_, _] = lists:sort(ets:tab2list(?CACHE)),
- ddoc_cache:open_doc(DbName, ?FOOBAR),
- meck:wait(ddoc_cache_ev, event, [accessed, '_'], 1000),
- Tab2 = lists:sort(ets:tab2list(?CACHE)),
- ?assertEqual(Tab2, Tab1).
-
-
-dont_cache_not_found({DbName, _}) ->
- DDocId = <<"_design/not_found">>,
- ddoc_cache_tutil:clear(),
- Resp = ddoc_cache:open_doc(DbName, DDocId),
- ?assertEqual({not_found, missing}, Resp),
- ?assertEqual(0, ets:info(?CACHE, size)),
- ?assertEqual(0, ets:info(?LRU, size)).
-
-
-deprecated_api_works({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- {ok, _} = ddoc_cache:open(DbName, ?FOOBAR),
- {ok, _} = ddoc_cache:open(DbName, <<"foobar">>),
- {ok, _} = ddoc_cache:open(DbName, ?MODULE),
- {ok, _} = ddoc_cache:open(DbName, validation_funs).
-
-
-cache_no_vdu_no_ddoc({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- Resp = ddoc_cache:open_validation_funs(DbName),
- ?assertEqual({ok, []}, Resp),
- ?assertEqual(1, ets:info(?CACHE, size)),
- ?assertEqual(1, ets:info(?LRU, size)).
-
-
-cache_no_vdu_empty_ddoc({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- DDoc = #doc{
- id = <<"_design/no_vdu">>,
- body = {[]}
- },
- {ok, _} = fabric:update_docs(DbName, [DDoc], [?ADMIN_CTX]),
- Resp = ddoc_cache:open_validation_funs(DbName),
- ?assertEqual({ok, []}, Resp),
- ?assertEqual(1, ets:info(?CACHE, size)),
- ?assertEqual(1, ets:info(?LRU, size)).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_coverage_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_coverage_test.erl
deleted file mode 100644
index b1a185bdc..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_coverage_test.erl
+++ /dev/null
@@ -1,77 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_coverage_test).
-
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
--include("ddoc_cache_test.hrl").
-
-
-coverage_test_() ->
- {
- setup,
- fun ddoc_cache_tutil:start_couch/0,
- fun ddoc_cache_tutil:stop_couch/1,
- [
- fun restart_lru/0,
- fun stop_on_evictor_death/0
- ]
- }.
-
-
-restart_lru() ->
- send_bad_messages(ddoc_cache_lru),
- ?assertEqual(ok, ddoc_cache_lru:terminate(bang, {st, a, b, c})),
- ?assertEqual({ok, foo}, ddoc_cache_lru:code_change(1, foo, [])).
-
-
-stop_on_evictor_death() ->
- meck:new(ddoc_cache_ev, [passthrough]),
- try
- Lru = whereis(ddoc_cache_lru),
- State = sys:get_state(Lru),
- Evictor = element(4, State),
- Ref = erlang:monitor(process, Lru),
- exit(Evictor, shutdown),
- receive
- {'DOWN', Ref, _, _, Reason} ->
- ?assertEqual(shutdown, Reason)
- end,
- meck:wait(ddoc_cache_ev, event, [lru_init, '_'], 1000),
- ?assert(whereis(ddoc_cache_lru) /= Lru)
- after
- meck:unload()
- end.
-
-
-send_bad_messages(Name) ->
- wait_for_restart(Name, fun() ->
- ?assertEqual({invalid_call, foo}, gen_server:call(Name, foo))
- end),
- wait_for_restart(Name, fun() ->
- gen_server:cast(Name, foo)
- end),
- wait_for_restart(Name, fun() ->
- whereis(Name) ! foo
- end).
-
-
-wait_for_restart(Server, Fun) ->
- Ref = erlang:monitor(process, whereis(Server)),
- Fun(),
- receive
- {'DOWN', Ref, _, _, _} ->
- ok
- end,
- ?assert(is_pid(test_util:wait_process(Server))).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_disabled_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_disabled_test.erl
deleted file mode 100644
index d46bdde32..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_disabled_test.erl
+++ /dev/null
@@ -1,62 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_disabled_test).
-
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
--include("ddoc_cache_test.hrl").
-
-
-start_couch() ->
- Ctx = ddoc_cache_tutil:start_couch(),
- config:set("ddoc_cache", "max_size", "0", false),
- Ctx.
-
-
-check_disabled_test_() ->
- {
- setup,
- fun start_couch/0,
- fun ddoc_cache_tutil:stop_couch/1,
- ddoc_cache_tutil:with([
- {"resp_ok", fun resp_ok/1},
- {"resp_not_found", fun resp_not_found/1},
- {"check_effectively_disabled", fun check_effectively_disabled/1}
- ])
- }.
-
-
-resp_ok({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- Resp = ddoc_cache:open_doc(DbName, ?FOOBAR),
- ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp),
- ?assertEqual(0, ets:info(?CACHE, size)),
- ?assertEqual(0, ets:info(?LRU, size)).
-
-
-resp_not_found({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- Resp = ddoc_cache:open_doc(DbName, <<"_design/not_found">>),
- ?assertEqual({not_found, missing}, Resp),
- ?assertEqual(0, ets:info(?CACHE, size)),
- ?assertEqual(0, ets:info(?LRU, size)).
-
-
-check_effectively_disabled({DbName, _}) ->
- config:set("ddoc_cache", "max_size", "1", false),
- ddoc_cache_tutil:clear(),
- Resp = ddoc_cache:open_doc(DbName, ?FOOBAR),
- ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp),
- ?assertEqual(0, ets:info(?CACHE, size)),
- ?assertEqual(0, ets:info(?LRU, size)).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_entry_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_entry_test.erl
deleted file mode 100644
index c992bea8d..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_entry_test.erl
+++ /dev/null
@@ -1,159 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_entry_test).
-
-
--export([
- recover/1
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
--include("ddoc_cache_test.hrl").
-
-
-recover(<<"foo">>) ->
- timer:sleep(30000);
-
-recover(DbName) ->
- {ok, {DbName, such_custom}}.
-
-
-start_couch() ->
- Ctx = ddoc_cache_tutil:start_couch(),
- meck:new(ddoc_cache_ev, [passthrough]),
- Ctx.
-
-
-stop_couch(Ctx) ->
- meck:unload(),
- ddoc_cache_tutil:stop_couch(Ctx).
-
-
-check_entry_test_() ->
- {
- setup,
- fun start_couch/0,
- fun stop_couch/1,
- ddoc_cache_tutil:with([
- {"cancel_and_replace_opener", fun cancel_and_replace_opener/1},
- {"condenses_access_messages", fun condenses_access_messages/1},
- {"kill_opener_on_terminate", fun kill_opener_on_terminate/1},
- {"evict_when_not_accessed", fun evict_when_not_accessed/1},
- {"open_dead_entry", fun open_dead_entry/1},
- {"handles_bad_messages", fun handles_bad_messages/1},
- {"handles_code_change", fun handles_code_change/1}
- ])
- }.
-
-
-cancel_and_replace_opener(_) ->
- Key = {ddoc_cache_entry_custom, {<<"foo">>, ?MODULE}},
- true = ets:insert_new(?CACHE, #entry{key = Key}),
- {ok, Entry} = ddoc_cache_entry:start_link(Key, undefined),
- Opener1 = element(4, sys:get_state(Entry)),
- Ref1 = erlang:monitor(process, Opener1),
- gen_server:cast(Entry, force_refresh),
- receive {'DOWN', Ref1, _, _, _} -> ok end,
- Opener2 = element(4, sys:get_state(Entry)),
- ?assert(Opener2 /= Opener1),
- ?assert(is_process_alive(Opener2)),
- % Clean up after ourselves
- unlink(Entry),
- ddoc_cache_entry:shutdown(Entry).
-
-
-condenses_access_messages({DbName, _}) ->
- meck:reset(ddoc_cache_ev),
- Key = {ddoc_cache_entry_custom, {DbName, ?MODULE}},
- true = ets:insert(?CACHE, #entry{key = Key}),
- {ok, Entry} = ddoc_cache_entry:start_link(Key, undefined),
- erlang:suspend_process(Entry),
- lists:foreach(fun(_) ->
- gen_server:cast(Entry, accessed)
- end, lists:seq(1, 100)),
- erlang:resume_process(Entry),
- meck:wait(1, ddoc_cache_ev, event, [accessed, Key], 1000),
- ?assertError(
- timeout,
- meck:wait(2, ddoc_cache_ev, event, [accessed, Key], 100)
- ),
- unlink(Entry),
- ddoc_cache_entry:shutdown(Entry).
-
-
-kill_opener_on_terminate(_) ->
- Pid = spawn(fun() -> receive _ -> ok end end),
- ?assert(is_process_alive(Pid)),
- St = {st, key, val, Pid, waiters, ts, accessed},
- ?assertEqual(ok, ddoc_cache_entry:terminate(normal, St)),
- ?assert(not is_process_alive(Pid)).
-
-
-evict_when_not_accessed(_) ->
- meck:reset(ddoc_cache_ev),
- Key = {ddoc_cache_entry_custom, {<<"bar">>, ?MODULE}},
- true = ets:insert_new(?CACHE, #entry{key = Key}),
- {ok, Entry} = ddoc_cache_entry:start_link(Key, undefined),
- Ref = erlang:monitor(process, Entry),
- AccessCount1 = element(7, sys:get_state(Entry)),
- ?assertEqual(1, AccessCount1),
- ok = gen_server:cast(Entry, refresh),
-
- meck:wait(ddoc_cache_ev, event, [update_noop, Key], 1000),
-
- AccessCount2 = element(7, sys:get_state(Entry)),
- ?assertEqual(0, AccessCount2),
- ok = gen_server:cast(Entry, refresh),
- receive {'DOWN', Ref, _, _, Reason} -> Reason end,
- ?assertEqual(normal, Reason),
- ?assertEqual(0, ets:info(?CACHE, size)).
-
-
-open_dead_entry({DbName, _}) ->
- Pid = spawn(fun() -> ok end),
- Key = {ddoc_cache_entry_custom, {DbName, ?MODULE}},
- ?assertEqual(recover(DbName), ddoc_cache_entry:open(Pid, Key)).
-
-
-handles_bad_messages(_) ->
- CallExpect = {stop, {bad_call, foo}, {bad_call, foo}, baz},
- CastExpect = {stop, {bad_cast, foo}, bar},
- InfoExpect = {stop, {bad_info, foo}, bar},
- ?assertEqual(CallExpect, ddoc_cache_entry:handle_call(foo, bar, baz)),
- ?assertEqual(CastExpect, ddoc_cache_entry:handle_cast(foo, bar)),
- ?assertEqual(InfoExpect, ddoc_cache_entry:handle_info(foo, bar)).
-
-
-handles_code_change(_) ->
- CCExpect = {ok, bar},
- ?assertEqual(CCExpect, ddoc_cache_entry:code_change(foo, bar, baz)).
-
-
-handles_bad_shutdown_test_() ->
- {timeout, 10, ?_test(begin
- ErrorPid = spawn(fun() ->
- receive
- _ -> exit(bad_shutdown)
- end
- end),
- ?assertExit(bad_shutdown, ddoc_cache_entry:shutdown(ErrorPid)),
- NotDeadYetPid = spawn(fun() ->
- timer:sleep(infinity)
- end),
- ?assertExit(
- {timeout, {entry_shutdown, NotDeadYetPid}},
- ddoc_cache_entry:shutdown(NotDeadYetPid)
- )
- end)}.
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_ev.erl b/src/ddoc_cache/test/eunit/ddoc_cache_ev.erl
deleted file mode 100644
index a451342cf..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_ev.erl
+++ /dev/null
@@ -1,21 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_ev).
-
--export([
- event/2
-]).
-
-
-event(Name, Arg) ->
- couch_log:error("~s :: ~s :: ~p", [?MODULE, Name, Arg]).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_eviction_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_eviction_test.erl
deleted file mode 100644
index bd61afc37..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_eviction_test.erl
+++ /dev/null
@@ -1,96 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_eviction_test).
-
-
--export([
- recover/1
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
--include_lib("mem3/include/mem3.hrl").
--include("ddoc_cache_test.hrl").
-
-
-recover(DbName) ->
- {ok, {DbName, totes_custom}}.
-
-
-start_couch() ->
- Ctx = ddoc_cache_tutil:start_couch(),
- meck:new(ddoc_cache_ev, [passthrough]),
- Ctx.
-
-
-stop_couch(Ctx) ->
- meck:unload(),
- ddoc_cache_tutil:stop_couch(Ctx).
-
-
-check_eviction_test_() ->
- {
- setup,
- fun start_couch/0,
- fun stop_couch/1,
- ddoc_cache_tutil:with([
- {"evict_all", fun evict_all/1},
- {"dont_evict_all_unrelated", fun dont_evict_all_unrelated/1},
- {"check_upgrade_clause", fun check_upgrade_clause/1}
- ])
- }.
-
-
-evict_all({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR),
- #shard{name = ShardName} = hd(mem3:shards(DbName)),
- {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
- {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
- {ok, _} = ddoc_cache:open_validation_funs(DbName),
- {ok, _} = ddoc_cache:open_custom(DbName, ?MODULE),
- ?assertEqual(4, ets:info(?CACHE, size)),
- {ok, _} = ddoc_cache_lru:handle_db_event(ShardName, deleted, foo),
- meck:wait(ddoc_cache_ev, event, [evicted, DbName], 1000),
- meck:wait(4, ddoc_cache_ev, event, [removed, '_'], 1000),
- ?assertEqual(0, ets:info(?CACHE, size)).
-
-
-dont_evict_all_unrelated({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR),
- {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
- {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
- {ok, _} = ddoc_cache:open_validation_funs(DbName),
- {ok, _} = ddoc_cache:open_custom(DbName, ?MODULE),
- ?assertEqual(4, ets:info(?CACHE, size)),
- ShardName = <<"shards/00000000-ffffffff/test.1384769918">>,
- {ok, _} = ddoc_cache_lru:handle_db_event(ShardName, deleted, foo),
- meck:wait(ddoc_cache_ev, event, [evict_noop, <<"test">>], 1000),
- ?assertEqual(4, ets:info(?CACHE, size)).
-
-
-check_upgrade_clause({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
- meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
- meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
- ?assertEqual(2, ets:info(?CACHE, size)),
- gen_server:cast(ddoc_cache_opener, {do_evict, DbName}),
- meck:wait(ddoc_cache_ev, event, [evicted, DbName], 1000),
- meck:wait(2, ddoc_cache_ev, event, [removed, '_'], 1000),
- ?assertEqual(0, ets:info(?CACHE, size)).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl
deleted file mode 100644
index 9a5391587..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl
+++ /dev/null
@@ -1,219 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_lru_test).
-
-
--export([
- recover/1
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
--include("ddoc_cache_test.hrl").
-
-
-recover(<<"pause", _/binary>>) ->
- receive go -> ok end,
- {ok, paused};
-
-recover(<<"big", _/binary>>) ->
- {ok, [couch_rand:uniform() || _ <- lists:seq(1, 8192)]};
-
-recover(DbName) ->
- {ok, DbName}.
-
-
-start_couch() ->
- Ctx = ddoc_cache_tutil:start_couch(),
- meck:new(ddoc_cache_ev, [passthrough]),
- Ctx.
-
-
-stop_couch(Ctx) ->
- meck:unload(),
- ddoc_cache_tutil:stop_couch(Ctx).
-
-
-check_not_started_test() ->
- % Starting couch, but not ddoc_cache
- {
- setup,
- fun test_util:start_couch/0,
- fun test_util:stop_couch/1,
- [
- fun(_) ->
- Key = {ddoc_cache_entry_custom, {<<"dbname">>, ?MODULE}},
- ?assertEqual({ok, <<"dbname">>}, ddoc_cache_lru:open(Key))
- end
- ]
- }.
-
-check_lru_test_() ->
- {
- setup,
- fun start_couch/0,
- fun stop_couch/1,
- ddoc_cache_tutil:with([
- {"check_multi_start", fun check_multi_start/1},
- {"check_multi_open", fun check_multi_open/1},
- {"check_capped_size", fun check_capped_size/1},
- {"check_cache_refill", fun check_cache_refill/1},
- {"check_evict_and_exit", fun check_evict_and_exit/1}
- ])
- }.
-
-
-check_multi_start(_) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- Key = {ddoc_cache_entry_custom, {<<"pause">>, ?MODULE}},
- % These will all get sent through ddoc_cache_lru
- Clients = lists:map(fun(_) ->
- spawn_monitor(fun() ->
- ddoc_cache_lru:open(Key)
- end)
- end, lists:seq(1, 10)),
- meck:wait(ddoc_cache_ev, event, [started, Key], 1000),
- lists:foreach(fun({Pid, _Ref}) ->
- ?assert(is_process_alive(Pid))
- end, Clients),
- [#entry{pid = Pid}] = ets:tab2list(?CACHE),
- Opener = element(4, sys:get_state(Pid)),
- OpenerRef = erlang:monitor(process, Opener),
- ?assert(is_process_alive(Opener)),
- Opener ! go,
- receive {'DOWN', OpenerRef, _, _, _} -> ok end,
- lists:foreach(fun({_, Ref}) ->
- receive
- {'DOWN', Ref, _, _, normal} -> ok
- end
- end, Clients).
-
-
-check_multi_open(_) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- Key = {ddoc_cache_entry_custom, {<<"pause">>, ?MODULE}},
- % We wait after the first client so that
- % the rest of the clients go directly to
- % ddoc_cache_entry bypassing ddoc_cache_lru
- Client1 = spawn_monitor(fun() ->
- ddoc_cache_lru:open(Key)
- end),
- meck:wait(ddoc_cache_ev, event, [started, Key], 1000),
- Clients = [Client1] ++ lists:map(fun(_) ->
- spawn_monitor(fun() ->
- ddoc_cache_lru:open(Key)
- end)
- end, lists:seq(1, 9)),
- lists:foreach(fun({Pid, _Ref}) ->
- ?assert(is_process_alive(Pid))
- end, Clients),
- [#entry{pid = Pid}] = ets:tab2list(?CACHE),
- Opener = element(4, sys:get_state(Pid)),
- OpenerRef = erlang:monitor(process, Opener),
- ?assert(is_process_alive(Opener)),
- Opener ! go,
- receive {'DOWN', OpenerRef, _, _, _} -> ok end,
- lists:foreach(fun({_, Ref}) ->
- receive {'DOWN', Ref, _, _, normal} -> ok end
- end, Clients).
-
-
-check_capped_size(_) ->
- % The extra factor of two in the size checks is
- % a fudge factor. We don't reject entries from
- % the cache if they would put us over the limit
- % as we don't have the size information a
- % priori.
- config:set("ddoc_cache", "max_size", "1048576", false),
- MaxSize = 1048576,
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- lists:foreach(fun(I) ->
- DbName = list_to_binary("big_" ++ integer_to_list(I)),
- ddoc_cache:open_custom(DbName, ?MODULE),
- meck:wait(I, ddoc_cache_ev, event, [started, '_'], 1000),
- ?assert(cache_size() < MaxSize * 2)
- end, lists:seq(1, 25)),
- lists:foreach(fun(I) ->
- DbName = list_to_binary("big_" ++ integer_to_list(I)),
- ddoc_cache:open_custom(DbName, ?MODULE),
- meck:wait(I, ddoc_cache_ev, event, [started, '_'], 1000),
- ?assert(cache_size() < MaxSize * 2)
- end, lists:seq(26, 100)).
-
-
-check_cache_refill({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
-
- InitDDoc = fun(I) ->
- NumBin = list_to_binary(integer_to_list(I)),
- DDocId = <<"_design/", NumBin/binary>>,
- Doc = #doc{id = DDocId, body = {[]}},
- {ok, _} = fabric:update_doc(DbName, Doc, [?ADMIN_CTX]),
- {ok, _} = ddoc_cache:open_doc(DbName, DDocId),
- {ddoc_cache_entry_ddocid, {DbName, DDocId}}
- end,
-
- lists:foreach(fun(I) ->
- Key = InitDDoc(I),
- meck:wait(ddoc_cache_ev, event, [started, Key], 1000)
- end, lists:seq(1, 5)),
-
- ShardName = mem3:name(hd(mem3:shards(DbName))),
- {ok, _} = ddoc_cache_lru:handle_db_event(ShardName, deleted, foo),
- meck:wait(ddoc_cache_ev, event, [evicted, DbName], 1000),
- meck:wait(10, ddoc_cache_ev, event, [removed, '_'], 1000),
- ?assertEqual(0, ets:info(?CACHE, size)),
-
- lists:foreach(fun(I) ->
- Key = InitDDoc(I),
- meck:wait(ddoc_cache_ev, event, [started, Key], 1000)
- end, lists:seq(6, 10)).
-
-
-check_evict_and_exit(_) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
-
- Key = {ddoc_cache_entry_custom, {<<"dbname">>, ?MODULE}},
- ?assertEqual({ok, <<"dbname">>}, ddoc_cache_lru:open(Key)),
- [#entry{key = Key, pid = Pid}] = ets:tab2list(?CACHE),
-
- erlang:monitor(process, whereis(ddoc_cache_lru)),
-
- % Pause the LRU so we can queue multiple messages
- erlang:suspend_process(whereis(ddoc_cache_lru)),
-
- gen_server:cast(ddoc_cache_lru, {do_evict, <<"dbname">>}),
- whereis(ddoc_cache_lru) ! {'EXIT', Pid, normal},
-
- % Resume the LRU and ensure that it doesn't die
- erlang:resume_process(whereis(ddoc_cache_lru)),
-
- meck:wait(ddoc_cache_ev, event, [evicted, <<"dbname">>], 1000),
-
- % Make sure it can handle another message
- OtherKey = {ddoc_cache_entry_custom, {<<"otherdb">>, ?MODULE}},
- ?assertEqual({ok, <<"otherdb">>}, ddoc_cache_lru:open(OtherKey)),
-
- % And verify our monitor doesn't fire
- timer:sleep(500),
- ?assertEqual({messages, []}, process_info(self(), messages)).
-
-
-cache_size() ->
- ets:info(?CACHE, memory) * erlang:system_info(wordsize).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl
deleted file mode 100644
index 96682910c..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl
+++ /dev/null
@@ -1,87 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_no_cache_test).
-
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
-
-
-ddoc(DDocId) ->
- {ok, #doc{
- id = DDocId,
- revs = {1, [<<"deadbeefdeadbeef">>]},
- body = {[
- {<<"ohai">>, null}
- ]}
- }}.
-
-
-not_found(_DDocId) ->
- {not_found, missing}.
-
-
-return_error(_DDocId) ->
- {error, timeout}.
-
-
-no_cache_test_() ->
- {
- "ddoc_cache no cache test",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreachx,
- fun setup/1,
- fun teardown/2,
- [
- {fun ddoc/1, fun no_cache_open_ok_test/2},
- {fun not_found/1, fun no_cache_open_not_found_test/2},
- {fun return_error/1, fun no_cache_open_error_test/2}
- ]
- }
- }
- }.
-
-setup_all() ->
- Ctx = ddoc_cache_tutil:start_couch(),
- meck:new(fabric),
- Ctx.
-
-teardown_all(Ctx) ->
- meck:unload(),
- ddoc_cache_tutil:stop_couch(Ctx).
-
-setup(Resp) ->
- meck:expect(fabric, open_doc, fun(_, DDocId, _) ->
- Resp(DDocId)
- end).
-
-teardown(_, _) ->
- meck:unload().
-
-no_cache_open_ok_test(_, _) ->
- Resp = ddoc_cache:open_doc(<<"foo">>, <<"bar">>),
- ?_assertEqual(ddoc(<<"bar">>), Resp).
-
-
-no_cache_open_not_found_test(_, _) ->
- Resp = ddoc_cache:open_doc(<<"foo">>, <<"baz">>),
- ?_assertEqual(not_found(<<"baz">>), Resp).
-
-
-no_cache_open_error_test(_, _) ->
- Resp = ddoc_cache:open_doc(<<"foo">>, <<"bif">>),
- ?_assertEqual(return_error(<<"bif">>), Resp).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_open_error_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_open_error_test.erl
deleted file mode 100644
index c7379d26a..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_open_error_test.erl
+++ /dev/null
@@ -1,46 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_open_error_test).
-
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
--include("ddoc_cache_test.hrl").
-
-
-start_couch() ->
- Ctx = ddoc_cache_tutil:start_couch(),
- meck:expect(fabric, open_doc, fun(_, ?FOOBAR, _) ->
- erlang:error(test_kaboom)
- end),
- Ctx.
-
-
-stop_couch(Ctx) ->
- meck:unload(),
- ddoc_cache_tutil:stop_couch(Ctx).
-
-
-check_open_error_test_() ->
- {
- setup,
- fun start_couch/0,
- fun stop_couch/1,
- ddoc_cache_tutil:with([
- {"handle_open_error", fun handle_open_error/1}
- ])
- }.
-
-
-handle_open_error({DbName, _}) ->
- ?assertError(test_kaboom, ddoc_cache:open_doc(DbName, ?FOOBAR)).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_open_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_open_test.erl
deleted file mode 100644
index 73d644f71..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_open_test.erl
+++ /dev/null
@@ -1,107 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_open_test).
-
--export([
- dbname/1,
- ddocid/1,
- recover/1,
- insert/2
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
--include("ddoc_cache_test.hrl").
-
-
-%% behaviour callbacks
-dbname(DbName) ->
- DbName.
-
-
-ddocid(_) ->
- no_ddocid.
-
-
-recover({deleted, _DbName}) ->
- erlang:error(database_does_not_exist);
-recover(DbName) ->
- ddoc_cache_entry_validation_funs:recover(DbName).
-
-
-insert(_, _) ->
- ok.
-
-
-start_couch() ->
- Ctx = ddoc_cache_tutil:start_couch(),
- meck:new(ddoc_cache_entry_validation_funs, [passthrough]),
- meck:expect(ddoc_cache_entry_validation_funs, recover,
- ['_'], meck:passthrough()),
- Ctx.
-
-
-stop_couch(Ctx) ->
- meck:unload(),
- ddoc_cache_tutil:stop_couch(Ctx).
-
-
-check_open_error_test_() ->
- {
- setup,
- fun start_couch/0,
- fun stop_couch/1,
- ddoc_cache_tutil:with([
- {"should_return_database_does_not_exist",
- fun should_return_database_does_not_exist/1},
- {"should_not_call_recover_when_database_does_not_exist",
- fun should_not_call_recover_when_database_does_not_exist/1},
- {"should_call_recover_when_needed",
- fun should_call_recover_when_needed/1},
-            {"should_not_crash_lru_process",
- fun should_not_crash_lru_process/1}
- ])
- }.
-
-
-should_return_database_does_not_exist({DbName, _}) ->
- ?assertError(
- database_does_not_exist,
- ddoc_cache_lru:open({?MODULE, {deleted, DbName}})).
-
-
-should_not_call_recover_when_database_does_not_exist({DbName, _}) ->
- meck:reset(ddoc_cache_entry_validation_funs),
- ?assertError(
- database_does_not_exist,
- ddoc_cache_lru:open({?MODULE, {deleted, DbName}})),
- ?assertError(
- timeout,
- meck:wait(1, ddoc_cache_entry_validation_funs, recover, '_', 100)).
-
-
-should_call_recover_when_needed({DbName, _}) ->
- meck:reset(ddoc_cache_entry_validation_funs),
- ddoc_cache_lru:open({?MODULE, DbName}),
- ?assertEqual(
- ok,
- meck:wait(1, ddoc_cache_entry_validation_funs, recover, '_', 500)).
-
-
-should_not_crash_lru_process({DbName, _}) ->
- LRUPid = whereis(ddoc_cache_lru),
- ?assert(is_process_alive(LRUPid)),
- ?assertError(
- database_does_not_exist,
- ddoc_cache_lru:open({?MODULE, {deleted, DbName}})),
- ?assert(is_process_alive(LRUPid)).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_opener_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_opener_test.erl
deleted file mode 100644
index c3846360c..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_opener_test.erl
+++ /dev/null
@@ -1,33 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_opener_test).
-
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
--include("ddoc_cache_test.hrl").
-
-
-empty_hull_test() ->
- InitExpect = {ok, nil},
- TermExpect = ok,
- CallExpect = {stop, {invalid_call, foo}, {invalid_call, foo}, baz},
- CastExpect = {stop, {invalid_cast, foo}, bar},
- InfoExpect = {stop, {invalid_info, foo}, bar},
- CCExpect = {ok, bar},
- ?assertEqual(InitExpect, ddoc_cache_opener:init(foo)),
- ?assertEqual(TermExpect, ddoc_cache_opener:terminate(foo, bar)),
- ?assertEqual(CallExpect, ddoc_cache_opener:handle_call(foo, bar, baz)),
- ?assertEqual(CastExpect, ddoc_cache_opener:handle_cast(foo, bar)),
- ?assertEqual(InfoExpect, ddoc_cache_opener:handle_info(foo, bar)),
- ?assertEqual(CCExpect, ddoc_cache_opener:code_change(foo, bar, baz)).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_refresh_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_refresh_test.erl
deleted file mode 100644
index 24ae346d4..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_refresh_test.erl
+++ /dev/null
@@ -1,174 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_refresh_test).
-
-
--export([
- recover/1
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
--include("ddoc_cache_test.hrl").
-
-
-recover(DbName) ->
- {ok, {DbName, rand_string()}}.
-
-
-start_couch() ->
- Ctx = ddoc_cache_tutil:start_couch(),
- meck:new(ddoc_cache_ev, [passthrough]),
- Ctx.
-
-
-stop_couch(Ctx) ->
- meck:unload(),
- ddoc_cache_tutil:stop_couch(Ctx).
-
-
-check_refresh_test_() ->
- {
- setup,
- fun start_couch/0,
- fun stop_couch/1,
- ddoc_cache_tutil:with([
- {"refresh_ddoc", fun refresh_ddoc/1},
- {"refresh_ddoc_rev", fun refresh_ddoc_rev/1},
- {"refresh_vdu", fun refresh_vdu/1},
- {"refresh_custom", fun refresh_custom/1},
- {"refresh_multiple", fun refresh_multiple/1},
- {"check_upgrade_clause", fun check_upgrade_clause/1}
- ])
- }.
-
-
-refresh_ddoc({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
- meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
- meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
-
- ?assertEqual(2, ets:info(?CACHE, size)),
- [#entry{key = Key, val = DDoc}, _] = lists:sort(ets:tab2list(?CACHE)),
- NewDDoc = DDoc#doc{
- body = {[{<<"foo">>, <<"baz">>}]}
- },
- {ok, {Depth, RevId}} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
- Expect = NewDDoc#doc{
- revs = {Depth, [RevId | element(2, DDoc#doc.revs)]}
- },
- meck:wait(ddoc_cache_ev, event, [updated, {Key, Expect}], 1000),
- ?assertMatch({ok, Expect}, ddoc_cache:open_doc(DbName, ?FOOBAR)),
- ?assertEqual(2, ets:info(?CACHE, size)).
-
-
-refresh_ddoc_rev({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR),
- {ok, RevDDoc} = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
-
- meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
- meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
-
- [_, #entry{key = Key, val = DDoc}] = lists:sort(ets:tab2list(?CACHE)),
- NewDDoc = DDoc#doc{
- body = {[{<<"foo">>, <<"kazam">>}]}
- },
- {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
- % We pass the rev explicitly so we assert that we're
- % getting the same original response from the cache
- meck:wait(ddoc_cache_ev, event, [update_noop, Key], 1000),
- ?assertMatch({ok, RevDDoc}, ddoc_cache:open_doc(DbName, ?FOOBAR, Rev)),
- ?assertEqual(2, ets:info(?CACHE, size)).
-
-
-refresh_vdu({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- {ok, [_]} = ddoc_cache:open_validation_funs(DbName),
- [#entry{key = Key}] = ets:tab2list(?CACHE),
- {ok, DDoc} = fabric:open_doc(DbName, ?VDU, [?ADMIN_CTX]),
- {ok, _} = fabric:update_doc(DbName, DDoc#doc{body = {[]}}, [?ADMIN_CTX]),
- meck:wait(ddoc_cache_ev, event, [updated, {Key, []}], 1000),
- ?assertMatch({ok, []}, ddoc_cache:open_validation_funs(DbName)),
- ?assertEqual(1, ets:info(?CACHE, size)).
-
-
-refresh_custom({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- {ok, Resp1} = ddoc_cache:open_custom(DbName, ?MODULE),
- {ok, DDoc} = fabric:open_doc(DbName, ?VDU, [?CUSTOM]),
- {ok, _} = fabric:update_doc(DbName, DDoc#doc{body = {[]}}, [?ADMIN_CTX]),
- meck:wait(ddoc_cache_ev, event, [updated, '_'], 1000),
- ?assertNotEqual({ok, Resp1}, ddoc_cache:open_custom(DbName, ?MODULE)),
- ?assertEqual(1, ets:info(?CACHE, size)).
-
-
-refresh_multiple({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR),
- {ok, DDoc} = ddoc_cache:open_doc(DbName, ?FOOBAR),
- {ok, DDoc} = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
- ?assertEqual(2, ets:info(?CACHE, size)),
- % Relying on the sort order of entry keys to make
- % sure our entries line up for this test
- [
- #entry{key = NoRevKey, val = DDoc},
- #entry{key = RevKey, val = DDoc}
- ] = lists:sort(ets:tab2list(?CACHE)),
- NewDDoc = DDoc#doc{
- body = {[{<<"foo">>, <<"kalamazoo">>}]}
- },
- {ok, {Depth, RevId}} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
- Updated = NewDDoc#doc{
- revs = {Depth, [RevId | element(2, DDoc#doc.revs)]}
- },
- meck:wait(ddoc_cache_ev, event, [update_noop, RevKey], 1000),
- meck:wait(ddoc_cache_ev, event, [updated, {NoRevKey, Updated}], 1000),
- % We pass the rev explicitly so we assert that we're
- % getting the same original response from the cache
- ?assertEqual({ok, Updated}, ddoc_cache:open_doc(DbName, ?FOOBAR)),
- ?assertEqual({ok, DDoc}, ddoc_cache:open_doc(DbName, ?FOOBAR, Rev)),
- ?assertEqual(2, ets:info(?CACHE, size)).
-
-
-check_upgrade_clause({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
- [#entry{key = Key}] = ets:tab2list(?CACHE),
- gen_server:cast(ddoc_cache_opener, {do_evict, DbName, [?FOOBAR]}),
- meck:wait(ddoc_cache_ev, event, [update_noop, Key], 1000).
-
-
-rand_string() ->
- Bin = crypto:strong_rand_bytes(8),
- to_hex(Bin, []).
-
-
-to_hex(<<>>, Acc) ->
- list_to_binary(lists:reverse(Acc));
-to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
- to_hex(Rest, [hexdig(C1), hexdig(C2) | Acc]).
-
-
-hexdig(C) when C >= 0, C =< 9 ->
- C + $0;
-hexdig(C) when C >= 10, C =< 15 ->
- C + $A - 10.
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_remove_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_remove_test.erl
deleted file mode 100644
index e40518529..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_remove_test.erl
+++ /dev/null
@@ -1,224 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_remove_test).
-
-
--export([
- recover/1
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("eunit/include/eunit.hrl").
--include("ddoc_cache_test.hrl").
-
-
-recover(DbName) ->
- {ok, #doc{body = {Body}}} = fabric:open_doc(DbName, ?CUSTOM, [?ADMIN_CTX]),
- case couch_util:get_value(<<"status">>, Body) of
- <<"ok">> ->
- {ok, yay};
- <<"not_ok">> ->
- {ruh, roh};
- <<"error">> ->
- erlang:error(thpppt)
- end.
-
-
-start_couch() ->
- Ctx = ddoc_cache_tutil:start_couch(),
- meck:new(ddoc_cache_ev, [passthrough]),
- Ctx.
-
-
-stop_couch(Ctx) ->
- meck:unload(),
- ddoc_cache_tutil:stop_couch(Ctx).
-
-
-check_refresh_test_() ->
- {
- setup,
- fun start_couch/0,
- fun stop_couch/1,
- ddoc_cache_tutil:with([
- {"remove_ddoc", fun remove_ddoc/1},
- {"remove_ddoc_rev", fun remove_ddoc_rev/1},
- {"remove_ddoc_rev_only", fun remove_ddoc_rev_only/1},
- {"remove_custom_not_ok", fun remove_custom_not_ok/1},
- {"remove_custom_error", fun remove_custom_error/1}
- ])
- }.
-
-
-remove_ddoc({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- ?assertEqual(0, ets:info(?CACHE, size)),
- {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
-
- meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
- meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
-
- [#entry{val = DDoc}, #entry{val = DDoc}] = ets:tab2list(?CACHE),
- {Depth, [RevId | _]} = DDoc#doc.revs,
- NewDDoc = DDoc#doc{
- deleted = true,
- body = {[]}
- },
- {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
-
- DDocIdKey = {ddoc_cache_entry_ddocid, {DbName, ?FOOBAR}},
- Rev = {Depth, RevId},
- DDocIdRevKey = {ddoc_cache_entry_ddocid_rev, {DbName, ?FOOBAR, Rev}},
- meck:wait(ddoc_cache_ev, event, [removed, DDocIdKey], 1000),
- meck:wait(ddoc_cache_ev, event, [update_noop, DDocIdRevKey], 1000),
-
- ?assertMatch({not_found, deleted}, ddoc_cache:open_doc(DbName, ?FOOBAR)),
- ?assertEqual(1, ets:info(?CACHE, size)).
-
-
-remove_ddoc_rev({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- Rev = ddoc_cache_tutil:get_rev(DbName, ?VDU),
- {ok, _} = ddoc_cache:open_doc(DbName, ?VDU, Rev),
-
- meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
- meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
-
- % Notice the sort so that we know we're getting the
- % revid version second.
- [_, #entry{key = Key, val = DDoc, pid = Pid}]
- = lists:sort(ets:tab2list(?CACHE)),
-
- NewDDoc = DDoc#doc{
- body = {[{<<"an">>, <<"update">>}]}
- },
- {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
- meck:wait(ddoc_cache_ev, event, [update_noop, Key], 1000),
- % Compact the database so that the old rev is removed
- lists:foreach(fun(Shard) ->
- do_compact(Shard#shard.name)
- end, mem3:local_shards(DbName)),
- % Trigger a refresh rather than wait for the timeout
- ddoc_cache_entry:refresh(Pid),
- meck:wait(ddoc_cache_ev, event, [removed, Key], 1000),
- ?assertMatch(
- {{not_found, missing}, _},
- ddoc_cache:open_doc(DbName, ?VDU, Rev)
- ),
- ?assertEqual(1, ets:info(?CACHE, size)).
-
-
-remove_ddoc_rev_only({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- Rev = ddoc_cache_tutil:get_rev(DbName, ?VDU),
- {ok, _} = ddoc_cache:open_doc(DbName, ?VDU),
- {ok, _} = ddoc_cache:open_doc(DbName, ?VDU, Rev),
- % Relying on the sort order of keys to keep
- % these lined up for testing
- [
- #entry{key = NoRevKey, val = DDoc, pid = NoRevPid},
- #entry{key = RevKey, val = DDoc, pid = RevPid}
- ] = lists:sort(ets:tab2list(?CACHE)),
- NewDDoc = DDoc#doc{
- body = {[{<<"new">>, <<"awesomeness">>}]}
- },
- {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
- meck:wait(ddoc_cache_ev, event, [updated, '_'], 1000),
- meck:wait(ddoc_cache_ev, event, [update_noop, RevKey], 1000),
- % Compact the database so that the old rev is removed
- lists:foreach(fun(Shard) ->
- do_compact(Shard#shard.name)
- end, mem3:local_shards(DbName)),
- % Trigger a refresh rather than wait for the timeout
- ddoc_cache_entry:refresh(NoRevPid),
- ddoc_cache_entry:refresh(RevPid),
- meck:wait(ddoc_cache_ev, event, [update_noop, NoRevKey], 1000),
- meck:wait(ddoc_cache_ev, event, [removed, RevKey], 1000),
- ?assertMatch({ok, _}, ddoc_cache:open_doc(DbName, ?VDU)),
- ?assertMatch(
- {{not_found, missing}, _},
- ddoc_cache:open_doc(DbName, ?VDU, Rev)
- ),
- ?assertEqual(1, ets:info(?CACHE, size)).
-
-remove_custom_not_ok({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- init_custom_ddoc(DbName),
- {ok, _} = ddoc_cache:open_custom(DbName, ?MODULE),
- [#entry{key = Key}] = ets:tab2list(?CACHE),
- {ok, DDoc} = fabric:open_doc(DbName, ?CUSTOM, [?ADMIN_CTX]),
- NewDDoc = DDoc#doc{
- body = {[{<<"status">>, <<"not_ok">>}]}
- },
- {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
- meck:wait(ddoc_cache_ev, event, [removed, Key], 1000),
- ?assertEqual({ruh, roh}, ddoc_cache:open_custom(DbName, ?MODULE)),
- ?assertEqual(0, ets:info(?CACHE, size)).
-
-
-remove_custom_error({DbName, _}) ->
- ddoc_cache_tutil:clear(),
- meck:reset(ddoc_cache_ev),
- init_custom_ddoc(DbName),
- {ok, _} = ddoc_cache:open_custom(DbName, ?MODULE),
- [#entry{key = Key}] = ets:tab2list(?CACHE),
- {ok, DDoc} = fabric:open_doc(DbName, ?CUSTOM, [?ADMIN_CTX]),
- NewDDoc = DDoc#doc{
- body = {[{<<"status">>, <<"error">>}]}
- },
- {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
- meck:wait(ddoc_cache_ev, event, [removed, Key], 1000),
- ?assertError(thpppt, ddoc_cache:open_custom(DbName, ?MODULE)),
- ?assertEqual(0, ets:info(?CACHE, size)).
-
-
-init_custom_ddoc(DbName) ->
- Body = {[{<<"status">>, <<"ok">>}]},
- {ok, Doc} = fabric:open_doc(DbName, ?CUSTOM, [?ADMIN_CTX]),
- NewDoc = Doc#doc{body = Body},
- {ok, _} = fabric:update_doc(DbName, NewDoc, [?ADMIN_CTX]).
-
-
-do_compact(ShardName) ->
- {ok, Db} = couch_db:open_int(ShardName, []),
- try
- {ok, Pid} = couch_db:start_compact(Db),
- Ref = erlang:monitor(process, Pid),
- receive
- {'DOWN', Ref, _, _, _} ->
- ok
- end
- after
- couch_db:close(Db)
- end,
- wait_for_compaction(ShardName).
-
-
-wait_for_compaction(ShardName) ->
- {ok, Db} = couch_db:open_int(ShardName, []),
- CompactRunning = try
- {ok, Info} = couch_db:get_db_info(Db),
- couch_util:get_value(compact_running, Info)
- after
- couch_db:close(Db)
- end,
- if not CompactRunning -> ok; true ->
- timer:sleep(100),
- wait_for_compaction(ShardName)
-    end.
\ No newline at end of file
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_test.hrl b/src/ddoc_cache/test/eunit/ddoc_cache_test.hrl
deleted file mode 100644
index 73f7bc217..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_test.hrl
+++ /dev/null
@@ -1,26 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
--define(CACHE, ddoc_cache_entries).
--define(LRU, ddoc_cache_lru).
--define(OPENERS, ddoc_cache_openers).
-
--define(FOOBAR, <<"_design/foobar">>).
--define(VDU, <<"_design/vdu">>).
--define(CUSTOM, <<"_design/custom">>).
-
--record(entry, {
- key,
- val,
- pid
-}).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_tutil.erl b/src/ddoc_cache/test/eunit/ddoc_cache_tutil.erl
deleted file mode 100644
index b34d4b163..000000000
--- a/src/ddoc_cache/test/eunit/ddoc_cache_tutil.erl
+++ /dev/null
@@ -1,111 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_tutil).
-
-
--export([
- start_couch/0,
- start_couch/1,
- stop_couch/1,
- clear/0,
- get_rev/2,
- ddocs/0,
- purge_modules/0,
- with/1
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch/include/couch_eunit.hrl").
-
-
-start_couch() ->
- start_couch([{write_ddocs, true}]).
-
-
-start_couch(Options) ->
- WriteDDocs = couch_util:get_value(write_ddocs, Options, true),
- purge_modules(),
- Ctx = test_util:start_couch(?CONFIG_CHAIN, [chttpd, ddoc_cache]),
- TmpDb = ?tempdb(),
- ok = fabric:create_db(TmpDb, [{q, "1"}, {n, "1"}]),
- if not WriteDDocs -> ok; true ->
- {ok, _} = fabric:update_docs(TmpDb, ddocs(), [?ADMIN_CTX])
- end,
- {TmpDb, Ctx}.
-
-
-stop_couch({_TmpDb, Ctx}) ->
- test_util:stop_couch(Ctx).
-
-
-clear() ->
- application:stop(ddoc_cache),
- application:start(ddoc_cache).
-
-
-get_rev(DbName, DDocId) ->
- {_, Ref} = erlang:spawn_monitor(fun() ->
- {ok, #doc{revs = Revs}} = fabric:open_doc(DbName, DDocId, [?ADMIN_CTX]),
- {Depth, [RevId | _]} = Revs,
- exit({Depth, RevId})
- end),
- receive
- {'DOWN', Ref, _, _, Rev} -> Rev
- end.
-
-
-ddocs() ->
- FooBar = #doc{
- id = <<"_design/foobar">>,
- body = {[
- {<<"foo">>, <<"bar">>}
- ]}
- },
- VDU = #doc{
- id = <<"_design/vdu">>,
- body = {[
- {<<"validate_doc_update">>, <<"function(doc) {return;}">>}
- ]}
- },
- Custom = #doc{
- id = <<"_design/custom">>,
- body = {[
- {<<"status">>, <<"ok">>},
- {<<"custom">>, <<"hotrod">>}
- ]}
- },
- [FooBar, VDU, Custom].
-
-
-purge_modules() ->
- case application:get_key(ddoc_cache, modules) of
- {ok, Mods} ->
- lists:foreach(fun(Mod) ->
- case code:which(Mod) of
- cover_compiled ->
- ok;
- _ ->
- code:delete(Mod),
- code:purge(Mod)
- end
- end, Mods);
- undefined ->
- ok
- end.
-
-%% eunit implementation of {with, Tests} doesn't detect test name correctly
-with(Tests) ->
- fun(ArgsTuple) ->
- [{Name, ?_test(Fun(ArgsTuple))} || {Name, Fun} <- Tests]
- end.
diff --git a/src/dreyfus/.gitignore b/src/dreyfus/.gitignore
deleted file mode 100644
index 16fd00698..000000000
--- a/src/dreyfus/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-ebin/
-.*.sw?
-test/elixir/_build
-test/elixir/deps
diff --git a/src/dreyfus/LICENSE.txt b/src/dreyfus/LICENSE.txt
deleted file mode 100644
index 1561dafac..000000000
--- a/src/dreyfus/LICENSE.txt
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2015 IBM Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/src/dreyfus/README.md b/src/dreyfus/README.md
deleted file mode 100644
index d653432d0..000000000
--- a/src/dreyfus/README.md
+++ /dev/null
@@ -1,78 +0,0 @@
-What is dreyfus?
--------------
-Dreyfus manages Clouseau nodes to deliver full-text search features.
-
-Dreyfus consists of the following files:
-
-- **dreyfus.app.src** - application resource file. As this file shows, the application callback module is dreyfus_app, and the two registered processes started in this application are dreyfus_index_manager and dreyfus_sup.
-- **dreyfus_app.erl** - the application callback module; it starts the top supervisor via dreyfus_sup:start_link().
-- **dreyfus_sup.erl** - the top supervisor that starts dreyfus_index_manager as its child worker process.
-- **dreyfus_index_manager.erl** - manages multiple processes of dreyfus_index.
-- **dreyfus_index.erl** - contains the main callback functions for operating on an index. One process is created for every index (a distinct index function in a design document).
-- **dreyfus_index_updater.erl** - contains callback functions for index update.
-- **dreyfus_httpd.erl** - handles http requests.
-- **dreyfus_fabric.erl**, dreyfus_fabric_cleanup.erl, dreyfus_fabric_group1.erl, dreyfus_fabric_group2.erl, dreyfus_fabric_info.erl, dreyfus_fabric_search.erl - collection of proxy functions for operations in a cluster with shards.
-- **dreyfus_rpc.erl** - proxy functions executed for every shard.
-- **clouseau_rpc.erl** - contains the remote procedure call functions used to talk to Clouseau nodes.
-- **dreyfus_bookmark.erl** - utility functions for managing the bookmarks used to retrieve the next set of results.
-- **dreyfus_util.erl** - various utility functions.
-
-
-
-Life of http request
--------------
-Http requests have the following life cycle:
-
-![Dreyfus](https://cloud.githubusercontent.com/assets/5738841/7590919/cbaf1c50-f898-11e4-8a4c-462a1a680135.png)
-
-1. A request from chttpd goes to dreyfus_httpd.
-2. dreyfus_httpd:
-  - parses and validates the request in the functions `parse_index_params` & `validate_index_query`.
-  - depending on the type of the request, invokes one of the fabric functions: dreyfus_fabric_search, dreyfus_fabric_group1, dreyfus_fabric_group2, dreyfus_fabric_info, or dreyfus_fabric_cleanup.
-3. dreyfus_fabric:
-  - gets the shards and submits the worker jobs to be executed on every shard:
- `Shards = dreyfus_util:get_shards(DbName, QueryArgs)`,
- `Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, search,
- [DDoc, IndexName, dreyfus_util:export(QueryArgs)])`
-  - spawns processes to execute the jobs on every shard using the rexi RPC server: `rexi_utils:recv(Workers, #shard.ref, fun handle_message/3, State, infinity, 1000 * 60 * 60)
-`
-4. dreyfus_rpc:
- - is executed on every shard of every node at the same time.
-  - calls `dreyfus_index_manager:get_index(DbName, Index)` to get an index. dreyfus_index_manager will spawn a process to create the index if it doesn't exist yet.
-  - the index of every shard will be updated if necessary via `dreyfus_index:await(Pid, MinSeq)`.
- - calls `dreyfus_index:Fun(Pid, QueryArgs)` with a corresponding search request.
-
-5. dreyfus_index:
- - synchronously calls `clouseau_rpc:search`.
-6. clouseau_rpc:
- - calls `ioq:call(Ref, Msg, erlang:get(io_priority))` to run the search on Clouseau nodes using Lucene.
-7. top_docs are returned from Lucene.
-8. top_docs are passed to dreyfus_index.
-9. top_docs are passed to dreyfus_rpc.
-10. dreyfus_rpc processes pass their individual top_docs as a reply `rexi:reply(Result)` to the initial dreyfus_fabric process that spawned them.
-11. dreyfus_fabric merges documents from all shards: `MergedTopDocs = merge_top_docs(TopDocs, Sortable, Limit, Sort)` and returns the results to dreyfus_httpd.
-12. dreyfus_httpd returns the formatted results to chttpd through `send_json(...)`.
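-
-A condensed sketch of steps 3 and 11, adapted from `dreyfus_fabric_search.erl` later in this tree (monitors, bookmark handling, and error handling are omitted, and `#state{}` is abbreviated):
-
-```erlang
-% Fan out one dreyfus_rpc:search job per shard, then gather the replies with
-% rexi_utils:recv/6; handle_message/3 merges each shard's #top_docs into State.
-go(DbName, DDoc, IndexName, QueryArgs) ->
-    Shards = dreyfus_util:get_shards(DbName, QueryArgs),
-    Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, search,
-        [DDoc, IndexName, dreyfus_util:export(QueryArgs)]),
-    State = #state{
-        top_docs = #top_docs{total_hits = 0, hits = []},
-        counters = fabric_dict:init(Workers, nil)
-    },
-    try
-        rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
-            State, infinity, 1000 * 60 * 60)
-    after
-        fabric_util:cleanup(Workers)
-    end.
-```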
-
-
-Indexing
--------------
-
-### Indexing triggered by a search request
-During a search request, before dreyfus_rpc calls dreyfus_index:search, it first initiates an update of the Lucene indexes. It does so in the following way:
-
-![DreyfusIndexing.png](https://cloud.githubusercontent.com/assets/5738841/7590923/d12303fe-f898-11e4-833d-b1387b7048a6.png)
-
-1. The last sequence number (signifying the number of the last change in the database) is calculated: `{_LastSeq, MinSeq} = calculate_seqs(Db, Stale)`. For stale queries (queries that don't need to reflect recent changes in the database), MinSeq will be 0, meaning they don't need to initiate an update of the index before returning query results. The meaning of 0 is 'wait until the index is at least at update_seq 0', which is true even for empty indexes.
-
-2. Function call `dreyfus_index:design_doc_to_index(DDoc, IndexName)` returns a record representation of an index: `#index{
- analyzer=Analyzer,
- ddoc_id=Id,
- def=Def,
- def_lang=Language,
- name=IndexName,
- sig=Sig}`. `Sig` here is a hash of the index function and the analyzer defined (as a JavaScript function) in the design document. `Sig` is used to check whether an index definition has changed and the index needs to be rebuilt.
-
-
-3. The function call `dreyfus_index_manager:get_index(DbName, Index)` will return the Pid of the dreyfus_index process corresponding to this index. dreyfus_index_manager stores the dreyfus_index processes for all indexes in an ets table: `ets:new(?BY_SIG, [set, private, named_table])`. If the dreyfus_index process for the given index exists in the ets ?BY_SIG, it will be returned. If it doesn't exist, a new dreyfus_index process will be spawned. To do this, dreyfus_index_manager's `handle_call({get_index,..)` returns `{noreply, State};` so as not to block the gen_server, and hands creation of the new index process off to a spawned process - `spawn_link(fun() -> new_index(DbName, Index) end)` - while remembering the Pid of the caller in the ets ?BY_SIG. `new_index` will create the new index process and send an `open_ok` message to the dreyfus_index_manager gen_server. `handle_call({open_ok,..)` will retrieve the Pid (`From`) of the original caller and send that caller a reply containing the Pid of the newly created index process, NewPid. Calling `add_to_ets(NewPid, DbName, Sig)` will update the two ets tables ?BY_SIG and ?BY_PID. A hypothetical skeleton of this pattern is sketched below.
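-
-A hypothetical skeleton of the reply-later pattern described in step 3 (the real dreyfus_index_manager keeps a wait list and more bookkeeping; `?BY_SIG`, `new_index/2`, and `add_to_ets/3` are the names used above):
-
-```erlang
-% get_index does not block the gen_server: if the index is not open yet, it
-% spawns new_index/2, remembers the caller, and replies later from open_ok.
-handle_call({get_index, DbName, #index{sig = Sig} = Index}, From, State) ->
-    case ets:lookup(?BY_SIG, {DbName, Sig}) of
-        [{_, Pid}] when is_pid(Pid) ->
-            {reply, {ok, Pid}, State};
-        _ ->
-            spawn_link(fun() -> new_index(DbName, Index) end),
-            ets:insert(?BY_SIG, {{DbName, Sig}, From}),
-            {noreply, State}
-    end;
-handle_call({open_ok, DbName, Sig, NewPid}, _From, State) ->
-    [{_, Waiter}] = ets:lookup(?BY_SIG, {DbName, Sig}),
-    gen_server:reply(Waiter, {ok, NewPid}),
-    add_to_ets(NewPid, DbName, Sig),
-    {reply, ok, State}.
-```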
-
-4. `dreyfus_index:await(Pid, MinSeq)` will initiate an update of the index if the requested MinSeq is greater than the current seq stored in the index. It will do this by calling `dreyfus_index_updater:update(IndexPid, Index)`. dreyfus_index_updater will load all documents modified since the last seq stored in the dreyfus index and, for every document, will call `clouseau_rpc:delete` to delete the document from the Java Lucene index or `clouseau_rpc:update` to update it there (a condensed sketch follows).
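-
-A minimal, hypothetical sketch of the per-document work in step 4, using only the `clouseau_rpc` calls shown in this tree (the real dreyfus_index_updater streams the changes from the shard and also tracks update and purge sequence numbers):
-
-```erlang
-% Apply a pre-built batch of changes to the Lucene index behind Ref.
-% `Changes` is hypothetical here; the updater derives it from the shard's
-% changes feed since the seq last stored in the dreyfus index.
-apply_changes(Ref, Changes) ->
-    lists:foreach(fun
-        ({deleted, Id}) ->
-            clouseau_rpc:delete(Ref, Id);
-        ({updated, Id, Fields}) ->
-            clouseau_rpc:update(Ref, Id, Fields)
-    end, Changes).
-```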
diff --git a/src/dreyfus/include/dreyfus.hrl b/src/dreyfus/include/dreyfus.hrl
deleted file mode 100644
index 7c6a36945..000000000
--- a/src/dreyfus/include/dreyfus.hrl
+++ /dev/null
@@ -1,74 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(index, {
- current_seq=0,
- dbname,
- ddoc_id,
- analyzer,
- def,
- def_lang,
- name,
- sig=nil
-}).
-
--record(grouping, {
- by=nil,
- groups=[],
- offset=0,
- limit=10,
- sort=relevance,
- new_api=true
-}).
-
--record(index_query_args, {
- q,
- partition=nil,
- limit=25,
- stale=false,
- include_docs=false,
- bookmark=nil,
- sort=relevance,
- grouping=#grouping{},
- stable=false,
- counts=nil,
- ranges=nil,
- drilldown=[],
- include_fields=nil,
- highlight_fields=nil,
- highlight_pre_tag = <<"<em>">>,
- highlight_post_tag = <<"</em>">>,
- highlight_number=1,
- highlight_size=0,
- raw_bookmark=false
-}).
-
--record(sortable, {
- order, % sort order
- shard, % originating shard
- item % the item itself
-}).
-
-% Our local representation of top_docs, not equal to wire format.
--record(top_docs, {
- update_seq,
- total_hits,
- hits,
- counts,
- ranges
-}).
-
-%% These must match the case classes in clouseau.
--record(hit, {
- order,
- fields
-}).
diff --git a/src/dreyfus/priv/stats_descriptions.cfg b/src/dreyfus/priv/stats_descriptions.cfg
deleted file mode 100644
index 7f93ee26a..000000000
--- a/src/dreyfus/priv/stats_descriptions.cfg
+++ /dev/null
@@ -1,65 +0,0 @@
-%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-%% use this file except in compliance with the License. You may obtain a copy of
-%% the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-%% License for the specific language governing permissions and limitations under
-%% the License.
-
-
-{[dreyfus, httpd, search], [
- {type, histogram},
- {desc, <<"Distribution of overall search request latency as experienced by the end user">>}
-]}.
-{[dreyfus, rpc, search], [
- {type, histogram},
- {desc, <<"length of a search RPC worker">>}
-]}.
-{[dreyfus, rpc, group1], [
- {type, histogram},
- {desc, <<"length of a group1 RPC worker">>}
-]}.
-{[dreyfus, rpc, group2], [
- {type, histogram},
- {desc, <<"length of a group2 RPC worker">>}
-]}.
-{[dreyfus, rpc, info], [
- {type, histogram},
- {desc, <<"length of an info RPC worker">>}
-]}.
-{[dreyfus, index, await], [
- {type, histogram},
- {desc, <<"length of a dreyfus_index await request">>}
-]}.
-{[dreyfus, index, search], [
- {type, histogram},
- {desc, <<"length of a dreyfus_index search request">>}
-]}.
-{[dreyfus, index, group1], [
- {type, histogram},
- {desc, <<"length of a dreyfus_index group1 request">>}
-]}.
-{[dreyfus, index, group2], [
- {type, histogram},
- {desc, <<"length of a dreyfus_index group2 request">>}
-]}.
-{[dreyfus, index, info], [
- {type, histogram},
- {desc, <<"length of a dreyfus_index info request">>}
-]}.
-
-%% Declare IOQ search channel metrics
-{[couchdb, io_queue, search], [
- {type, counter},
- {desc, <<"Search IO directly triggered by client requests">>}
-]}.
-
-%% Declare IOQ2 search channel metrics
-{[couchdb, io_queue2, search, count], [
- {type, counter},
- {desc, <<"Search IO directly triggered by client requests">>}
-]}.
diff --git a/src/dreyfus/src/clouseau_rpc.erl b/src/dreyfus/src/clouseau_rpc.erl
deleted file mode 100644
index b8190b32c..000000000
--- a/src/dreyfus/src/clouseau_rpc.erl
+++ /dev/null
@@ -1,109 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(clouseau_rpc).
-
--include("dreyfus.hrl").
-
--export([open_index/3]).
--export([await/2, commit/2, get_update_seq/1, info/1, search/2]).
--export([group1/7, group2/2]).
--export([delete/2, update/3, cleanup/1, cleanup/2, rename/1]).
--export([analyze/2, version/0, disk_size/1]).
--export([set_purge_seq/2, get_purge_seq/1, get_root_dir/0]).
--export([connected/0]).
-
-open_index(Peer, Path, Analyzer) ->
- rpc({main, clouseau()}, {open, Peer, Path, Analyzer}).
-
-disk_size(Path) ->
- rpc({main, clouseau()}, {disk_size, Path}).
-get_root_dir() ->
- rpc({main, clouseau()}, {get_root_dir}).
-
-await(Ref, MinSeq) ->
- rpc(Ref, {await, MinSeq}).
-
-commit(Ref, NewCommitSeq) ->
- rpc(Ref, {commit, NewCommitSeq}).
-
-info(Ref) ->
- rpc(Ref, info).
-
-get_update_seq(Ref) ->
- rpc(Ref, get_update_seq).
-
-set_purge_seq(Ref, Seq) ->
- rpc(Ref, {set_purge_seq, Seq}).
-
-get_purge_seq(Ref) ->
- rpc(Ref, get_purge_seq).
-
-search(Ref, Args) ->
- case rpc(Ref, {search, Args}) of
- {ok, Response} when is_list(Response) ->
- {ok, #top_docs{
- update_seq = couch_util:get_value(update_seq, Response),
- total_hits = couch_util:get_value(total_hits, Response),
- hits = couch_util:get_value(hits, Response),
- counts = couch_util:get_value(counts, Response),
- ranges = couch_util:get_value(ranges, Response)
- }};
- Else ->
- Else
- end.
-
-group1(Ref, Query, GroupBy, Refresh, Sort, Offset, Limit) ->
- rpc(Ref, {group1, Query, GroupBy, Refresh, Sort, Offset, Limit}).
-
-group2(Ref, Args) ->
- rpc(Ref, {group2, Args}).
-
-delete(Ref, Id) ->
- rpc(Ref, {delete, couch_util:to_binary(Id)}).
-
-update(Ref, Id, Fields) ->
- rpc(Ref, {update, Id, Fields}).
-
-cleanup(DbName) ->
- gen_server:cast({cleanup, clouseau()}, {cleanup, DbName}).
-
-rename(DbName) ->
- gen_server:cast({cleanup, clouseau()}, {rename, DbName}).
-
-cleanup(DbName, ActiveSigs) ->
- gen_server:cast({cleanup, clouseau()}, {cleanup, DbName, ActiveSigs}).
-
-analyze(Analyzer, Text) ->
- rpc({analyzer, clouseau()}, {analyze, Analyzer, Text}).
-
-version() ->
- rpc({main, clouseau()}, version).
-
-connected() ->
- HiddenNodes = erlang:nodes(hidden),
- case lists:member(clouseau(), HiddenNodes) of
- true ->
- true;
- false ->
- % We might have just booted up, so let's ping
- pong == net_adm:ping(clouseau())
- end.
-
-rpc(Ref, Msg) ->
- ioq:call(Ref, Msg, erlang:get(io_priority)).
-
-clouseau() ->
- list_to_atom(config:get("dreyfus", "name", "clouseau@127.0.0.1")).
diff --git a/src/dreyfus/src/dreyfus.app.src b/src/dreyfus/src/dreyfus.app.src
deleted file mode 100644
index be6595222..000000000
--- a/src/dreyfus/src/dreyfus.app.src
+++ /dev/null
@@ -1,22 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
-{application, dreyfus, [
- {description, "Clouseau index manager"},
- {vsn, git},
- {mod, {dreyfus_app, []}},
- {registered, [dreyfus_index_manager, dreyfus_sup]},
- {applications, [kernel, stdlib, couch_log, config, couch_event, mem3, ioq, couch_epi]}
-]}.
diff --git a/src/dreyfus/src/dreyfus_app.erl b/src/dreyfus/src/dreyfus_app.erl
deleted file mode 100644
index 7cd7f4a31..000000000
--- a/src/dreyfus/src/dreyfus_app.erl
+++ /dev/null
@@ -1,24 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_app).
--behaviour(application).
--export([start/2, stop/1]).
-
-start(_Type, []) ->
- dreyfus_sup:start_link().
-
-stop([]) ->
- ok.
diff --git a/src/dreyfus/src/dreyfus_bookmark.erl b/src/dreyfus/src/dreyfus_bookmark.erl
deleted file mode 100644
index 9a2979b25..000000000
--- a/src/dreyfus/src/dreyfus_bookmark.erl
+++ /dev/null
@@ -1,90 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_bookmark).
-
--include("dreyfus.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--export([
- update/3,
- unpack/2,
- pack/1,
- add_missing_shards/2
-]).
-
-
-update(_Sort, Bookmark, []) ->
- Bookmark;
-update(relevance, Bookmark, [#sortable{} = Sortable | Rest]) ->
- #sortable{
- order = [Score, Doc],
- shard = Shard
- } = Sortable,
- B1 = fabric_dict:store(Shard, {Score, Doc}, Bookmark),
- B2 = fabric_view:remove_overlapping_shards(Shard, B1),
- update(relevance, B2, Rest);
-update(Sort, Bookmark, [#sortable{} = Sortable | Rest]) ->
- #sortable{
- order = Order,
- shard = Shard
- } = Sortable,
- B1 = fabric_dict:store(Shard, Order, Bookmark),
- B2 = fabric_view:remove_overlapping_shards(Shard, B1),
- update(Sort, B2, Rest).
-
-
-unpack(DbName, #index_query_args{bookmark=nil} = Args) ->
- fabric_dict:init(dreyfus_util:get_shards(DbName, Args), nil);
-unpack(DbName, #index_query_args{} = Args) ->
- unpack(DbName, Args#index_query_args.bookmark);
-unpack(DbName, Packed) when is_binary(Packed) ->
- lists:map(fun({Node, Range, After}) ->
- case mem3:get_shard(DbName, Node, Range) of
- {ok, Shard} ->
- {Shard, After};
- {error, not_found} ->
- PlaceHolder = #shard{
- node = Node,
- range = Range,
- dbname = DbName,
- _='_'
- },
- {PlaceHolder, After}
- end
- end, binary_to_term(couch_util:decodeBase64Url(Packed))).
-
-
-pack(nil) ->
- null;
-pack(Workers) ->
- Workers1 = [{N,R,A} || {#shard{node=N, range=R}, A} <- Workers, A =/= nil],
- Bin = term_to_binary(Workers1, [compressed, {minor_version,1}]),
- couch_util:encodeBase64Url(Bin).
-
-
-add_missing_shards(Bookmark, LiveShards) ->
- {BookmarkShards, _} = lists:unzip(Bookmark),
- add_missing_shards(Bookmark, BookmarkShards, LiveShards).
-
-
-add_missing_shards(Bookmark, _, []) ->
- Bookmark;
-add_missing_shards(Bookmark, BMShards, [H | T]) ->
- Bookmark1 = case lists:keymember(H#shard.range, #shard.range, BMShards) of
- true -> Bookmark;
- false -> fabric_dict:store(H, nil, Bookmark)
- end,
- add_missing_shards(Bookmark1, BMShards, T).
diff --git a/src/dreyfus/src/dreyfus_config.erl b/src/dreyfus/src/dreyfus_config.erl
deleted file mode 100644
index b7555c1d0..000000000
--- a/src/dreyfus/src/dreyfus_config.erl
+++ /dev/null
@@ -1,15 +0,0 @@
- -module(dreyfus_config).
-
- -export([data/0, get/1]).
-
-data() ->
- try
- config:get("dreyfus_blacklist")
- catch error:badarg ->
- % lazy workaround to address issue with epi invocation on startup
- []
- end.
-
-get(Key) ->
- Handle = couch_epi:get_handle({dreyfus, black_list}),
- couch_epi:get_value(Handle, dreyfus, Key).
diff --git a/src/dreyfus/src/dreyfus_epi.erl b/src/dreyfus/src/dreyfus_epi.erl
deleted file mode 100644
index cb07f8a34..000000000
--- a/src/dreyfus/src/dreyfus_epi.erl
+++ /dev/null
@@ -1,46 +0,0 @@
--module(dreyfus_epi).
-
--behaviour(couch_epi_plugin).
-
--export([
- app/0,
- providers/0,
- services/0,
- data_subscriptions/0,
- data_providers/0,
- processes/0,
- notify/3
-]).
-
--define(DATA_INTERVAL, 1000).
-
-app() ->
- dreyfus.
-
-providers() ->
- [
- {couch_db, dreyfus_plugin_couch_db},
- {chttpd_handlers, dreyfus_httpd_handlers}
- ].
-
-
-services() ->
- [].
-
-data_subscriptions() ->
- [{dreyfus, black_list}].
-
-data_providers() ->
- [
- {{dreyfus, black_list}, {callback_module, dreyfus_config},
- [{interval, ?DATA_INTERVAL}]}
- ].
-
-processes() ->
- [].
-
-notify(_Key, _Old, _New) ->
- Listeners = application:get_env(dreyfus, config_listeners, []),
- lists:foreach(fun(L) ->
- L ! dreyfus_config_change_finished
- end, Listeners).
diff --git a/src/dreyfus/src/dreyfus_fabric.erl b/src/dreyfus/src/dreyfus_fabric.erl
deleted file mode 100644
index 0b25a6cc6..000000000
--- a/src/dreyfus/src/dreyfus_fabric.erl
+++ /dev/null
@@ -1,205 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_fabric).
--export([get_json_docs/2, handle_error_message/7]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
--include("dreyfus.hrl").
-
-get_json_docs(DbName, DocIds) ->
- fabric:all_docs(DbName, fun callback/2, [], [{keys, DocIds}, {include_docs, true}]).
-
-callback({meta,_}, Acc) ->
- {ok, Acc};
-callback({error, Reason}, _Acc) ->
- {error, Reason};
-callback({row, Row}, Acc) ->
- {id, Id} = lists:keyfind(id, 1, Row),
- {ok, [{Id, lists:keyfind(doc, 1, Row)}|Acc]};
-callback(complete, Acc) ->
- {ok, lists:reverse(Acc)};
-callback(timeout, _Acc) ->
- {error, timeout}.
-
-handle_error_message({rexi_DOWN, _, {_, NodeRef}, _}, _Worker,
- Counters, _Replacements, _StartFun, _StartArgs, RingOpts) ->
- case fabric_util:remove_down_workers(Counters, NodeRef, RingOpts) of
- {ok, NewCounters} ->
- {ok, NewCounters};
- error ->
- {error, {nodedown, <<"progress not possible">>}}
- end;
-handle_error_message({rexi_EXIT, {maintenance_mode, _}}, Worker,
- Counters, Replacements, StartFun, StartArgs, RingOpts) ->
- handle_replacement(Worker, Counters, Replacements, StartFun, StartArgs,
- RingOpts);
-handle_error_message({rexi_EXIT, Reason}, Worker,
- Counters, _Replacements, _StartFun, _StartArgs, RingOpts) ->
- handle_error(Reason, Worker, Counters, RingOpts);
-handle_error_message({error, Reason}, Worker,
- Counters, _Replacements, _StartFun, _StartArgs, RingOpts) ->
- handle_error(Reason, Worker, Counters, RingOpts);
-handle_error_message({'EXIT', Reason}, Worker,
- Counters, _Replacements, _StartFun, _StartArgs, RingOpts) ->
- handle_error({exit, Reason}, Worker, Counters, RingOpts);
-handle_error_message(Reason, Worker, Counters,
- _Replacements, _StartFun, _StartArgs, RingOpts) ->
- couch_log:error("Unexpected error during request: ~p", [Reason]),
- handle_error(Reason, Worker, Counters, RingOpts).
-
-handle_error(Reason, Worker, Counters0, RingOpts) ->
- Counters = fabric_dict:erase(Worker, Counters0),
- case fabric_ring:is_progress_possible(Counters, RingOpts) of
- true ->
- {ok, Counters};
- false ->
- {error, Reason}
- end.
-
-handle_replacement(Worker, OldCntrs0, OldReplacements, StartFun, StartArgs,
- RingOpts) ->
- OldCounters = lists:filter(fun({#shard{ref=R}, _}) ->
- R /= Worker#shard.ref
- end, OldCntrs0),
- case lists:keytake(Worker#shard.range, 1, OldReplacements) of
- {value, {_Range, Replacements}, NewReplacements} ->
- NewCounters = lists:foldl(fun(Repl, CounterAcc) ->
- NewCounter = start_replacement(StartFun, StartArgs, Repl),
- fabric_dict:store(NewCounter, nil, CounterAcc)
- end, OldCounters, Replacements),
- true = fabric_ring:is_progress_possible(NewCounters, RingOpts),
- NewRefs = fabric_dict:fetch_keys(NewCounters),
- {new_refs, NewRefs, NewCounters, NewReplacements};
- false ->
- handle_error({nodedown, <<"progress not possible">>},
- Worker, OldCounters, RingOpts)
- end.
-
-start_replacement(StartFun, StartArgs, Shard) ->
- [DDoc, IndexName, QueryArgs] = StartArgs,
- After = case QueryArgs#index_query_args.bookmark of
- Bookmark when is_list(Bookmark) ->
- lists:foldl(fun({#shard{range=R0}, After0}, Acc) ->
- case R0 == Shard#shard.range of
- true -> After0;
- false -> Acc
- end
- end, nil, Bookmark);
- _ ->
- nil
- end,
- QueryArgs1 = QueryArgs#index_query_args{bookmark=After},
- StartArgs1 = [DDoc, IndexName, QueryArgs1],
- Ref = rexi:cast(Shard#shard.node,
- {dreyfus_rpc, StartFun,
- [Shard#shard.name|StartArgs1]}),
- Shard#shard{ref = Ref}.
-
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
-
-node_down_test() ->
- [S1, S2, S3] = [
- mk_shard("n1", [0, 4]),
- mk_shard("n1", [5, ?RING_END]),
- mk_shard("n2", [0, ?RING_END])
- ],
- [W1, W2, W3] = [
- S1#shard{ref = make_ref()},
- S2#shard{ref = make_ref()},
- S3#shard{ref = make_ref()}
- ],
- Counters1 = fabric_dict:init([W1, W2, W3], nil),
-
- N1 = S1#shard.node,
- Msg1 = {rexi_DOWN, nil, {nil, N1}, nil},
- Res1 = handle_error_message(Msg1, nil, Counters1, nil, nil, nil, []),
- ?assertEqual({ok, [{W3, nil}]}, Res1),
-
- {ok, Counters2} = Res1,
- N2 = S3#shard.node,
- Msg2 = {rexi_DOWN, nil, {nil, N2}, nil},
- Res2 = handle_error_message(Msg2, nil, Counters2, nil, nil, nil, []),
- ?assertEqual({error, {nodedown, <<"progress not possible">>}}, Res2).
-
-
-worker_error_test() ->
- [S1, S2] = [
- mk_shard("n1", [0, ?RING_END]),
- mk_shard("n2", [0, ?RING_END])
- ],
- [W1, W2] = [S1#shard{ref = make_ref()}, S2#shard{ref = make_ref()}],
- Counters1 = fabric_dict:init([W1, W2], nil),
-
- Res1 = handle_error(bam, W1, Counters1, []),
- ?assertEqual({ok, [{W2, nil}]}, Res1),
-
- {ok, Counters2} = Res1,
- ?assertEqual({error, boom}, handle_error(boom, W2, Counters2, [])).
-
-
-node_down_with_partitions_test() ->
- [S1, S2] = [
- mk_shard("n1", [0, 4]),
- mk_shard("n2", [0, 8])
- ],
- [W1, W2] = [
- S1#shard{ref = make_ref()},
- S2#shard{ref = make_ref()}
- ],
- Counters1 = fabric_dict:init([W1, W2], nil),
- RingOpts = [{any, [S1, S2]}],
-
- N1 = S1#shard.node,
- Msg1 = {rexi_DOWN, nil, {nil, N1}, nil},
- Res1 = handle_error_message(Msg1, nil, Counters1, nil, nil, nil, RingOpts),
- ?assertEqual({ok, [{W2, nil}]}, Res1),
-
- {ok, Counters2} = Res1,
- N2 = S2#shard.node,
- Msg2 = {rexi_DOWN, nil, {nil, N2}, nil},
- Res2 = handle_error_message(Msg2, nil, Counters2, nil, nil, nil, RingOpts),
- ?assertEqual({error, {nodedown, <<"progress not possible">>}}, Res2).
-
-
-worker_error_with_partitions_test() ->
- [S1, S2] = [
- mk_shard("n1", [0, 4]),
- mk_shard("n2", [0, 8])],
- [W1, W2] = [
- S1#shard{ref = make_ref()},
- S2#shard{ref = make_ref()}
- ],
- Counters1 = fabric_dict:init([W1, W2], nil),
- RingOpts = [{any, [S1, S2]}],
-
- Res1 = handle_error(bam, W1, Counters1, RingOpts),
- ?assertEqual({ok, [{W2, nil}]}, Res1),
-
- {ok, Counters2} = Res1,
- ?assertEqual({error, boom}, handle_error(boom, W2, Counters2, RingOpts)).
-
-
-mk_shard(Name, Range) ->
- Node = list_to_atom(Name),
- BName = list_to_binary(Name),
- #shard{name = BName, node = Node, range = Range}.
-
--endif.
diff --git a/src/dreyfus/src/dreyfus_fabric_cleanup.erl b/src/dreyfus/src/dreyfus_fabric_cleanup.erl
deleted file mode 100644
index 681712748..000000000
--- a/src/dreyfus/src/dreyfus_fabric_cleanup.erl
+++ /dev/null
@@ -1,78 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_fabric_cleanup).
-
--include("dreyfus.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--export([go/1]).
-
-go(DbName) ->
- {ok, DesignDocs} = fabric:design_docs(DbName),
- ActiveSigs = lists:usort(lists:flatmap(fun active_sigs/1,
- [couch_doc:from_json_obj(DD) || DD <- DesignDocs])),
- cleanup_local_purge_doc(DbName, ActiveSigs),
- clouseau_rpc:cleanup(DbName, ActiveSigs),
- ok.
-
-active_sigs(#doc{body={Fields}}=Doc) ->
- try
- {RawIndexes} = couch_util:get_value(<<"indexes">>, Fields, {[]}),
- {IndexNames, _} = lists:unzip(RawIndexes),
- [begin
- {ok, Index} = dreyfus_index:design_doc_to_index(Doc, IndexName),
- Index#index.sig
- end || IndexName <- IndexNames]
- catch error:{badmatch, _Error} ->
- []
- end.
-
-cleanup_local_purge_doc(DbName, ActiveSigs) ->
- {ok, BaseDir} = clouseau_rpc:get_root_dir(),
- DbNamePattern = <<DbName/binary, ".*">>,
- Pattern0 = filename:join([BaseDir, "shards", "*", DbNamePattern, "*"]),
- Pattern = binary_to_list(iolist_to_binary(Pattern0)),
- DirListStrs = filelib:wildcard(Pattern),
- DirList = [iolist_to_binary(DL) || DL <- DirListStrs],
- LocalShards = mem3:local_shards(DbName),
- ActiveDirs = lists:foldl(fun(LS, AccOuter) ->
- lists:foldl(fun(Sig, AccInner) ->
- DirName = filename:join([BaseDir, LS#shard.name, Sig]),
- [DirName | AccInner]
- end, AccOuter, ActiveSigs)
- end, [], LocalShards),
-
- DeadDirs = DirList -- ActiveDirs,
- lists:foreach(fun(IdxDir) ->
- Sig = dreyfus_util:get_signature_from_idxdir(IdxDir),
- case Sig of undefined -> ok; _ ->
- DocId = dreyfus_util:get_local_purge_doc_id(Sig),
- LocalShards = mem3:local_shards(DbName),
- lists:foreach(fun(LS) ->
- ShardDbName = LS#shard.name,
- {ok, ShardDb} = couch_db:open_int(ShardDbName, []),
- case couch_db:open_doc(ShardDb, DocId, []) of
- {ok, LocalPurgeDoc} ->
- couch_db:update_doc(ShardDb,
- LocalPurgeDoc#doc{deleted=true}, [?ADMIN_CTX]);
- {not_found, _} ->
- ok
- end,
- couch_db:close(ShardDb)
- end, LocalShards)
- end
- end, DeadDirs).
diff --git a/src/dreyfus/src/dreyfus_fabric_group1.erl b/src/dreyfus/src/dreyfus_fabric_group1.erl
deleted file mode 100644
index bdae6f040..000000000
--- a/src/dreyfus/src/dreyfus_fabric_group1.erl
+++ /dev/null
@@ -1,129 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_fabric_group1).
-
--include("dreyfus.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--export([go/4]).
-
--record(state, {
- limit,
- sort,
- top_groups,
- counters,
- start_args,
- replacements,
- ring_opts
-}).
-
-go(DbName, GroupId, IndexName, QueryArgs) when is_binary(GroupId) ->
- {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, []),
- dreyfus_util:maybe_deny_index(DbName, GroupId, IndexName),
- go(DbName, DDoc, IndexName, QueryArgs);
-
-go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) ->
- DesignName = dreyfus_util:get_design_docid(DDoc),
- dreyfus_util:maybe_deny_index(DbName, DesignName, IndexName),
- Shards = dreyfus_util:get_shards(DbName, QueryArgs),
- RingOpts = dreyfus_util:get_ring_opts(QueryArgs, Shards),
- Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, group1, [DDoc,
- IndexName, dreyfus_util:export(QueryArgs)]),
- Replacements = fabric_view:get_shard_replacements(DbName, Workers),
- Counters = fabric_dict:init(Workers, nil),
- RexiMon = fabric_util:create_monitors(Workers),
- State = #state{
- limit = QueryArgs#index_query_args.grouping#grouping.limit,
- sort = QueryArgs#index_query_args.grouping#grouping.sort,
- top_groups = [],
- counters = Counters,
- start_args = [DDoc, IndexName, QueryArgs],
- replacements = Replacements,
- ring_opts = RingOpts
- },
- try
- rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
- State, infinity, 1000 * 60 * 60)
- after
- rexi_monitor:stop(RexiMon),
- fabric_util:cleanup(Workers)
- end;
-go(DbName, DDoc, IndexName, OldArgs) ->
- go(DbName, DDoc, IndexName, dreyfus_util:upgrade(OldArgs)).
-
-handle_message({ok, NewTopGroups}, Shard, State0) ->
- State = upgrade_state(State0),
- #state{top_groups=TopGroups, limit=Limit, sort=Sort} = State,
- case fabric_dict:lookup_element(Shard, State#state.counters) of
- undefined ->
- %% already heard from someone else in this range
- {ok, State};
- nil ->
- C1 = fabric_dict:store(Shard, ok, State#state.counters),
- C2 = fabric_view:remove_overlapping_shards(Shard, C1),
- MergedTopGroups = merge_top_groups(TopGroups, make_sortable(Shard, NewTopGroups), Limit, Sort),
- State1 = State#state{
- counters=C2,
- top_groups=MergedTopGroups
- },
- case fabric_dict:any(nil, C2) of
- true ->
- {ok, State1};
- false ->
- {stop, remove_sortable(MergedTopGroups)}
- end
- end;
-
-handle_message(Error, Worker, State0) ->
- State = upgrade_state(State0),
- case dreyfus_fabric:handle_error_message(Error, Worker,
- State#state.counters, State#state.replacements,
- group1, State#state.start_args, State#state.ring_opts) of
- {ok, Counters} ->
- {ok, State#state{counters=Counters}};
- {new_refs, NewRefs, NewCounters, NewReplacements} ->
- NewState = State#state{
- counters = NewCounters,
- replacements = NewReplacements
- },
- {new_refs, NewRefs, NewState};
- Else ->
- Else
- end.
-
-merge_top_groups(TopGroupsA, TopGroupsB, Limit, Sort) ->
- MergedGroups0 = TopGroupsA ++ TopGroupsB,
- GNs = lists:usort([N || #sortable{item={N,_}} <- MergedGroups0]),
- MergedGroups = [merge_top_group(Sort, [S || #sortable{item={N,_}}=S <- MergedGroups0, N =:= GN]) || GN <- GNs],
- lists:sublist(dreyfus_util:sort(Sort, MergedGroups), Limit).
-
-merge_top_group(_Sort, [Group]) ->
- Group;
-merge_top_group(Sort, [_, _] = Groups) ->
- hd(dreyfus_util:sort(Sort, Groups)).
-
-make_sortable(Shard, TopGroups) ->
- [#sortable{item=G, order=Order, shard=Shard} || {_Name, Order}=G <- TopGroups].
-
-remove_sortable(Sortables) ->
- [Item || #sortable{item=Item} <- Sortables].
-
-upgrade_state({state, Limit, Sort, TopGroups, Counters}) ->
- #state{limit=Limit, sort=Sort, top_groups=TopGroups, counters=Counters,
- replacements=[]};
-upgrade_state(#state{}=State) ->
- State.
diff --git a/src/dreyfus/src/dreyfus_fabric_group2.erl b/src/dreyfus/src/dreyfus_fabric_group2.erl
deleted file mode 100644
index 8d864dd0c..000000000
--- a/src/dreyfus/src/dreyfus_fabric_group2.erl
+++ /dev/null
@@ -1,158 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_fabric_group2).
-
--include("dreyfus.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--export([go/4]).
-
--record(state, {
- limit,
- sort,
- total_hits,
- total_grouped_hits,
- top_groups,
- counters,
- start_args,
- replacements,
- ring_opts
-}).
-
-go(DbName, GroupId, IndexName, QueryArgs) when is_binary(GroupId) ->
- {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, []),
- dreyfus_util:maybe_deny_index(DbName, GroupId, IndexName),
- go(DbName, DDoc, IndexName, QueryArgs);
-
-go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) ->
- DesignName = dreyfus_util:get_design_docid(DDoc),
- dreyfus_util:maybe_deny_index(DbName, DesignName, IndexName),
- Shards = dreyfus_util:get_shards(DbName, QueryArgs),
- RingOpts = dreyfus_util:get_ring_opts(QueryArgs, Shards),
- Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, group2,
- [DDoc, IndexName, dreyfus_util:export(QueryArgs)]),
- Replacements = fabric_view:get_shard_replacements(DbName, Workers),
- Counters = fabric_dict:init(Workers, nil),
- RexiMon = fabric_util:create_monitors(Workers),
- State = #state{
- limit = QueryArgs#index_query_args.limit,
- sort = QueryArgs#index_query_args.sort,
- total_hits = 0,
- total_grouped_hits = 0,
- top_groups = [],
- counters = Counters,
- start_args = [DDoc, IndexName, QueryArgs],
- replacements = Replacements,
- ring_opts = RingOpts
- },
- try
- rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
- State, infinity, 1000 * 60 * 60)
- after
- rexi_monitor:stop(RexiMon),
- fabric_util:cleanup(Workers)
- end;
-go(DbName, DDoc, IndexName, OldArgs) ->
- go(DbName, DDoc, IndexName, dreyfus_util:upgrade(OldArgs)).
-
-
-handle_message({ok, NewTotalHits, NewTotalGroupedHits, NewTopGroups},
- Shard, State0) ->
- State = upgrade_state(State0),
- #state{total_hits=TotalHits, total_grouped_hits=TotalGroupedHits,
- top_groups=TopGroups, limit=Limit, sort=Sort} = State,
- case fabric_dict:lookup_element(Shard, State#state.counters) of
- undefined ->
- %% already heard from someone else in this range
- {ok, State};
- nil ->
- C1 = fabric_dict:store(Shard, ok, State#state.counters),
- C2 = fabric_view:remove_overlapping_shards(Shard, C1),
- MergedTotalHits = NewTotalHits + TotalHits,
- MergedTotalGroupedHits = NewTotalGroupedHits + TotalGroupedHits,
- Sortable = make_sortable(Shard, NewTopGroups),
- MergedTopGroups = merge_top_groups(TopGroups, Sortable, Limit, Sort),
- State1 = State#state{
- counters=C2,
- total_hits=MergedTotalHits,
- total_grouped_hits=MergedTotalGroupedHits,
- top_groups=MergedTopGroups
- },
- case fabric_dict:any(nil, C2) of
- true ->
- {ok, State1};
- false ->
- {stop, {MergedTotalHits, MergedTotalGroupedHits,
- remove_sortable(MergedTopGroups)}}
- end
- end;
-
-handle_message(Error, Worker, State0) ->
- State = upgrade_state(State0),
- case dreyfus_fabric:handle_error_message(Error, Worker,
- State#state.counters, State#state.replacements,
- group2, State#state.start_args, State#state.ring_opts) of
- {ok, Counters} ->
- {ok, State#state{counters=Counters}};
- {new_refs, NewRefs, NewCounters, NewReplacements} ->
- NewState = State#state{
- counters = NewCounters,
- replacements = NewReplacements
- },
- {new_refs, NewRefs, NewState};
- Else ->
- Else
- end.
-
-merge_top_groups([], TopGroups, _Limit, _Sort) ->
- TopGroups;
-merge_top_groups(TopGroupsA, TopGroupsB, Limit, Sort) ->
- lists:zipwith(fun(A,B) -> merge_top_group(A, B, Limit, Sort) end,
- TopGroupsA,
- TopGroupsB).
-
-merge_top_group({Name, TotalA, HitsA}, {Name, TotalB, HitsB}, Limit, Sort) ->
- MergedHits = lists:sublist(dreyfus_util:sort(Sort, HitsA ++ HitsB), Limit),
- {Name, TotalA + TotalB, MergedHits}.
-
-
-make_sortable(Shard, TopGroups) ->
- [make_sortable_group(Shard, TopGroup) || TopGroup <- TopGroups].
-
-make_sortable_group(Shard, {Name, TotalHits, Hits}) ->
- {Name, TotalHits, [make_sortable_hit(Shard, Hit) || Hit <- Hits]}.
-
-make_sortable_hit(Shard, Hit) ->
- #sortable{item=Hit, order=Hit#hit.order, shard=Shard}.
-
-remove_sortable(SortableGroups) ->
- [remove_sortable_group(G) || G <- SortableGroups].
-
-remove_sortable_group({Name, TotalHits, SortableHits}) ->
- {Name, TotalHits, [remove_sortable_hit(H) || H <- SortableHits]}.
-
-remove_sortable_hit(SortableHit) ->
- SortableHit#sortable.item.
-
-upgrade_state({state, Limit, Sort, TotalHits, TotalGroupedHits,
- TopGroups, Counters}) ->
- #state{limit = Limit, sort = Sort, total_hits = TotalHits,
- total_grouped_hits = TotalGroupedHits,
- top_groups = TopGroups, counters = Counters,
- replacements = []};
-upgrade_state(#state{} = State) ->
- State.
diff --git a/src/dreyfus/src/dreyfus_fabric_info.erl b/src/dreyfus/src/dreyfus_fabric_info.erl
deleted file mode 100644
index e217bc0ef..000000000
--- a/src/dreyfus/src/dreyfus_fabric_info.erl
+++ /dev/null
@@ -1,108 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_fabric_info).
-
--include("dreyfus.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--export([go/4]).
-
-go(DbName, DDocId, IndexName, InfoLevel) when is_binary(DDocId) ->
- {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", DDocId/binary>>, []),
- dreyfus_util:maybe_deny_index(DbName, DDocId, IndexName),
- go(DbName, DDoc, IndexName, InfoLevel);
-
-go(DbName, DDoc, IndexName, InfoLevel) ->
- DesignName = dreyfus_util:get_design_docid(DDoc),
- dreyfus_util:maybe_deny_index(DbName, DesignName, IndexName),
- Shards = mem3:shards(DbName),
- Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, InfoLevel, [DDoc, IndexName]),
- RexiMon = fabric_util:create_monitors(Shards),
- Acc0 = {fabric_dict:init(Workers, nil), []},
- try
- fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0)
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Worker, {Counters, Acc}) ->
- case fabric_util:remove_down_workers(Counters, NodeRef) of
- {ok, NewCounters} ->
- {ok, {NewCounters, Acc}};
- error ->
- {error, {nodedown, <<"progress not possible">>}}
- end;
-
-handle_message({rexi_EXIT, Reason}, Worker, {Counters, Acc}) ->
- NewCounters = fabric_dict:erase(Worker, Counters),
- case fabric_ring:is_progress_possible(NewCounters) of
- true ->
- {ok, {NewCounters, Acc}};
- false ->
- {error, Reason}
- end;
-
-handle_message({ok, Info}, Worker, {Counters, Acc}) ->
- case fabric_dict:lookup_element(Worker, Counters) of
- undefined ->
- % already heard from someone else in this range
- {ok, {Counters, Acc}};
- nil ->
- C1 = fabric_dict:store(Worker, ok, Counters),
- C2 = fabric_view:remove_overlapping_shards(Worker, C1),
- case fabric_dict:any(nil, C2) of
- true ->
- {ok, {C2, [Info|Acc]}};
- false ->
- {stop, merge_results(lists:flatten([Info|Acc]))}
- end
- end;
-
-handle_message({error, Reason}, Worker, {Counters, Acc}) ->
- NewCounters = fabric_dict:erase(Worker, Counters),
- case fabric_ring:is_progress_possible(NewCounters) of
- true ->
- {ok, {NewCounters, Acc}};
- false ->
- {error, Reason}
- end;
-handle_message({'EXIT', _}, Worker, {Counters, Acc}) ->
- NewCounters = fabric_dict:erase(Worker, Counters),
- case fabric_ring:is_progress_possible(NewCounters) of
- true ->
- {ok, {NewCounters, Acc}};
- false ->
- {error, {nodedown, <<"progress not possible">>}}
- end.
-
-merge_results(Info) ->
- Dict = lists:foldl(fun({K,V},D0) -> orddict:append(K,V,D0) end,
- orddict:new(), Info),
- orddict:fold(fun
- (disk_size, X, Acc) ->
- [{disk_size, lists:sum(X)} | Acc];
- (doc_count, X, Acc) ->
- [{doc_count, lists:sum(X)} | Acc];
- (doc_del_count, X, Acc) ->
- [{doc_del_count, lists:sum(X)} | Acc];
- (committed_seq, X, Acc) ->
- [{committed_seq, lists:sum(X)} | Acc];
- (pending_seq, X, Acc) ->
- [{pending_seq, lists:sum(X)} | Acc];
- (_, _, Acc) ->
- Acc
- end, [], Dict).
diff --git a/src/dreyfus/src/dreyfus_fabric_search.erl b/src/dreyfus/src/dreyfus_fabric_search.erl
deleted file mode 100644
index 8edaa385a..000000000
--- a/src/dreyfus/src/dreyfus_fabric_search.erl
+++ /dev/null
@@ -1,270 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_fabric_search).
-
--include("dreyfus.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--export([go/4]).
-
--record(state, {
- limit,
- sort,
- top_docs,
- counters,
- start_args,
- replacements,
- ring_opts
-}).
-
-go(DbName, GroupId, IndexName, QueryArgs) when is_binary(GroupId) ->
- {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>,
- [ejson_body]),
- dreyfus_util:maybe_deny_index(DbName, GroupId, IndexName),
- go(DbName, DDoc, IndexName, QueryArgs);
-
-go(DbName, DDoc, IndexName, #index_query_args{bookmark=nil}=QueryArgs) ->
- DesignName = dreyfus_util:get_design_docid(DDoc),
- dreyfus_util:maybe_deny_index(DbName, DesignName, IndexName),
- Shards = dreyfus_util:get_shards(DbName, QueryArgs),
- RingOpts = dreyfus_util:get_ring_opts(QueryArgs, Shards),
- Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, search,
- [DDoc, IndexName, dreyfus_util:export(QueryArgs)]),
- Counters = fabric_dict:init(Workers, nil),
- go(DbName, DDoc, IndexName, QueryArgs, Counters, Counters, RingOpts);
-
-go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) ->
- Bookmark0 = try dreyfus_bookmark:unpack(DbName, QueryArgs)
- catch
- _:_ ->
- throw({bad_request, "Invalid bookmark parameter supplied"})
- end,
- Shards = dreyfus_util:get_shards(DbName, QueryArgs),
- LiveNodes = [node() | nodes()],
- LiveShards = [S || #shard{node=Node} = S <- Shards, lists:member(Node, LiveNodes)],
- Bookmark1 = dreyfus_bookmark:add_missing_shards(Bookmark0, LiveShards),
- Counters0 = lists:flatmap(fun({#shard{name=Name, node=N} = Shard, After}) ->
- QueryArgs1 = dreyfus_util:export(QueryArgs#index_query_args{
- bookmark = After
- }),
- case lists:member(Shard, LiveShards) of
- true ->
- Ref = rexi:cast(N, {dreyfus_rpc, search,
- [Name, DDoc, IndexName, QueryArgs1]}),
- [Shard#shard{ref = Ref}];
- false ->
- lists:map(fun(#shard{name=Name2, node=N2} = NewShard) ->
- Ref = rexi:cast(N2, {dreyfus_rpc, search,
- [Name2, DDoc, IndexName, QueryArgs1]}),
- NewShard#shard{ref = Ref}
- end, find_replacement_shards(Shard, LiveShards))
- end
- end, Bookmark1),
- Counters = fabric_dict:init(Counters0, nil),
- WorkerShards = fabric_dict:fetch_keys(Counters),
- RingOpts = dreyfus_util:get_ring_opts(QueryArgs, WorkerShards),
- QueryArgs2 = QueryArgs#index_query_args{
- bookmark = Bookmark1
- },
- go(DbName, DDoc, IndexName, QueryArgs2, Counters, Bookmark1, RingOpts);
-go(DbName, DDoc, IndexName, OldArgs) ->
- go(DbName, DDoc, IndexName, dreyfus_util:upgrade(OldArgs)).
-
-go(DbName, DDoc, IndexName, QueryArgs, Counters, Bookmark, RingOpts) ->
- {Workers, _} = lists:unzip(Counters),
- #index_query_args{
- limit = Limit,
- sort = Sort,
- raw_bookmark = RawBookmark
- } = QueryArgs,
- Replacements = fabric_view:get_shard_replacements(DbName, Workers),
- State = #state{
- limit = Limit,
- sort = Sort,
- top_docs = #top_docs{total_hits=0,hits=[]},
- counters = Counters,
- start_args = [DDoc, IndexName, QueryArgs],
- replacements = Replacements,
- ring_opts = RingOpts
- },
- RexiMon = fabric_util:create_monitors(Workers),
- try rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
- State, infinity, 1000 * 60 * 60) of
- {ok, Result} ->
- #state{top_docs=TopDocs} = Result,
- #top_docs{total_hits=TotalHits, hits=Hits,
- counts=Counts, ranges=Ranges} = TopDocs,
- case RawBookmark of
- true ->
- {ok, Bookmark, TotalHits, Hits, Counts, Ranges};
- false ->
- Bookmark1 = dreyfus_bookmark:update(Sort, Bookmark, Hits),
- Hits1 = remove_sortable(Hits),
- {ok, Bookmark1, TotalHits, Hits1, Counts, Ranges}
- end;
- {error, Reason} ->
- {error, Reason}
- after
- rexi_monitor:stop(RexiMon),
- fabric_util:cleanup(Workers)
- end.
-
-handle_message({ok, #top_docs{}=NewTopDocs}, Shard, State0) ->
- State = upgrade_state(State0),
- #state{top_docs=TopDocs, limit=Limit, sort=Sort} = State,
- case fabric_dict:lookup_element(Shard, State#state.counters) of
- undefined ->
- %% already heard from someone else in this range
- {ok, State};
- nil ->
- C1 = fabric_dict:store(Shard, ok, State#state.counters),
- C2 = fabric_view:remove_overlapping_shards(Shard, C1),
- Sortable = make_sortable(Shard, NewTopDocs),
- MergedTopDocs = merge_top_docs(TopDocs, Sortable, Limit, Sort),
- State1 = State#state{
- counters=C2,
- top_docs=MergedTopDocs
- },
- case fabric_dict:any(nil, C2) of
- true ->
- {ok, State1};
- false ->
- {stop, State1}
- end
- end;
-
-% upgrade clause
-handle_message({ok, {top_docs, UpdateSeq, TotalHits, Hits}}, Shard, State) ->
- TopDocs = #top_docs{
- update_seq = UpdateSeq,
- total_hits = TotalHits,
- hits = Hits},
- handle_message({ok, TopDocs}, Shard, State);
-
-handle_message(Error, Worker, State0) ->
- State = upgrade_state(State0),
- case dreyfus_fabric:handle_error_message(Error, Worker,
- State#state.counters, State#state.replacements,
- search, State#state.start_args, State#state.ring_opts) of
- {ok, Counters} ->
- {ok, State#state{counters=Counters}};
- {new_refs, NewRefs, NewCounters, NewReplacements} ->
- NewState = State#state{
- counters = NewCounters,
- replacements = NewReplacements
- },
- {new_refs, NewRefs, NewState};
- Else ->
- Else
- end.
-
-find_replacement_shards(#shard{range=Range}, AllShards) ->
- [Shard || Shard <- AllShards, Shard#shard.range =:= Range].
-
-make_sortable(Shard, #top_docs{}=TopDocs) ->
- Hits = make_sortable(Shard, TopDocs#top_docs.hits),
- TopDocs#top_docs{hits=Hits};
-make_sortable(Shard, List) when is_list(List) ->
- make_sortable(Shard, List, []).
-
-make_sortable(_, [], Acc) ->
- lists:reverse(Acc);
-make_sortable(Shard, [#hit{}=Hit|Rest], Acc) ->
- make_sortable(Shard, Rest, [#sortable{item=Hit, order=Hit#hit.order, shard=Shard} | Acc]).
-
-remove_sortable(List) ->
- remove_sortable(List, []).
-
-remove_sortable([], Acc) ->
- lists:reverse(Acc);
-remove_sortable([#sortable{item=Item} | Rest], Acc) ->
- remove_sortable(Rest, [Item | Acc]).
-
-merge_top_docs(#top_docs{}=TopDocsA, #top_docs{}=TopDocsB, Limit, Sort) ->
- MergedTotal = sum_element(#top_docs.total_hits, TopDocsA, TopDocsB),
- MergedHits = lists:sublist(dreyfus_util:sort(Sort,
- TopDocsA#top_docs.hits ++ TopDocsB#top_docs.hits), Limit),
- MergedCounts = merge_facets(TopDocsA#top_docs.counts, TopDocsB#top_docs.counts),
- MergedRanges = merge_facets(TopDocsA#top_docs.ranges, TopDocsB#top_docs.ranges),
- #top_docs{total_hits=MergedTotal, hits=MergedHits,
- counts=MergedCounts, ranges=MergedRanges}.
-
-merge_facets(undefined, undefined) ->
- undefined;
-merge_facets(undefined, Facets) ->
- sort_facets(Facets);
-merge_facets(Facets, undefined) ->
- sort_facets(Facets);
-merge_facets(FacetsA, FacetsB) ->
- merge_facets_int(sort_facets(FacetsA), sort_facets(FacetsB)).
-
-merge_facets_int([], []) ->
- [];
-merge_facets_int(FacetsA, []) ->
- FacetsA;
-merge_facets_int([], FacetsB) ->
- FacetsB;
-merge_facets_int([{KA, _, _}=A | RA], [{KB, _, _} | _]=FB) when KA < KB ->
- [A | merge_facets_int(RA, FB)];
-merge_facets_int([{KA, VA, CA} | RA], [{KB, VB, CB} | RB]) when KA =:= KB ->
- [{KA, VA+VB, merge_facets_int(CA, CB)} | merge_facets_int(RA, RB)];
-merge_facets_int([{KA, _, _} | _]=FA, [{KB, _, _}=B | RB]) when KA > KB ->
- [B | merge_facets_int(FA, RB)].
-
-sort_facets([]) ->
- [];
-sort_facets(Facets) ->
- lists:sort(lists:map(fun({K, V, C}) -> {K, V, sort_facets(C)} end,
- Facets)).
-
-sum_element(N, T1, T2) ->
- element(N, T1) + element(N, T2).
-
-upgrade_state({state, Limit, Sort, TopDocs, Counters}) ->
- #state{limit=Limit, sort=Sort, top_docs=TopDocs, counters=Counters,
- replacements=[]};
-upgrade_state(#state{}=State) ->
- State.
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-merge_facets_test() ->
- % empty list is a no-op
- ?assertEqual([{foo, 1.0, []}], merge_facets([{foo, 1.0, []}], [])),
-
- % one level, one key
- ?assertEqual([{foo, 3.0, []}],
- merge_facets([{foo, 1.0, []}],
- [{foo, 2.0, []}])),
-
- % one level, two keys
- ?assertEqual([{bar, 6.0, []}, {foo, 9.0, []}],
- merge_facets([{foo, 1.0, []}, {bar, 2.0, []}],
- [{bar, 4.0, []}, {foo, 8.0, []}])),
-
- % multi level, multi keys
- ?assertEqual([{foo, 2.0, [{bar, 2.0, []}]}],
- merge_facets([{foo, 1.0, [{bar, 1.0, []}]}],
- [{foo, 1.0, [{bar, 1.0, []}]}])),
-
- ?assertEqual([{foo, 5.0, [{bar, 7.0, [{bar, 1.0, []}, {baz, 3.0, []}, {foo, 6.5, []}]}]}],
- merge_facets([{foo, 1.0, [{bar, 2.0, [{baz, 3.0, []}, {foo, 0.5, []}]}]}],
- [{foo, 4.0, [{bar, 5.0, [{foo, 6.0, []}, {bar, 1.0, []}]}]}])).
-
-
--endif.
diff --git a/src/dreyfus/src/dreyfus_httpd.erl b/src/dreyfus/src/dreyfus_httpd.erl
deleted file mode 100644
index 007dace8f..000000000
--- a/src/dreyfus/src/dreyfus_httpd.erl
+++ /dev/null
@@ -1,614 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_httpd).
-
--export([handle_search_req/3, handle_info_req/3, handle_disk_size_req/3,
- handle_cleanup_req/2, handle_analyze_req/1]).
-
--include("dreyfus.hrl").
--include_lib("couch/include/couch_db.hrl").
--import(chttpd, [send_method_not_allowed/2, send_json/2, send_json/3,
- send_error/2]).
-
-handle_search_req(Req, Db, DDoc) ->
- handle_search_req(Req, Db, DDoc, 0, 500).
-
-handle_search_req(#httpd{method=Method, path_parts=[_, _, _, _, IndexName]}=Req
- ,Db, DDoc, RetryCount, RetryPause)
- when Method == 'GET'; Method == 'POST' ->
- DbName = couch_db:name(Db),
- Start = os:timestamp(),
- QueryArgs = #index_query_args{
- include_docs = IncludeDocs,
- grouping = Grouping
- } = parse_index_params(Req, Db),
- validate_search_restrictions(Db, DDoc, QueryArgs),
- Response = case Grouping#grouping.by of
- nil ->
- case dreyfus_fabric_search:go(DbName, DDoc, IndexName, QueryArgs) of
- {ok, Bookmark0, TotalHits, Hits0} -> % legacy clause
- Hits = hits_to_json(DbName, IncludeDocs, Hits0),
- Bookmark = dreyfus_bookmark:pack(Bookmark0),
- send_json(Req, 200, {[
- {total_rows, TotalHits},
- {bookmark, Bookmark},
- {rows, Hits}
- ]});
- {ok, Bookmark0, TotalHits, Hits0, Counts0, Ranges0} ->
- Hits = hits_to_json(DbName, IncludeDocs, Hits0),
- Bookmark = dreyfus_bookmark:pack(Bookmark0),
- Counts = case Counts0 of
- undefined ->
- [];
- _ ->
- [{counts, facets_to_json(Counts0)}]
- end,
- Ranges = case Ranges0 of
- undefined ->
- [];
- _ ->
- [{ranges, facets_to_json(Ranges0)}]
- end,
- send_json(Req, 200, {[
- {total_rows, TotalHits},
- {bookmark, Bookmark},
- {rows, Hits}
- ] ++ Counts ++ Ranges
- });
- {error, Reason} ->
- handle_error(Req, Db, DDoc, RetryCount, RetryPause, Reason)
- end;
- _ ->
- % ensure limit in group query >0
- UseNewApi = Grouping#grouping.new_api,
- case dreyfus_fabric_group1:go(DbName, DDoc, IndexName, QueryArgs) of
- {ok, []} ->
- send_grouped_response(Req, {0, 0, []}, UseNewApi);
- {ok, TopGroups} ->
- QueryArgs1 = QueryArgs#index_query_args{grouping=Grouping#grouping{groups=TopGroups}},
- case dreyfus_fabric_group2:go(DbName, DDoc,
- IndexName, QueryArgs1) of
- {ok, {TotalHits, TotalGroupedHits, Groups0}} ->
- Groups = [group_to_json(DbName, IncludeDocs, Group, UseNewApi) || Group <- Groups0],
- send_grouped_response(Req, {TotalHits, TotalGroupedHits, Groups}, UseNewApi);
- {error, Reason} ->
- handle_error(Req, Db, DDoc, RetryCount, RetryPause, Reason)
- end;
- {error, Reason} ->
- handle_error(Req, Db, DDoc, RetryCount, RetryPause, Reason)
- end
- end,
- RequestTime = timer:now_diff(os:timestamp(), Start) div 1000,
- couch_stats:update_histogram([dreyfus, httpd, search], RequestTime),
- Response;
-handle_search_req(#httpd{path_parts=[_, _, _, _, _]}=Req, _Db, _DDoc, _RetryCount, _RetryPause) ->
- send_method_not_allowed(Req, "GET,POST");
-handle_search_req(Req, _Db, _DDoc, _RetryCount, _RetryPause) ->
- send_error(Req, {bad_request, "path not recognized"}).
-
-handle_info_req(#httpd{method='GET', path_parts=[_, _, _, _, IndexName]}=Req
- ,Db, #doc{id=Id}=DDoc) ->
- DbName = couch_db:name(Db),
- case dreyfus_fabric_info:go(DbName, DDoc, IndexName, info) of
- {ok, IndexInfoList} ->
- send_json(Req, 200, {[
- {name, <<Id/binary,"/",IndexName/binary>>},
- {search_index, {IndexInfoList}}
- ]});
- {error, Reason} ->
- send_error(Req, Reason)
- end;
-handle_info_req(#httpd{path_parts=[_, _, _, _, _]}=Req, _Db, _DDoc) ->
- send_method_not_allowed(Req, "GET");
-handle_info_req(Req, _Db, _DDoc) ->
- send_error(Req, {bad_request, "path not recognized"}).
-
-handle_disk_size_req(#httpd{method='GET', path_parts=[_, _, _, _, IndexName]}=Req, Db, #doc{id=Id}=DDoc) ->
- DbName = couch_db:name(Db),
- case dreyfus_fabric_info:go(DbName, DDoc, IndexName, disk_size) of
- {ok, IndexInfoList} ->
- send_json(Req, 200, {[
- {name, <<Id/binary,"/",IndexName/binary>>},
- {search_index, {IndexInfoList}}
- ]});
- {error, Reason} ->
- send_error(Req, Reason)
- end;
-handle_disk_size_req(#httpd{path_parts=[_, _, _, _, _]}=Req, _Db, _DDoc) ->
- send_method_not_allowed(Req, "GET");
-handle_disk_size_req(Req, _Db, _DDoc) ->
- send_error(Req, {bad_request, "path not recognized"}).
-
-handle_cleanup_req(#httpd{method='POST'}=Req, Db) ->
- ok = dreyfus_fabric_cleanup:go(couch_db:name(Db)),
- send_json(Req, 202, {[{ok, true}]});
-handle_cleanup_req(Req, _Db) ->
- send_method_not_allowed(Req, "POST").
-
-handle_analyze_req(#httpd{method='GET'}=Req) ->
- Analyzer = couch_httpd:qs_value(Req, "analyzer"),
- Text = couch_httpd:qs_value(Req, "text"),
- analyze(Req, Analyzer, Text);
-handle_analyze_req(#httpd{method='POST'}=Req) ->
- couch_httpd:validate_ctype(Req, "application/json"),
- {Fields} = chttpd:json_body_obj(Req),
- Analyzer = couch_util:get_value(<<"analyzer">>, Fields),
- Text = couch_util:get_value(<<"text">>, Fields),
- analyze(Req, Analyzer, Text);
-handle_analyze_req(Req) ->
- send_method_not_allowed(Req, "GET,POST").
-
-analyze(Req, Analyzer, Text) ->
- case Analyzer of
- undefined ->
- throw({bad_request, "analyzer parameter is mandatory"});
- _ when is_list(Analyzer) ->
- ok;
- _ when is_binary(Analyzer) ->
- ok;
- {[_|_]} ->
- ok;
- _ ->
- throw({bad_request, "analyzer parameter must be a string or an object"})
- end,
- case Text of
- undefined ->
- throw({bad_request, "text parameter is mandatory"});
- _ when is_list(Text) ->
- ok;
- _ when is_binary(Text) ->
- ok;
- _ ->
- throw({bad_request, "text parameter must be a string"})
- end,
- case clouseau_rpc:analyze(couch_util:to_binary(Analyzer),
- couch_util:to_binary(Text)) of
- {ok, Tokens} ->
- send_json(Req, 200, {[{tokens, Tokens}]});
- {error, Reason} ->
- send_error(Req, Reason)
- end.
-
-parse_index_params(#httpd{method='GET'}=Req, Db) ->
- IndexParams = lists:flatmap(fun({K, V}) -> parse_index_param(K, V) end,
- chttpd:qs(Req)),
- parse_index_params(IndexParams, Db);
-parse_index_params(#httpd{method='POST'}=Req, Db) ->
- {JsonBody} = chttpd:json_body_obj(Req),
- QSEntry = case chttpd:qs_value(Req, "partition") of
- undefined -> [];
- StrVal -> [{<<"partition">>, ?l2b(StrVal)}]
- end,
- IndexParams = lists:flatmap(fun({K, V}) ->
- parse_json_index_param(K, V)
- end, QSEntry ++ JsonBody),
- ensure_unique_partition(IndexParams),
- parse_index_params(IndexParams, Db);
-parse_index_params(IndexParams, Db) ->
- DefaultLimit = case fabric_util:is_partitioned(Db) of
- true ->
- list_to_integer(config:get("dreyfus", "limit_partitions", "2000"));
- false ->
- list_to_integer(config:get("dreyfus", "limit", "25"))
- end,
- Args = #index_query_args{limit=DefaultLimit},
- lists:foldl(fun({K, V}, Args2) ->
- validate_index_query(K, V, Args2)
- end, Args, IndexParams).
-
-validate_index_query(q, Value, Args) ->
- Args#index_query_args{q=Value};
-validate_index_query(partition, Value, Args) ->
- Args#index_query_args{partition=Value};
-validate_index_query(stale, Value, Args) ->
- Args#index_query_args{stale=Value};
-validate_index_query(limit, Value, Args) ->
- Args#index_query_args{limit=Value};
-validate_index_query(include_docs, Value, Args) ->
- Args#index_query_args{include_docs=Value};
-validate_index_query(include_fields, Value, Args) ->
- Args#index_query_args{include_fields=Value};
-validate_index_query(bookmark, Value, Args) ->
- Args#index_query_args{bookmark=Value};
-validate_index_query(sort, Value, Args) ->
- Args#index_query_args{sort=Value};
-validate_index_query(group_by, Value, #index_query_args{grouping=Grouping}=Args) ->
- Args#index_query_args{grouping=Grouping#grouping{by=Value, new_api=false}};
-validate_index_query(group_field, Value, #index_query_args{grouping=Grouping}=Args) ->
- Args#index_query_args{grouping=Grouping#grouping{by=Value, new_api=true}};
-validate_index_query(group_sort, Value, #index_query_args{grouping=Grouping}=Args) ->
- Args#index_query_args{grouping=Grouping#grouping{sort=Value}};
-validate_index_query(group_limit, Value, #index_query_args{grouping=Grouping}=Args) ->
- Args#index_query_args{grouping=Grouping#grouping{limit=Value}};
-validate_index_query(stable, Value, Args) ->
- Args#index_query_args{stable=Value};
-validate_index_query(counts, Value, Args) ->
- Args#index_query_args{counts=Value};
-validate_index_query(ranges, Value, Args) ->
- Args#index_query_args{ranges=Value};
-validate_index_query(drilldown, [[_|_]|_] = Value, Args) ->
- Args#index_query_args{drilldown=Value};
-validate_index_query(drilldown, Value, Args) ->
- DrillDown = Args#index_query_args.drilldown,
- Args#index_query_args{drilldown=[Value|DrillDown]};
-validate_index_query(highlight_fields, Value, Args) ->
- Args#index_query_args{highlight_fields=Value};
-validate_index_query(highlight_pre_tag, Value, Args) ->
- Args#index_query_args{highlight_pre_tag=Value};
-validate_index_query(highlight_post_tag, Value, Args) ->
- Args#index_query_args{highlight_post_tag=Value};
-validate_index_query(highlight_number, Value, Args) ->
- Args#index_query_args{highlight_number=Value};
-validate_index_query(highlight_size, Value, Args) ->
- Args#index_query_args{highlight_size=Value};
-validate_index_query(extra, _Value, Args) ->
- Args.
-
-parse_index_param("", _) ->
- [];
-parse_index_param("q", Value) ->
- [{q, ?l2b(Value)}];
-parse_index_param("query", Value) ->
- [{q, ?l2b(Value)}];
-parse_index_param("partition", Value) ->
- [{partition, ?l2b(Value)}];
-parse_index_param("bookmark", Value) ->
- [{bookmark, ?l2b(Value)}];
-parse_index_param("sort", Value) ->
- [{sort, ?JSON_DECODE(Value)}];
-parse_index_param("limit", Value) ->
- [{limit, ?JSON_DECODE(Value)}];
-parse_index_param("stale", "ok") ->
- [{stale, ok}];
-parse_index_param("stale", _Value) ->
- throw({query_parse_error, <<"stale only available as stale=ok">>});
-parse_index_param("include_docs", Value) ->
- [{include_docs, parse_bool_param("include_docs", Value)}];
-parse_index_param("group_by", Value) ->
- [{group_by, ?l2b(Value)}];
-parse_index_param("group_field", Value) ->
- [{group_field, ?l2b(Value)}];
-parse_index_param("group_sort", Value) ->
- [{group_sort, ?JSON_DECODE(Value)}];
-parse_index_param("group_limit", Value) ->
- [{group_limit, parse_positive_int_param("group_limit", Value, "max_group_limit", "200")}];
-parse_index_param("stable", Value) ->
- [{stable, parse_bool_param("stable", Value)}];
-parse_index_param("include_fields", Value) ->
- [{include_fields, ?JSON_DECODE(Value)}];
-parse_index_param("counts", Value) ->
- [{counts, ?JSON_DECODE(Value)}];
-parse_index_param("ranges", Value) ->
- [{ranges, ?JSON_DECODE(Value)}];
-parse_index_param("drilldown", Value) ->
- [{drilldown, ?JSON_DECODE(Value)}];
-parse_index_param("highlight_fields", Value) ->
- [{highlight_fields, ?JSON_DECODE(Value)}];
-parse_index_param("highlight_pre_tag", Value) ->
- [{highlight_pre_tag, ?JSON_DECODE(Value)}];
-parse_index_param("highlight_post_tag", Value) ->
- [{highlight_post_tag, ?JSON_DECODE(Value)}];
-parse_index_param("highlight_number", Value) ->
- [{highlight_number, parse_positive_int_param2("highlight_number", Value)}];
-parse_index_param("highlight_size", Value) ->
- [{highlight_size, parse_positive_int_param2("highlight_size", Value)}];
-parse_index_param(Key, Value) ->
- [{extra, {Key, Value}}].
-
-parse_json_index_param(<<"q">>, Value) ->
- [{q, Value}];
-parse_json_index_param(<<"query">>, Value) ->
- [{q, Value}];
-parse_json_index_param(<<"partition">>, Value) ->
- [{partition, Value}];
-parse_json_index_param(<<"bookmark">>, Value) ->
- [{bookmark, Value}];
-parse_json_index_param(<<"sort">>, Value) ->
- [{sort, Value}];
-parse_json_index_param(<<"limit">>, Value) ->
- [{limit, Value}];
-parse_json_index_param(<<"stale">>, <<"ok">>) ->
- [{stale, ok}];
-parse_json_index_param(<<"include_docs">>, Value) when is_boolean(Value) ->
- [{include_docs, Value}];
-parse_json_index_param(<<"group_by">>, Value) ->
- [{group_by, Value}];
-parse_json_index_param(<<"group_field">>, Value) ->
- [{group_field, Value}];
-parse_json_index_param(<<"group_sort">>, Value) ->
- [{group_sort, Value}];
-parse_json_index_param(<<"group_limit">>, Value) ->
- [{group_limit, parse_positive_int_param("group_limit", Value, "max_group_limit", "200")}];
-parse_json_index_param(<<"stable">>, Value) ->
- [{stable, parse_bool_param("stable", Value)}];
-parse_json_index_param(<<"include_fields">>, Value) ->
- [{include_fields, Value}];
-parse_json_index_param(<<"counts">>, Value) ->
- [{counts, Value}];
-parse_json_index_param(<<"ranges">>, Value) ->
- [{ranges, Value}];
-parse_json_index_param(<<"drilldown">>, Value) ->
- [{drilldown, Value}];
-parse_json_index_param(<<"highlight_fields">>, Value) ->
- [{highlight_fields, Value}];
-parse_json_index_param(<<"highlight_pre_tag">>, Value) ->
- [{highlight_pre_tag, Value}];
-parse_json_index_param(<<"highlight_post_tag">>, Value) ->
- [{highlight_post_tag, Value}];
-parse_json_index_param(<<"highlight_number">>, Value) ->
- [{highlight_number, parse_positive_int_param2("highlight_number", Value)}];
-parse_json_index_param(<<"highlight_size">>, Value) ->
- [{highlight_size, parse_positive_int_param2("highlight_size", Value)}];
-parse_json_index_param(Key, Value) ->
- [{extra, {Key, Value}}].
-
-%% The parameter validation helpers below were copied from chttpd_view.erl
-
-parse_bool_param(_, Val) when is_boolean(Val) ->
- Val;
-parse_bool_param(_, "true") -> true;
-parse_bool_param(_, "false") -> false;
-parse_bool_param(Name, Val) ->
- Msg = io_lib:format("Invalid value for ~s: ~p", [Name, Val]),
- throw({query_parse_error, ?l2b(Msg)}).
-
-parse_int_param(_, Val) when is_integer(Val) ->
- Val;
-parse_int_param(Name, Val) ->
- case (catch list_to_integer(Val)) of
- IntVal when is_integer(IntVal) ->
- IntVal;
- _ ->
- Msg = io_lib:format("Invalid value for ~s: ~p", [Name, Val]),
- throw({query_parse_error, ?l2b(Msg)})
- end.
-
-parse_positive_int_param(Name, Val, Prop, Default) ->
- MaximumVal = list_to_integer(
- config:get("dreyfus", Prop, Default)),
- case parse_int_param(Name, Val) of
- IntVal when IntVal > MaximumVal ->
- Fmt = "Value for ~s is too large, must not exceed ~p",
- Msg = io_lib:format(Fmt, [Name, MaximumVal]),
- throw({query_parse_error, ?l2b(Msg)});
- IntVal when IntVal > 0 ->
- IntVal;
- IntVal when IntVal =< 0 ->
- Fmt = "~s must be greater than zero",
- Msg = io_lib:format(Fmt, [Name]),
- throw({query_parse_error, ?l2b(Msg)});
- _ ->
- Fmt = "Invalid value for ~s: ~p",
- Msg = io_lib:format(Fmt, [Name, Val]),
- throw({query_parse_error, ?l2b(Msg)})
- end.
-
-parse_positive_int_param2(Name, Val) ->
- case parse_int_param(Name, Val) of
- IntVal when IntVal > 0 ->
- IntVal;
- IntVal when IntVal =< 0 ->
- Fmt = "~s must be greater than zero",
- Msg = io_lib:format(Fmt, [Name]),
- throw({query_parse_error, ?l2b(Msg)});
- _ ->
- Fmt = "Invalid value for ~s: ~p",
- Msg = io_lib:format(Fmt, [Name, Val]),
- throw({query_parse_error, ?l2b(Msg)})
- end.
-
-parse_non_negative_int_param(Name, Val, Prop, Default) ->
- MaximumVal = list_to_integer(
- config:get("dreyfus", Prop, Default)),
- case parse_int_param(Name, Val) of
- IntVal when IntVal > MaximumVal ->
- Fmt = "Value for ~s is too large, must not exceed ~p",
- Msg = io_lib:format(Fmt, [Name, MaximumVal]),
- throw({query_parse_error, ?l2b(Msg)});
- IntVal when IntVal >= 0 ->
- IntVal;
- IntVal when IntVal < 0 ->
- Fmt = "~s must be greater than or equal to zero",
- Msg = io_lib:format(Fmt, [Name]),
- throw({query_parse_error, ?l2b(Msg)});
- _ ->
- Fmt = "Invalid value for ~s: ~p",
- Msg = io_lib:format(Fmt, [Name, Val]),
- throw({query_parse_error, ?l2b(Msg)})
- end.
-
-
-ensure_unique_partition(IndexParams) ->
- Partitions = lists:filter(fun({Key, _Val}) ->
- Key == partition
- end, IndexParams),
- case length(lists:usort(Partitions)) > 1 of
- true ->
- Msg = <<"Multiple conflicting values for `partition` provided">>,
- throw({bad_request, Msg});
- false ->
- ok
- end.
-
-
-validate_search_restrictions(Db, DDoc, Args) ->
- #index_query_args{
- q = Query,
- partition = Partition,
- grouping = Grouping,
- limit = Limit,
- counts = Counts,
- drilldown = Drilldown,
- ranges = Ranges
- } = Args,
- #grouping{
- by = GroupBy,
- limit = GroupLimit,
- sort = GroupSort
- } = Grouping,
-
- case Query of
- undefined ->
- Msg1 = <<"Query must include a 'q' or 'query' argument">>,
- throw({query_parse_error, Msg1});
- _ ->
- ok
- end,
-
- DbPartitioned = fabric_util:is_partitioned(Db),
- ViewPartitioned = get_view_partition_option(DDoc, DbPartitioned),
-
- case not DbPartitioned andalso is_binary(Partition) of
- true ->
- Msg2 = <<"`partition` not supported on this index">>,
- throw({bad_request, Msg2});
- false ->
- ok
- end,
-
- case {ViewPartitioned, is_binary(Partition)} of
- {false, false} ->
- ok;
- {true, true} ->
- ok;
- {true, false} ->
- Msg3 = <<"`partition` parameter is mandatory "
- "for queries to this index.">>,
- throw({bad_request, Msg3});
- {false, true} ->
- Msg4 = <<"`partition` not supported on this index">>,
- throw({bad_request, Msg4})
- end,
-
- case DbPartitioned of
- true ->
- MaxLimit = config:get("dreyfus", "max_limit", "2000"),
- parse_non_negative_int_param(
- "limit", Limit, "max_limit_partitions", MaxLimit);
- false ->
- MaxLimit = config:get("dreyfus", "max_limit", "200"),
- parse_non_negative_int_param("limit", Limit, "max_limit", MaxLimit)
- end,
-
- DefaultArgs = #index_query_args{},
-
- case is_binary(Partition) andalso (
- Counts /= DefaultArgs#index_query_args.counts
- orelse Drilldown /= DefaultArgs#index_query_args.drilldown
- orelse Ranges /= DefaultArgs#index_query_args.ranges
- orelse GroupSort /= DefaultArgs#index_query_args.grouping#grouping.sort
- orelse GroupBy /= DefaultArgs#index_query_args.grouping#grouping.by
- orelse GroupLimit /= DefaultArgs#index_query_args.grouping#grouping.limit
- ) of
- true ->
- Msg5 = <<"`partition` and any of `drilldown`, `ranges`, `group_field`, `group_sort`, `group_limit` or `group_by` are incompatible">>,
- throw({bad_request, Msg5});
- false ->
- ok
- end.
-
-
-get_view_partition_option(#doc{body = {Props}}, Default) ->
- {Options} = couch_util:get_value(<<"options">>, Props, {[]}),
- couch_util:get_value(<<"partitioned">>, Options, Default).
-
-
-hits_to_json(DbName, IncludeDocs, Hits) ->
- {Ids, HitData} = lists:unzip(lists:map(fun get_hit_data/1, Hits)),
- chttpd_stats:incr_rows(length(Hits)),
- if IncludeDocs ->
- chttpd_stats:incr_reads(length(Hits)),
- {ok, JsonDocs} = dreyfus_fabric:get_json_docs(DbName, Ids),
- lists:zipwith(fun(Hit, {Id, Doc}) ->
- case Hit of
- {Id, Order, Fields} ->
- {[{id, Id}, {order, Order}, {fields, {Fields}}, Doc]};
- {Id, Order, Fields, Highlights} ->
- {[{id, Id}, {order, Order}, {fields, {Fields}},
- {highlights, {Highlights}}, Doc]}
- end
- end, HitData, JsonDocs);
-
- true ->
- lists:map(fun(Hit) ->
- case Hit of
- {Id, Order, Fields} ->
- {[{id, Id}, {order, Order}, {fields, {Fields}}]};
- {Id, Order, Fields, Highlights} ->
- {[{id, Id}, {order, Order}, {fields, {Fields}}, {highlights, {Highlights}}]}
- end
- end, HitData)
- end.
-
-get_hit_data(Hit) ->
- Id = couch_util:get_value(<<"_id">>, Hit#hit.fields),
- Fields = lists:keydelete(<<"_id">>, 1, Hit#hit.fields),
- case couch_util:get_value(<<"_highlights">>, Hit#hit.fields) of
- undefined ->
- {Id, {Id, Hit#hit.order, Fields}};
- Highlights ->
- Fields0 = lists:keydelete(<<"_highlights">>, 1, Fields),
- {Id, {Id, Hit#hit.order, Fields0, Highlights}}
- end.
-
-group_to_json(DbName, IncludeDocs, {Name, TotalHits, Hits}, UseNewApi) ->
- {TotalHitsKey, HitsKey} = case UseNewApi of
- true -> {total_rows, rows};
- false -> {total_hits, hits}
- end,
- {[{by, Name},
- {TotalHitsKey, TotalHits},
- {HitsKey, hits_to_json(DbName, IncludeDocs, Hits)}]}.
-
-facets_to_json(Facets) ->
- {[facet_to_json(F) || F <- Facets]}.
-
-facet_to_json({K, V, []}) ->
- {hd(K), V};
-facet_to_json({K0, _V0, C0}) ->
- C2 = [{tl(K1), V1, C1} || {K1, V1, C1} <- C0],
- {hd(K0), facets_to_json(C2)}.
-
-send_grouped_response(Req, {TotalHits, TotalGroupedHits, Groups}, UseNewApi) ->
- GroupResponsePairs = case UseNewApi of
- true -> [{total_rows, TotalHits}, {groups, Groups}];
- false -> [{total_hits, TotalHits}, {total_grouped_hits, TotalGroupedHits}, {groups, Groups}]
- end,
- send_json(Req, 200, {GroupResponsePairs}).
-
-handle_error(Req, Db, DDoc, RetryCount, RetryPause, {exit, _} = Err) ->
- backoff_and_retry(Req, Db, DDoc, RetryCount, RetryPause, Err);
-handle_error(Req, Db, DDoc, RetryCount, RetryPause, {{normal, _}, _} = Err) ->
-    backoff_and_retry(Req, Db, DDoc, RetryCount, RetryPause, Err);
-handle_error(Req, _Db, _DDoc, _RetryCount, _RetryPause, Reason) ->
- send_error(Req, Reason).
-
-backoff_and_retry(Req, Db, DDoc, RetryCount, RetryPause, Error) ->
- RetryLimit = list_to_integer(config:get("dreyfus", "retry_limit", "5")),
- case RetryCount > RetryLimit of
- true ->
- case Error of
- {exit, noconnection} ->
- SvcName = config:get("dreyfus", "name", "clouseau@127.0.0.1"),
- ErrMsg = "Could not connect to the Clouseau Java service at " ++ SvcName,
- send_error(Req, {ou_est_clouseau, ErrMsg});
- _ ->
- send_error(Req, timeout)
- end;
- false ->
- timer:sleep(RetryPause),
- handle_search_req(Req, Db, DDoc, RetryCount + 1, RetryPause * 2)
- end.
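%% A minimal, self-contained sketch of the retry pattern used by handle_error/6
%% and backoff_and_retry/6 above: each failed attempt sleeps RetryPause ms and
%% doubles the pause. The module name, the hard-coded limit of 5 and the 500 ms
%% initial pause are assumptions; the original reads "dreyfus"/"retry_limit"
%% from config and re-dispatches the whole search request instead.
-module(backoff_sketch).
-export([with_backoff/1]).

with_backoff(Fun) ->
    with_backoff(Fun, 0, 500).

with_backoff(_Fun, RetryCount, _RetryPause) when RetryCount > 5 ->
    {error, retries_exhausted};
with_backoff(Fun, RetryCount, RetryPause) ->
    case Fun() of
        {ok, _} = Ok ->
            Ok;
        {error, _} ->
            %% Sleep, then retry with twice the pause, as backoff_and_retry does.
            timer:sleep(RetryPause),
            with_backoff(Fun, RetryCount + 1, RetryPause * 2)
    end.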
diff --git a/src/dreyfus/src/dreyfus_httpd_handlers.erl b/src/dreyfus/src/dreyfus_httpd_handlers.erl
deleted file mode 100644
index bf2be23b1..000000000
--- a/src/dreyfus/src/dreyfus_httpd_handlers.erl
+++ /dev/null
@@ -1,29 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_httpd_handlers).
-
--export([url_handler/1, db_handler/1, design_handler/1]).
-
-url_handler(<<"_search_analyze">>) -> fun dreyfus_httpd:handle_analyze_req/1;
-url_handler(_) -> no_match.
-
-db_handler(<<"_search_cleanup">>) -> fun dreyfus_httpd:handle_cleanup_req/2;
-db_handler(_) -> no_match.
-
-design_handler(<<"_search">>) -> fun dreyfus_httpd:handle_search_req/3;
-design_handler(<<"_search_info">>) -> fun dreyfus_httpd:handle_info_req/3;
-design_handler(<<"_search_disk_size">>) -> fun dreyfus_httpd:handle_disk_size_req/3;
-design_handler(_) -> no_match.
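%% A hedged dispatch sketch showing how the handlers above are consumed: each
%% lookup returns either no_match or an arity-matched fun from dreyfus_httpd.
%% The wrapper name below is illustrative only and not part of CouchDB's
%% chttpd plumbing.
dispatch_design(Fragment, Req, Db, DDoc) ->
    case dreyfus_httpd_handlers:design_handler(Fragment) of
        no_match ->
            {error, not_handled};
        Handler when is_function(Handler, 3) ->
            Handler(Req, Db, DDoc)
    end.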
diff --git a/src/dreyfus/src/dreyfus_index.erl b/src/dreyfus/src/dreyfus_index.erl
deleted file mode 100644
index 7236eb16b..000000000
--- a/src/dreyfus/src/dreyfus_index.erl
+++ /dev/null
@@ -1,391 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
-%% A dreyfus_index gen_server is linked to its clouseau twin.
-
--module(dreyfus_index).
--behaviour(gen_server).
--vsn(1).
--include_lib("couch/include/couch_db.hrl").
--include("dreyfus.hrl").
-
-
-% public api.
--export([start_link/2, design_doc_to_index/2, await/2, search/2, info/1,
- group1/2, group2/2,
- design_doc_to_indexes/1]).
-
-% gen_server api.
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3, format_status/2]).
-
-% private definitions.
--record(state, {
- dbname,
- index,
- updater_pid=nil,
- index_pid=nil,
- waiting_list=[]
-}).
-
-% exported for callback.
--export([search_int/2, group1_int/2, group2_int/2, info_int/1]).
-
-% public functions.
-start_link(DbName, Index) ->
- proc_lib:start_link(?MODULE, init, [{DbName, Index}]).
-
-await(Pid, MinSeq) ->
- MFA = {gen_server, call, [Pid, {await, MinSeq}, infinity]},
- dreyfus_util:time([index, await], MFA).
-
-search(Pid0, QueryArgs) ->
- Pid = to_index_pid(Pid0),
- MFA = {?MODULE, search_int, [Pid, QueryArgs]},
- dreyfus_util:time([index, search], MFA).
-
-group1(Pid0, QueryArgs) ->
- Pid = to_index_pid(Pid0),
- MFA = {?MODULE, group1_int, [Pid, QueryArgs]},
- dreyfus_util:time([index, group1], MFA).
-
-group2(Pid0, QueryArgs) ->
- Pid = to_index_pid(Pid0),
- MFA = {?MODULE, group2_int, [Pid, QueryArgs]},
- dreyfus_util:time([index, group2], MFA).
-
-info(Pid0) ->
- Pid = to_index_pid(Pid0),
- MFA = {?MODULE, info_int, [Pid]},
- dreyfus_util:time([index, info], MFA).
-
-%% We either have a dreyfus_index gen_server pid or the remote
-%% clouseau pid.
-to_index_pid(Pid) ->
- case node(Pid) == node() of
- true -> gen_server:call(Pid, get_index_pid, infinity);
- false -> Pid
- end.
-
-design_doc_to_indexes(#doc{body={Fields}}=Doc) ->
- RawIndexes = couch_util:get_value(<<"indexes">>, Fields, {[]}),
- case RawIndexes of
- {IndexList} when is_list(IndexList) ->
- {IndexNames, _} = lists:unzip(IndexList),
- lists:flatmap(
- fun(IndexName) ->
- case (catch design_doc_to_index(Doc, IndexName)) of
- {ok, #index{}=Index} -> [Index];
- _ -> []
- end
- end,
- IndexNames);
- _ -> []
- end.
-
-% gen_server functions.
-
-init({DbName, Index}) ->
- process_flag(trap_exit, true),
- case open_index(DbName, Index) of
- {ok, Pid, Seq} ->
- State=#state{
- dbname=DbName,
- index=Index#index{current_seq=Seq, dbname=DbName},
- index_pid=Pid
- },
- case couch_db:open_int(DbName, []) of
- {ok, Db} ->
- try couch_db:monitor(Db) after couch_db:close(Db) end,
- dreyfus_util:maybe_create_local_purge_doc(Db, Pid, Index),
- proc_lib:init_ack({ok, self()}),
- gen_server:enter_loop(?MODULE, [], State);
- Error ->
- proc_lib:init_ack(Error)
- end;
- Error ->
- proc_lib:init_ack(Error)
- end.
-
-handle_call({await, RequestSeq}, From,
- #state{
- index=#index{dbname=DbName,name=IdxName,ddoc_id=DDocId,current_seq=Seq}=Index,
- index_pid=IndexPid,
- updater_pid=nil,
- waiting_list=WaitList
- }=State) when RequestSeq > Seq ->
- DbName2 = mem3:dbname(DbName),
- <<"_design/", GroupId/binary>> = DDocId,
- NewState = case dreyfus_util:in_black_list(DbName2, GroupId, IdxName) of
- false ->
- UpPid = spawn_link(fun() ->
- dreyfus_index_updater:update(IndexPid,Index)
- end),
- State#state{
- updater_pid=UpPid,
- waiting_list=[{From,RequestSeq}|WaitList]
- };
- _ ->
- couch_log:notice("Index Blocked from Updating - db: ~p,"
- " ddocid: ~p name: ~p", [DbName, DDocId, IdxName]),
- State
- end,
- {noreply, NewState};
-handle_call({await, RequestSeq}, _From,
- #state{index=#index{current_seq=Seq}}=State) when RequestSeq =< Seq ->
- {reply, {ok, State#state.index_pid, Seq}, State};
-handle_call({await, RequestSeq}, From, #state{waiting_list=WaitList}=State) ->
- {noreply, State#state{
- waiting_list=[{From,RequestSeq}|WaitList]
- }};
-
-handle_call(get_index_pid, _From, State) -> % upgrade
- {reply, State#state.index_pid, State};
-
-handle_call({search, QueryArgs0}, _From, State) -> % obsolete
- Reply = search_int(State#state.index_pid, QueryArgs0),
- {reply, Reply, State};
-
-handle_call({group1, QueryArgs0}, _From, State) -> % obsolete
- Reply = group1_int(State#state.index_pid, QueryArgs0),
- {reply, Reply, State};
-
-handle_call({group2, QueryArgs0}, _From, State) -> % obsolete
- Reply = group2_int(State#state.index_pid, QueryArgs0),
- {reply, Reply, State};
-
-handle_call(info, _From, State) -> % obsolete
- Reply = info_int(State#state.index_pid),
- {reply, Reply, State}.
-
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info({'EXIT', FromPid, {updated, NewSeq}},
- #state{
- index=#index{dbname=DbName,name=IdxName,ddoc_id=DDocId}=Index0,
- index_pid=IndexPid,
- updater_pid=UpPid,
- waiting_list=WaitList
- }=State) when UpPid == FromPid ->
- Index = Index0#index{current_seq=NewSeq},
- case reply_with_index(IndexPid, Index, WaitList) of
- [] ->
- {noreply, State#state{index=Index,
- updater_pid=nil,
- waiting_list=[]
- }};
- StillWaiting ->
- DbName2 = mem3:dbname(DbName),
- <<"_design/", GroupId/binary>> = DDocId,
- Pid = case dreyfus_util:in_black_list(DbName2, GroupId, IdxName) of
- true ->
- couch_log:notice("Index Blocked from Updating - db: ~p, ddocid: ~p"
- " name: ~p", [DbName, GroupId, IdxName]),
- nil;
- false ->
- spawn_link(fun() ->
- dreyfus_index_updater:update(IndexPid, Index)
- end)
- end,
- {noreply, State#state{index=Index,
- updater_pid=Pid,
- waiting_list=StillWaiting
- }}
- end;
-handle_info({'EXIT', _, {updated, _}}, State) ->
- {noreply, State};
-handle_info({'EXIT', FromPid, Reason}, #state{
- index=Index,
- index_pid=IndexPid,
- waiting_list=WaitList
- }=State) when FromPid == IndexPid ->
- couch_log:notice(
- "index for ~p closed with reason ~p", [index_name(Index), Reason]),
- [gen_server:reply(Pid, {error, Reason}) || {Pid, _} <- WaitList],
- {stop, normal, State};
-handle_info({'EXIT', FromPid, Reason}, #state{
- index=Index,
- updater_pid=UpPid,
- waiting_list=WaitList
- }=State) when FromPid == UpPid ->
- couch_log:info("Shutting down index server ~p, updater ~p closing w/ reason ~w",
- [index_name(Index), UpPid, Reason]),
- [gen_server:reply(Pid, {error, Reason}) || {Pid, _} <- WaitList],
- {stop, normal, State};
-handle_info({'EXIT', Pid, Reason}, State) ->
- % probably dreyfus_index_manager.
- couch_log:notice("Unknown pid ~p closed with reason ~p", [Pid, Reason]),
- {stop, normal, State};
-handle_info({'DOWN',_,_,Pid,Reason}, #state{
- index=Index,
- waiting_list=WaitList
- }=State) ->
- couch_log:info("Shutting down index server ~p, db ~p closing w/ reason ~w",
- [index_name(Index), Pid, Reason]),
- [gen_server:reply(P, {error, Reason}) || {P, _} <- WaitList],
- {stop, normal, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-format_status(_Opt, [_PDict, #state{index = #index{} = Index} = State]) ->
- #index{
- ddoc_id=Id,
- name=IndexName,
- sig=Sig
- } = Index,
- IndexScrubbed = [{
- {ddoc_id, Id},
- {name, IndexName},
- {sig, Sig}
- }],
- Scrubbed = State#state{
- index = IndexScrubbed,
- waiting_list = {length, length(State#state.waiting_list)}
- },
- ?record_to_keyval(state, Scrubbed);
-
-format_status(_Opt, [_PDict, #state{} = State]) ->
- Scrubbed = State#state{
- index = nil,
- waiting_list = {length, length(State#state.waiting_list)}
- },
- ?record_to_keyval(state, Scrubbed).
-
-% private functions.
-
-open_index(DbName, #index{analyzer=Analyzer, sig=Sig}) ->
- Path = <<DbName/binary,"/",Sig/binary>>,
- case clouseau_rpc:open_index(self(), Path, Analyzer) of
- {ok, Pid} ->
- case clouseau_rpc:get_update_seq(Pid) of
- {ok, Seq} ->
- {ok, Pid, Seq};
- Error ->
- Error
- end;
- Error ->
- Error
- end.
-
-design_doc_to_index(#doc{id=Id,body={Fields}}, IndexName) ->
- Language = couch_util:get_value(<<"language">>, Fields, <<"javascript">>),
- {RawIndexes} = couch_util:get_value(<<"indexes">>, Fields, {[]}),
- InvalidDDocError = {invalid_design_doc,
- <<"index `", IndexName/binary, "` must have parameter `index`">>},
- case lists:keyfind(IndexName, 1, RawIndexes) of
- false ->
- {error, {not_found, <<IndexName/binary, " not found.">>}};
- {IndexName, {Index}} ->
- Analyzer = couch_util:get_value(<<"analyzer">>, Index, <<"standard">>),
- case couch_util:get_value(<<"index">>, Index) of
- undefined ->
- {error, InvalidDDocError};
- Def ->
- Sig = ?l2b(couch_util:to_hex(couch_hash:md5_hash(
- term_to_binary({Analyzer, Def})))),
- {ok, #index{
- analyzer=Analyzer,
- ddoc_id=Id,
- def=Def,
- def_lang=Language,
- name=IndexName,
- sig=Sig}}
- end;
- _ ->
- {error, InvalidDDocError}
- end.
-
-reply_with_index(IndexPid, Index, WaitList) ->
- reply_with_index(IndexPid, Index, WaitList, []).
-
-reply_with_index(_IndexPid, _Index, [], Acc) ->
- Acc;
-reply_with_index(IndexPid, #index{current_seq=IndexSeq}=Index, [{Pid, Seq}|Rest], Acc) when Seq =< IndexSeq ->
- gen_server:reply(Pid, {ok, IndexPid, IndexSeq}),
- reply_with_index(IndexPid, Index, Rest, Acc);
-reply_with_index(IndexPid, Index, [{Pid, Seq}|Rest], Acc) ->
- reply_with_index(IndexPid, Index, Rest, [{Pid, Seq}|Acc]).
-
-index_name(#index{dbname=DbName,ddoc_id=DDocId,name=IndexName}) ->
- <<DbName/binary, " ", DDocId/binary, " ", IndexName/binary>>.
-
-args_to_proplist(#index_query_args{} = Args) ->
- [
- {'query', Args#index_query_args.q},
- {partition, Args#index_query_args.partition},
- {limit, Args#index_query_args.limit},
- {refresh, Args#index_query_args.stale =:= false},
- {'after', Args#index_query_args.bookmark},
- {sort, Args#index_query_args.sort},
- {include_fields, Args#index_query_args.include_fields},
- {counts, Args#index_query_args.counts},
- {ranges, Args#index_query_args.ranges},
- {drilldown, Args#index_query_args.drilldown},
- {highlight_fields, Args#index_query_args.highlight_fields},
- {highlight_pre_tag, Args#index_query_args.highlight_pre_tag},
- {highlight_post_tag, Args#index_query_args.highlight_post_tag},
- {highlight_number, Args#index_query_args.highlight_number},
- {highlight_size, Args#index_query_args.highlight_size}
- ].
-
-args_to_proplist2(#index_query_args{} = Args) ->
- [
- {'query', Args#index_query_args.q},
- {field, Args#index_query_args.grouping#grouping.by},
- {refresh, Args#index_query_args.stale =:= false},
- {groups, Args#index_query_args.grouping#grouping.groups},
- {group_sort, Args#index_query_args.grouping#grouping.sort},
- {sort, Args#index_query_args.sort},
- {limit, Args#index_query_args.limit},
- {include_fields, Args#index_query_args.include_fields},
- {highlight_fields, Args#index_query_args.highlight_fields},
- {highlight_pre_tag, Args#index_query_args.highlight_pre_tag},
- {highlight_post_tag, Args#index_query_args.highlight_post_tag},
- {highlight_number, Args#index_query_args.highlight_number},
- {highlight_size, Args#index_query_args.highlight_size}
- ].
-
-search_int(Pid, QueryArgs0) ->
- QueryArgs = dreyfus_util:upgrade(QueryArgs0),
- Props = args_to_proplist(QueryArgs),
- clouseau_rpc:search(Pid, Props).
-
-group1_int(Pid, QueryArgs0) ->
- QueryArgs = dreyfus_util:upgrade(QueryArgs0),
- #index_query_args{
- q = Query,
- stale = Stale,
- grouping = #grouping{
- by = GroupBy,
- offset = Offset,
- limit = Limit,
- sort = Sort
- }
- } = QueryArgs,
- clouseau_rpc:group1(Pid, Query, GroupBy, Stale =:= false, Sort,
- Offset, Limit).
-
-group2_int(Pid, QueryArgs0) ->
- QueryArgs = dreyfus_util:upgrade(QueryArgs0),
- Props = args_to_proplist2(QueryArgs),
- clouseau_rpc:group2(Pid, Props).
-
-info_int(Pid) ->
- clouseau_rpc:info(Pid).
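%% A condensed sketch (the helper name is illustrative) of the usual calling
%% sequence for this module: obtain the index process from the manager, wait
%% until it has indexed up to MinSeq, then query the returned clouseau pid.
%% dreyfus_rpc:call/5 later in this diff follows the same pattern.
search_when_ready(DbName, Index, MinSeq, QueryArgs) ->
    {ok, Pid} = dreyfus_index_manager:get_index(DbName, Index),
    {ok, IndexPid, _Seq} = dreyfus_index:await(Pid, MinSeq),
    dreyfus_index:search(IndexPid, QueryArgs).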
diff --git a/src/dreyfus/src/dreyfus_index_manager.erl b/src/dreyfus/src/dreyfus_index_manager.erl
deleted file mode 100644
index 47f254243..000000000
--- a/src/dreyfus/src/dreyfus_index_manager.erl
+++ /dev/null
@@ -1,153 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_index_manager).
--behaviour(gen_server).
--vsn(1).
--include_lib("couch/include/couch_db.hrl").
--include("dreyfus.hrl").
-
--define(BY_SIG, dreyfus_by_sig).
--define(BY_PID, dreyfus_by_pid).
-
-% public api.
--export([start_link/0, get_index/2, get_disk_size/2]).
-
-% gen_server api.
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3]).
-
--export([handle_db_event/3]).
-
-% public functions.
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-get_index(DbName, Index) ->
- gen_server:call(?MODULE, {get_index, DbName, Index}, infinity).
-
-get_disk_size(DbName, #index{sig=Sig}) ->
- Path = <<DbName/binary, "/", Sig/binary>>,
- clouseau_rpc:disk_size(Path).
-
-% gen_server functions.
-
-init([]) ->
- ets:new(?BY_SIG, [set, private, named_table]),
- ets:new(?BY_PID, [set, private, named_table]),
- couch_event:link_listener(?MODULE, handle_db_event, nil, [all_dbs]),
- process_flag(trap_exit, true),
- {ok, nil}.
-
-handle_call({get_index, DbName, #index{sig=Sig}=Index}, From, State) ->
- case ets:lookup(?BY_SIG, {DbName, Sig}) of
- [] ->
- Pid = spawn_link(fun() -> new_index(DbName, Index) end),
- ets:insert(?BY_PID, {Pid, opening, {DbName, Sig}}),
- ets:insert(?BY_SIG, {{DbName,Sig}, [From]}),
- {noreply, State};
- [{_, WaitList}] when is_list(WaitList) ->
- ets:insert(?BY_SIG, {{DbName, Sig}, [From | WaitList]}),
- {noreply, State};
- [{_, ExistingPid}] ->
- {reply, {ok, ExistingPid}, State}
- end;
-
-handle_call({open_ok, DbName, Sig, NewPid}, {OpenerPid, _}, State) ->
- link(NewPid),
- [{_, WaitList}] = ets:lookup(?BY_SIG, {DbName, Sig}),
- [gen_server:reply(From, {ok, NewPid}) || From <- WaitList],
- ets:delete(?BY_PID, OpenerPid),
- add_to_ets(NewPid, DbName, Sig),
- {reply, ok, State};
-
-handle_call({open_error, DbName, Sig, Error}, {OpenerPid, _}, State) ->
- [{_, WaitList}] = ets:lookup(?BY_SIG, {DbName, Sig}),
- [gen_server:reply(From, Error) || From <- WaitList],
- ets:delete(?BY_PID, OpenerPid),
- ets:delete(?BY_SIG, {DbName, Sig}),
- {reply, ok, State}.
-
-handle_cast({cleanup, DbName}, State) ->
- clouseau_rpc:cleanup(DbName),
- {noreply, State};
-
-handle_cast({rename, DbName}, State) ->
- clouseau_rpc:rename(DbName),
- {noreply, State}.
-
-handle_info({'EXIT', FromPid, Reason}, State) ->
- case ets:lookup(?BY_PID, FromPid) of
- [] ->
- if Reason =/= normal ->
- couch_log:error("Exit on non-updater process: ~p", [Reason]),
- exit(Reason);
- true -> ok
- end;
- % Using Reason /= normal to force a match error
- % if we didn't delete the Pid in a handle_call
- % message for some reason.
- [{_, opening, {DbName, Sig}}] when Reason /= normal ->
- Msg = {open_error, DbName, Sig, Reason},
- {reply, ok, _} = handle_call(Msg, {FromPid, nil}, State);
- [{_, {DbName, Sig}}] ->
- delete_from_ets(FromPid, DbName, Sig)
- end,
- {noreply, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, nil, _Extra) ->
- {ok, nil}.
-
-% private functions
-
-handle_db_event(DbName, created, _St) ->
- gen_server:cast(?MODULE, {cleanup, DbName}),
- {ok, nil};
-handle_db_event(DbName, deleted, _St) ->
- RecoveryEnabled = config:get_boolean("couchdb",
- "enable_database_recovery", false),
- case RecoveryEnabled of
- true ->
- gen_server:cast(?MODULE, {rename, DbName});
- false ->
- gen_server:cast(?MODULE, {cleanup, DbName})
- end,
-
- {ok, nil};
-handle_db_event(_DbName, _Event, _St) ->
- {ok, nil}.
-
-new_index(DbName, #index{sig=Sig}=Index) ->
- case (catch dreyfus_index:start_link(DbName, Index)) of
- {ok, NewPid} ->
- Msg = {open_ok, DbName, Sig, NewPid},
- ok = gen_server:call(?MODULE, Msg, infinity),
- unlink(NewPid);
- Error ->
- Msg = {open_error, DbName, Sig, Error},
- ok = gen_server:call(?MODULE, Msg, infinity)
- end.
-
-add_to_ets(Pid, DbName, Sig) ->
- true = ets:insert(?BY_PID, {Pid, {DbName, Sig}}),
- true = ets:insert(?BY_SIG, {{DbName, Sig}, Pid}).
-
-delete_from_ets(Pid, DbName, Sig) ->
- true = ets:delete(?BY_PID, Pid),
- true = ets:delete(?BY_SIG, {DbName, Sig}).
-
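%% Illustrative invariant behind add_to_ets/3 and delete_from_ets/3 above:
%% every open index is reachable both by its {DbName, Sig} key and by its pid.
%% Sketch only; both tables are created as private, so a check like this could
%% only run inside the manager process itself.
tables_consistent(DbName, Sig, Pid) ->
    ets:lookup(dreyfus_by_sig, {DbName, Sig}) =:= [{{DbName, Sig}, Pid}] andalso
        ets:lookup(dreyfus_by_pid, Pid) =:= [{Pid, {DbName, Sig}}].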
diff --git a/src/dreyfus/src/dreyfus_index_updater.erl b/src/dreyfus/src/dreyfus_index_updater.erl
deleted file mode 100644
index 87edef0ad..000000000
--- a/src/dreyfus/src/dreyfus_index_updater.erl
+++ /dev/null
@@ -1,181 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_index_updater).
--include_lib("couch/include/couch_db.hrl").
--include("dreyfus.hrl").
-
--export([update/2, load_docs/2]).
-
--import(couch_query_servers, [get_os_process/1, ret_os_process/1, proc_prompt/2]).
-
-update(IndexPid, Index) ->
- #index{
- current_seq = CurSeq,
- dbname = DbName,
- ddoc_id = DDocId,
- name = IndexName
- } = Index,
- erlang:put(io_priority, {search, DbName, IndexName}),
- {ok, Db} = couch_db:open_int(DbName, []),
- try
- TotalUpdateChanges = couch_db:count_changes_since(Db, CurSeq),
- TotalPurgeChanges = count_pending_purged_docs_since(Db, IndexPid),
- TotalChanges = TotalUpdateChanges + TotalPurgeChanges,
-
- couch_task_status:add_task([
- {type, search_indexer},
- {database, DbName},
- {design_document, DDocId},
- {index, IndexName},
- {progress, 0},
- {changes_done, 0},
- {total_changes, TotalChanges}
- ]),
-
- %% update status every half second
- couch_task_status:set_update_frequency(500),
-
-        % ExcludeIdRevs is a list of the form [{Id1, Rev1}, {Id2, Rev2}, ...].
-        % Each Rev is the current (winning) revision, not the purged revision.
- {ok, ExcludeIdRevs} = purge_index(Db, IndexPid, Index),
- %% compute on all docs modified since we last computed.
-
- NewCurSeq = couch_db:get_update_seq(Db),
- Proc = get_os_process(Index#index.def_lang),
- try
- true = proc_prompt(Proc, [<<"add_fun">>, Index#index.def]),
- EnumFun = fun ?MODULE:load_docs/2,
- [Changes] = couch_task_status:get([changes_done]),
- Acc0 = {Changes, IndexPid, Db, Proc, TotalChanges, erlang:timestamp(), ExcludeIdRevs},
- {ok, _} = couch_db:fold_changes(Db, CurSeq, EnumFun, Acc0, []),
- ok = clouseau_rpc:commit(IndexPid, NewCurSeq)
- after
- ret_os_process(Proc)
- end,
- exit({updated, NewCurSeq})
- after
- couch_db:close(Db)
- end.
-
-load_docs(FDI, {I, IndexPid, Db, Proc, Total, LastCommitTime, ExcludeIdRevs}=Acc) ->
- couch_task_status:update([{changes_done, I}, {progress, (I * 100) div Total}]),
- DI = couch_doc:to_doc_info(FDI),
- #doc_info{id=Id, high_seq=Seq, revs=[#rev_info{rev=Rev}|_]} = DI,
-    % Skip docs already handled in purge_index to avoid updating the index again.
- case lists:member({Id, Rev}, ExcludeIdRevs) of
- true -> ok;
- false -> update_or_delete_index(IndexPid, Db, DI, Proc)
- end,
- %% Force a commit every minute
- case timer:now_diff(Now = erlang:timestamp(), LastCommitTime) >= 60000000 of
- true ->
- ok = clouseau_rpc:commit(IndexPid, Seq),
- {ok, {I+1, IndexPid, Db, Proc, Total, Now, ExcludeIdRevs}};
- false ->
- {ok, setelement(1, Acc, I+1)}
- end.
-
-purge_index(Db, IndexPid, Index) ->
- {ok, IdxPurgeSeq} = clouseau_rpc:get_purge_seq(IndexPid),
- Proc = get_os_process(Index#index.def_lang),
- try
- true = proc_prompt(Proc, [<<"add_fun">>, Index#index.def]),
- FoldFun = fun({PurgeSeq, _UUID, Id, _Revs}, {Acc, _}) ->
- Acc0 = case couch_db:get_full_doc_info(Db, Id) of
- not_found ->
- ok = clouseau_rpc:delete(IndexPid, Id),
- Acc;
- FDI ->
- DI = couch_doc:to_doc_info(FDI),
- #doc_info{id=Id, revs=[#rev_info{rev=Rev}|_]} = DI,
- case lists:member({Id, Rev}, Acc) of
- true -> Acc;
- false ->
- update_or_delete_index(IndexPid, Db, DI, Proc),
- [{Id, Rev} | Acc]
- end
- end,
- update_task(1),
- {ok, {Acc0, PurgeSeq}}
- end,
-
- {ok, {ExcludeList, NewPurgeSeq}} = couch_db:fold_purge_infos(
- Db, IdxPurgeSeq, FoldFun, {[], 0}, []),
- clouseau_rpc:set_purge_seq(IndexPid, NewPurgeSeq),
- update_local_doc(Db, Index, NewPurgeSeq),
- {ok, ExcludeList}
- after
- ret_os_process(Proc)
- end.
-
-count_pending_purged_docs_since(Db, IndexPid) ->
- DbPurgeSeq = couch_db:get_purge_seq(Db),
- {ok, IdxPurgeSeq} = clouseau_rpc:get_purge_seq(IndexPid),
- DbPurgeSeq - IdxPurgeSeq.
-
-update_or_delete_index(IndexPid, Db, DI, Proc) ->
- #doc_info{id=Id, revs=[#rev_info{deleted=Del}|_]} = DI,
- case Del of
- true ->
- ok = clouseau_rpc:delete(IndexPid, Id);
- false ->
- case maybe_skip_doc(Db, Id) of
- true ->
- ok;
- false ->
- {ok, Doc} = couch_db:open_doc(Db, DI, []),
- Json = couch_doc:to_json_obj(Doc, []),
- [Fields|_] = proc_prompt(Proc, [<<"index_doc">>, Json]),
- Fields1 = [list_to_tuple(Field) || Field <- Fields],
- Fields2 = maybe_add_partition(Db, Id, Fields1),
- case Fields2 of
- [] -> ok = clouseau_rpc:delete(IndexPid, Id);
- _ -> ok = clouseau_rpc:update(IndexPid, Id, Fields2)
- end
- end
- end.
-
-update_local_doc(Db, Index, PurgeSeq) ->
- DocId = dreyfus_util:get_local_purge_doc_id(Index#index.sig),
- DocContent = dreyfus_util:get_local_purge_doc_body(Db, DocId, PurgeSeq, Index),
- couch_db:update_doc(Db, DocContent, []).
-
-update_task(NumChanges) ->
- [Changes, Total] = couch_task_status:get([changes_done, total_changes]),
- Changes2 = Changes + NumChanges,
- Progress = case Total of
- 0 ->
- 0;
- _ ->
- (Changes2 * 100) div Total
- end,
- couch_task_status:update([{progress, Progress}, {changes_done, Changes2}]).
-
-maybe_skip_doc(Db, <<"_design/", _/binary>>) ->
- couch_db:is_partitioned(Db);
-maybe_skip_doc(_Db, _Id) ->
- false.
-
-maybe_add_partition(_Db, _Id, []) ->
- [];
-maybe_add_partition(Db, Id, Fields) ->
- case couch_db:is_partitioned(Db) of
- true ->
- Partition = couch_partition:from_docid(Id),
- [{<<"_partition">>, Partition, {[]}} | Fields];
- false ->
- Fields
- end.
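%% For partitioned databases the updater prepends a _partition field derived
%% from the document id (see maybe_add_partition/3 above). Partitioned doc ids
%% take the form <<"partition:docid">>; a sketch of that derivation, assuming
%% couch_partition:from_docid/1 splits on the first colon:
partition_field(DocId) when is_binary(DocId) ->
    [Partition | _] = binary:split(DocId, <<":">>),
    {<<"_partition">>, Partition, {[]}}.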
diff --git a/src/dreyfus/src/dreyfus_plugin_couch_db.erl b/src/dreyfus/src/dreyfus_plugin_couch_db.erl
deleted file mode 100644
index b9f48ba74..000000000
--- a/src/dreyfus/src/dreyfus_plugin_couch_db.erl
+++ /dev/null
@@ -1,26 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(dreyfus_plugin_couch_db).
-
--export([
- is_valid_purge_client/2,
- on_compact/2
-]).
-
-
-is_valid_purge_client(DbName, Props) ->
- dreyfus_util:verify_index_exists(DbName, Props).
-
-
-on_compact(DbName, DDocs) ->
- dreyfus_util:ensure_local_purge_docs(DbName, DDocs).
diff --git a/src/dreyfus/src/dreyfus_rpc.erl b/src/dreyfus/src/dreyfus_rpc.erl
deleted file mode 100644
index 5542bd029..000000000
--- a/src/dreyfus/src/dreyfus_rpc.erl
+++ /dev/null
@@ -1,130 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_rpc).
--include_lib("couch/include/couch_db.hrl").
--include("dreyfus.hrl").
--import(couch_query_servers, [get_os_process/1, ret_os_process/1, proc_prompt/2]).
-
-% public api.
--export([search/4, group1/4, group2/4, info/3, disk_size/3]).
-
-% private callback
--export([call/5, info_int/3]).
-
-search(DbName, DDoc, IndexName, QueryArgs) ->
- MFA = {?MODULE, call, [search, DbName, DDoc, IndexName, QueryArgs]},
- dreyfus_util:time([rpc, search], MFA).
-
-group1(DbName, DDoc, IndexName, QueryArgs) ->
- MFA = {?MODULE, call, [group1, DbName, DDoc, IndexName, QueryArgs]},
- dreyfus_util:time([rpc, group1], MFA).
-
-group2(DbName, DDoc, IndexName, QueryArgs) ->
- MFA = {?MODULE, call, [group2, DbName, DDoc, IndexName, QueryArgs]},
- dreyfus_util:time([rpc, group2], MFA).
-
-call(Fun, DbName, DDoc, IndexName, QueryArgs0) ->
- QueryArgs = dreyfus_util:upgrade(QueryArgs0),
- erlang:put(io_priority, {search, DbName}),
- check_interactive_mode(),
- {ok, Db} = get_or_create_db(DbName, []),
- #index_query_args{
- stale = Stale
- } = QueryArgs,
- {_LastSeq, MinSeq} = calculate_seqs(Db, Stale),
- case dreyfus_index:design_doc_to_index(DDoc, IndexName) of
- {ok, Index} ->
- case dreyfus_index_manager:get_index(DbName, Index) of
- {ok, Pid} ->
- case dreyfus_index:await(Pid, MinSeq) of
- {ok, IndexPid, _Seq} ->
- Result = dreyfus_index:Fun(IndexPid, QueryArgs),
- rexi:reply(Result);
- % obsolete clauses, remove after upgrade
- ok ->
- Result = dreyfus_index:Fun(Pid, QueryArgs),
- rexi:reply(Result);
- {ok, _Seq} ->
- Result = dreyfus_index:Fun(Pid, QueryArgs),
- rexi:reply(Result);
- Error ->
- rexi:reply(Error)
- end;
- Error ->
- rexi:reply(Error)
- end;
- Error ->
- rexi:reply(Error)
- end.
-
-info(DbName, DDoc, IndexName) ->
- MFA = {?MODULE, info_int, [DbName, DDoc, IndexName]},
- dreyfus_util:time([rpc, info], MFA).
-
-info_int(DbName, DDoc, IndexName) ->
- erlang:put(io_priority, {search, DbName}),
- check_interactive_mode(),
- case dreyfus_index:design_doc_to_index(DDoc, IndexName) of
- {ok, Index} ->
- case dreyfus_index_manager:get_index(DbName, Index) of
- {ok, Pid} ->
- Result = dreyfus_index:info(Pid),
- rexi:reply(Result);
- Error ->
- rexi:reply(Error)
- end;
- Error ->
- rexi:reply(Error)
- end.
-
-disk_size(DbName, DDoc, IndexName) ->
- erlang:put(io_priority, {search, DbName}),
- check_interactive_mode(),
- case dreyfus_index:design_doc_to_index(DDoc, IndexName) of
- {ok, Index} ->
- Result = dreyfus_index_manager:get_disk_size(DbName, Index),
- rexi:reply(Result);
- Error ->
- rexi:reply(Error)
- end.
-
-get_or_create_db(DbName, Options) ->
- case couch_db:open_int(DbName, Options) of
- {not_found, no_db_file} ->
- couch_log:warning("~p creating ~s", [?MODULE, DbName]),
- couch_server:create(DbName, Options);
- Else ->
- Else
- end.
-
-calculate_seqs(Db, Stale) ->
- LastSeq = couch_db:get_update_seq(Db),
- if
- Stale == ok orelse Stale == update_after ->
- {LastSeq, 0};
- true ->
- {LastSeq, LastSeq}
- end.
-
-check_interactive_mode() ->
- case config:get("couchdb", "maintenance_mode", "false") of
- "true" ->
- % Do this to avoid log spam from rexi_server
- rexi:reply({rexi_EXIT, {maintenance_mode, node()}}),
- exit(normal);
- _ ->
- ok
- end.
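%% Operational sketch: putting a node into maintenance mode makes
%% check_interactive_mode/0 above reply {rexi_EXIT, {maintenance_mode, node()}}
%% and exit, so search RPCs are shed from that node. config:set/3 is assumed to
%% exist alongside the config:get/3 calls used in this module.
enter_maintenance_mode() ->
    config:set("couchdb", "maintenance_mode", "true").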
diff --git a/src/dreyfus/src/dreyfus_sup.erl b/src/dreyfus/src/dreyfus_sup.erl
deleted file mode 100644
index d855a822e..000000000
--- a/src/dreyfus/src/dreyfus_sup.erl
+++ /dev/null
@@ -1,32 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_sup).
--behaviour(supervisor).
-
--export([start_link/0, init/1]).
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-init(_Args) ->
- Children = [
- child(dreyfus_index_manager)
- ],
- {ok, {{one_for_one,10,1},
- couch_epi:register_service(dreyfus_epi, Children)}}.
-
-child(Child) ->
- {Child, {Child, start_link, []}, permanent, 1000, worker, [Child]}.
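%% child/1 above builds the legacy tuple child spec; the equivalent map-based
%% spec, shown only for comparison, is:
child_map(Child) ->
    #{id => Child, start => {Child, start_link, []}, restart => permanent,
      shutdown => 1000, type => worker, modules => [Child]}.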
diff --git a/src/dreyfus/src/dreyfus_util.erl b/src/dreyfus/src/dreyfus_util.erl
deleted file mode 100644
index 05ecdb621..000000000
--- a/src/dreyfus/src/dreyfus_util.erl
+++ /dev/null
@@ -1,441 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-
--module(dreyfus_util).
-
--include("dreyfus.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--export([get_shards/2, get_ring_opts/2, sort/2, upgrade/1, export/1, time/2]).
--export([in_black_list/1, in_black_list/3, maybe_deny_index/3]).
--export([get_design_docid/1]).
--export([
- ensure_local_purge_docs/2,
- get_value_from_options/2,
- get_local_purge_doc_id/1,
- get_local_purge_doc_body/4,
- maybe_create_local_purge_doc/2,
- maybe_create_local_purge_doc/3,
- get_signature_from_idxdir/1,
- verify_index_exists/2
-]).
-
-
-get_shards(DbName, #index_query_args{partition = nil} = Args) ->
- case use_ushards(Args) of
- true ->
- mem3:ushards(DbName);
- false ->
- mem3:shards(DbName)
- end;
-get_shards(DbName, #index_query_args{partition = Partition} = Args) ->
- PartitionId = couch_partition:shard_key(Partition),
- case use_ushards(Args) of
- true ->
- mem3:ushards(DbName, PartitionId);
- false ->
- mem3:shards(DbName, PartitionId)
- end;
-get_shards(DbName, Args) ->
- get_shards(DbName, upgrade(Args)).
-
-use_ushards(#index_query_args{stale=ok}) ->
- true;
-use_ushards(#index_query_args{stable=true}) ->
- true;
-use_ushards(#index_query_args{}) ->
- false.
-
-
-get_ring_opts(#index_query_args{partition = nil}, _Shards) ->
- [];
-get_ring_opts(#index_query_args{}, Shards) ->
- Shards1 = lists:map(fun(#shard{} = S) ->
- S#shard{ref = undefined}
- end, Shards),
- [{any, Shards1}].
-
--spec sort(Order :: relevance | [any()], [#sortable{}]) -> [#sortable{}].
-sort(Sort, List0) ->
- {List1, Stash} = stash_items(List0),
- List2 = lists:sort(fun(A, B) -> sort(Sort, A, B) end, List1),
- unstash_items(List2, Stash).
-
-stash_items(List) ->
- lists:unzip([stash_item(Item) || Item <- List]).
-
-stash_item(Item) ->
- Ref = make_ref(),
- {Item#sortable{item=Ref}, {Ref, Item#sortable.item}}.
-
-unstash_items(List, Stash) ->
- [unstash_item(Item, Stash) || Item <- List].
-
-unstash_item(Stashed, Stash) ->
- {_, Item} = lists:keyfind(Stashed#sortable.item, 1, Stash),
- Stashed#sortable{item=Item}.
-
--spec sort(Order :: relevance | [any()], #sortable{}, #sortable{}) -> boolean().
-sort(relevance, #sortable{}=A, #sortable{}=B) ->
- sort2(pad([<<"-">>], <<"">>, length(A#sortable.order)), A, B);
-sort(Sort, #sortable{}=A, #sortable{}=B) when is_binary(Sort) ->
- sort2(pad([Sort], <<"">>, length(A#sortable.order)), A, B);
-sort(Sort, #sortable{}=A, #sortable{}=B) when is_list(Sort) ->
- sort2(pad(Sort, <<"">>, length(A#sortable.order)), A, B).
-
--spec sort2([any()], #sortable{}, #sortable{}) -> boolean().
-sort2([<<"-",_/binary>>|_], #sortable{order=[A|_]}, #sortable{order=[B|_]}) when A =/= B ->
- A > B;
-sort2([_|_], #sortable{order=[A|_]}, #sortable{order=[B|_]}) when A =/= B ->
- A < B;
-sort2([], #sortable{shard=#shard{range=A}}, #sortable{shard=#shard{range=B}}) ->
- % arbitrary tie-breaker
- A =< B;
-sort2([_|Rest], #sortable{order=[_|RestA]}=SortableA, #sortable{order=[_|RestB]}=SortableB) ->
- sort2(Rest, SortableA#sortable{order=RestA}, SortableB#sortable{order=RestB}).
-
-pad(List, _Padding, Length) when length(List) >= Length ->
- List;
-pad(List, Padding, Length) ->
- pad(List ++ [Padding], Padding, Length).
-
-upgrade(#index_query_args{}=Args) ->
- Args;
-upgrade({index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark,
- Sort, Grouping, Stable}) ->
- #index_query_args{
- q = Query,
- limit = Limit,
- stale = Stale,
- include_docs = IncludeDocs,
- bookmark = Bookmark,
- sort = Sort,
- grouping = Grouping,
- stable = Stable};
-upgrade({index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark,
- Sort, Grouping, Stable, Counts, Ranges, Drilldown}) ->
- #index_query_args{
- q = Query,
- limit = Limit,
- stale = Stale,
- include_docs = IncludeDocs,
- bookmark = Bookmark,
- sort = Sort,
- grouping = Grouping,
- stable = Stable,
- counts=Counts,
- ranges = Ranges,
- drilldown = Drilldown};
-upgrade({index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark,
- Sort, Grouping, Stable, Counts, Ranges, Drilldown,
- IncludeFields, HighlightFields, HighlightPreTag, HighlightPostTag,
- HighlightNumber, HighlightSize}) ->
- #index_query_args{
- q = Query,
- limit = Limit,
- stale = Stale,
- include_docs = IncludeDocs,
- bookmark = Bookmark,
- sort = Sort,
- grouping = Grouping,
- stable = Stable,
- counts = Counts,
- ranges = Ranges,
- drilldown = Drilldown,
- include_fields = IncludeFields,
- highlight_fields = HighlightFields,
- highlight_pre_tag = HighlightPreTag,
- highlight_post_tag = HighlightPostTag,
- highlight_number = HighlightNumber,
- highlight_size = HighlightSize
- };
-upgrade({index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark,
- Sort, Grouping, Stable, Counts, Ranges, Drilldown,
- IncludeFields, HighlightFields, HighlightPreTag, HighlightPostTag,
- HighlightNumber, HighlightSize, RawBookmark}) ->
- #index_query_args{
- q = Query,
- limit = Limit,
- stale = Stale,
- include_docs = IncludeDocs,
- bookmark = Bookmark,
- sort = Sort,
- grouping = Grouping,
- stable = Stable,
- counts = Counts,
- ranges = Ranges,
- drilldown = Drilldown,
- include_fields = IncludeFields,
- highlight_fields = HighlightFields,
- highlight_pre_tag = HighlightPreTag,
- highlight_post_tag = HighlightPostTag,
- highlight_number = HighlightNumber,
- highlight_size = HighlightSize,
- raw_bookmark = RawBookmark
- }.
-
-export(#index_query_args{partition = nil, counts = nil, ranges = nil,
- drilldown = [], include_fields = nil, highlight_fields = nil} = Args) ->
- % Ensure existing searches work during the upgrade by creating an
- % #index_query_args record in the old format
- {index_query_args,
- Args#index_query_args.q,
- Args#index_query_args.limit,
- Args#index_query_args.stale,
- Args#index_query_args.include_docs,
- Args#index_query_args.bookmark,
- Args#index_query_args.sort,
- Args#index_query_args.grouping,
- Args#index_query_args.stable
- };
-export(#index_query_args{partition = nil, include_fields = nil,
- highlight_fields = nil} = Args) ->
- {index_query_args,
- Args#index_query_args.q,
- Args#index_query_args.limit,
- Args#index_query_args.stale,
- Args#index_query_args.include_docs,
- Args#index_query_args.bookmark,
- Args#index_query_args.sort,
- Args#index_query_args.grouping,
- Args#index_query_args.stable,
- Args#index_query_args.counts,
- Args#index_query_args.ranges,
- Args#index_query_args.drilldown
- };
-export(#index_query_args{partition = nil} = Args) ->
- {index_query_args,
- Args#index_query_args.q,
- Args#index_query_args.limit,
- Args#index_query_args.stale,
- Args#index_query_args.include_docs,
- Args#index_query_args.bookmark,
- Args#index_query_args.sort,
- Args#index_query_args.grouping,
- Args#index_query_args.stable,
- Args#index_query_args.counts,
- Args#index_query_args.ranges,
- Args#index_query_args.drilldown,
- Args#index_query_args.include_fields,
- Args#index_query_args.highlight_fields,
- Args#index_query_args.highlight_pre_tag,
- Args#index_query_args.highlight_post_tag,
- Args#index_query_args.highlight_number,
- Args#index_query_args.highlight_size,
- Args#index_query_args.raw_bookmark
- };
-export(QueryArgs) ->
- QueryArgs.
-
-time(Metric, {M, F, A}) when is_list(Metric) ->
- Start = os:timestamp(),
- try
- erlang:apply(M, F, A)
- after
- Length = timer:now_diff(os:timestamp(), Start) / 1000,
- couch_stats:update_histogram([dreyfus | Metric], Length)
- end.
-
-in_black_list(DbName, GroupId, IndexName) when is_binary(DbName),
- is_binary(GroupId), is_binary(IndexName) ->
- in_black_list(?b2l(DbName), ?b2l(GroupId), ?b2l(IndexName));
-in_black_list(DbName, GroupId, IndexName) when is_list(DbName),
- is_list(GroupId), is_list(IndexName) ->
- in_black_list(lists:flatten([DbName, ".", GroupId, ".", IndexName]));
-in_black_list(_DbName, _GroupId, _IndexName) ->
- false.
-
-in_black_list(IndexEntry) when is_list(IndexEntry) ->
- case dreyfus_config:get(IndexEntry) of
- undefined -> false;
- _ -> true
- end;
-in_black_list(_IndexEntry) ->
- false.
-
-maybe_deny_index(DbName, GroupId, IndexName) ->
- case in_black_list(DbName, GroupId, IndexName) of
- true ->
-            Reason = ?l2b(io_lib:format("Index <~s, ~s, ~s> is blacklisted",
-                [?b2l(DbName), ?b2l(GroupId), ?b2l(IndexName)])),
-            throw({bad_request, Reason});
- _ ->
- ok
- end.
-
-get_design_docid(#doc{id = <<"_design/", DesignName/binary>>}) ->
- DesignName.
-
-get_value_from_options(Key, Options) ->
- case couch_util:get_value(Key, Options) of
- undefined ->
- Reason = binary_to_list(Key) ++ " must exist in Options.",
- throw({bad_request, Reason});
- Value -> Value
- end.
-
-ensure_local_purge_docs(DbName, DDocs) ->
- couch_util:with_db(DbName, fun(Db) ->
- lists:foreach(fun(DDoc) ->
- #doc{body = {Props}} = DDoc,
- case couch_util:get_value(<<"indexes">>, Props) of
- undefined -> false;
- _ ->
- try dreyfus_index:design_doc_to_indexes(DDoc) of
- SIndexes -> ensure_local_purge_doc(Db, SIndexes)
- catch _:_ ->
- ok
- end
- end
- end, DDocs)
- end).
-
-ensure_local_purge_doc(Db, SIndexes) ->
- if SIndexes =/= [] ->
- lists:map(fun(SIndex) ->
- maybe_create_local_purge_doc(Db, SIndex)
- end, SIndexes);
- true -> ok end.
-
-maybe_create_local_purge_doc(Db, Index) ->
- DocId = dreyfus_util:get_local_purge_doc_id(Index#index.sig),
- case couch_db:open_doc(Db, DocId) of
- {not_found, _} ->
- DbPurgeSeq = couch_db:get_purge_seq(Db),
- DocContent = dreyfus_util:get_local_purge_doc_body(
- Db, DocId, DbPurgeSeq, Index),
- couch_db:update_doc(Db, DocContent, []);
- _ ->
- ok
- end.
-
-maybe_create_local_purge_doc(Db, IndexPid, Index) ->
- DocId = dreyfus_util:get_local_purge_doc_id(Index#index.sig),
- case couch_db:open_doc(Db, DocId) of
- {not_found, _} ->
- DbPurgeSeq = couch_db:get_purge_seq(Db),
- clouseau_rpc:set_purge_seq(IndexPid, DbPurgeSeq),
- DocContent = dreyfus_util:get_local_purge_doc_body(
- Db, DocId, DbPurgeSeq, Index),
- couch_db:update_doc(Db, DocContent, []);
- _ ->
- ok
- end.
-
-get_local_purge_doc_id(Sig) ->
- ?l2b(?LOCAL_DOC_PREFIX ++ "purge-" ++ "dreyfus-" ++ Sig).
-
-get_signature_from_idxdir(IdxDir) ->
- IdxDirList = filename:split(IdxDir),
- Sig = lists:last(IdxDirList),
- Sig2 = if not is_binary(Sig) -> Sig; true ->
- binary_to_list(Sig)
- end,
- case [Ch || Ch <- Sig2, not (((Ch >= $0) and (Ch =< $9))
- orelse ((Ch >= $a) and (Ch =< $f))
- orelse ((Ch >= $A) and (Ch =< $F)))] == [] of
- true -> Sig;
- false -> undefined
- end.
-
-get_local_purge_doc_body(_, LocalDocId, PurgeSeq, Index) ->
- #index{
- name = IdxName,
- ddoc_id = DDocId,
- sig = Sig
- } = Index,
- {Mega, Secs, _} = os:timestamp(),
- NowSecs = Mega * 1000000 + Secs,
- JsonList = {[
- {<<"_id">>, LocalDocId},
- {<<"purge_seq">>, PurgeSeq},
- {<<"updated_on">>, NowSecs},
- {<<"indexname">>, IdxName},
- {<<"ddoc_id">>, DDocId},
- {<<"signature">>, Sig},
- {<<"type">>, <<"dreyfus">>}
- ]},
- couch_doc:from_json_obj(JsonList).
-
-verify_index_exists(DbName, Props) ->
- try
- Type = couch_util:get_value(<<"type">>, Props),
- if Type =/= <<"dreyfus">> -> false; true ->
- DDocId = couch_util:get_value(<<"ddoc_id">>, Props),
- IndexName = couch_util:get_value(<<"indexname">>, Props),
- Sig = couch_util:get_value(<<"signature">>, Props),
- couch_util:with_db(DbName, fun(Db) ->
- case couch_db:get_design_doc(Db, DDocId) of
- {ok, #doc{} = DDoc} ->
- {ok, IdxState} = dreyfus_index:design_doc_to_index(
- DDoc, IndexName),
- IdxState#index.sig == Sig;
- {not_found, _} ->
- false
- end
- end)
- end
- catch _:_ ->
- false
- end.
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
--define(SORT(T, L), lists:sort(fun(A, B) -> sort(T, A, B) end, L)).
--define(ASC, <<"">>).
--define(DESC, <<"-">>).
-
-%% TODO: use PropEr (property-based testing) for these sort tests.
-
-empty_test() ->
- ?assertEqual([], ?SORT([], [])).
-
-primary_asc_test() ->
- ?assertMatch([#sortable{order=[1]}, #sortable{order=[2]}],
- ?SORT([?ASC], [#sortable{order=[2]}, #sortable{order=[1]}])).
-
-primary_desc_test() ->
- ?assertMatch([#sortable{order=[2]}, #sortable{order=[1]}],
- ?SORT([?DESC], [#sortable{order=[1]}, #sortable{order=[2]}])).
-
-secondary_asc_test() ->
- ?assertMatch([#sortable{order=[1, 1]}, #sortable{order=[1, 2]}],
- ?SORT([?ASC, ?ASC], [#sortable{order=[1, 2]}, #sortable{order=[1, 1]}])).
-
-secondary_desc_test() ->
- ?assertMatch([#sortable{order=[1, 2]}, #sortable{order=[1, 1]}],
- ?SORT([?DESC, ?DESC], [#sortable{order=[1, 1]}, #sortable{order=[1, 2]}])).
-
-stash_test() ->
- {Stashed, Stash} = stash_items([#sortable{order=foo, item=bar}]),
- First = hd(Stashed),
- ?assert(is_reference(First#sortable.item)),
- Unstashed = hd(unstash_items(Stashed, Stash)),
- ?assertEqual(Unstashed#sortable.item, bar).
-
-
-ring_opts_test() ->
- Shards = [#shard{name = foo, ref = make_ref()}],
-
- QArgs1 = #index_query_args{partition = nil},
- ?assertEqual([], get_ring_opts(QArgs1, Shards)),
-
- QArgs2 = #index_query_args{partition = <<"x">>},
- ?assertMatch([{any, [#shard{name = foo, ref = undefined}]}],
- get_ring_opts(QArgs2, Shards)).
-
--endif.
diff --git a/src/dreyfus/test/dreyfus_blacklist_await_test.erl b/src/dreyfus/test/dreyfus_blacklist_await_test.erl
deleted file mode 100644
index 28a5e7f30..000000000
--- a/src/dreyfus/test/dreyfus_blacklist_await_test.erl
+++ /dev/null
@@ -1,76 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(dreyfus_blacklist_await_test).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("dreyfus/include/dreyfus.hrl").
--include_lib("eunit/include/eunit.hrl").
-
--define(DDOC_ID, <<"_design/black_list_doc">>).
--define(INDEX_NAME, <<"my_index">>).
--define(DBNAME, <<"mydb">>).
--define(TIMEOUT, 1000).
-
-start() ->
- test_util:start_couch([dreyfus]).
-
-stop(_) ->
- test_util:stop_couch([dreyfus]).
-
-setup() ->
- ok = meck:new(couch_log),
- ok = meck:expect(couch_log, notice, fun(_Fmt, _Args) ->
- ?debugFmt(_Fmt, _Args)
- end).
-
-teardown(_) ->
- ok = meck:unload(couch_log).
-
-dreyfus_blacklist_await_test_() ->
- {
- "dreyfus black_list_doc await tests",
- {
- setup,
- fun start/0, fun stop/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun do_not_await_1/0
- ]
- }
- }
- }.
-
-do_not_await_1() ->
- ok = meck:new(dreyfus_index, [passthrough]),
- Denied = lists:flatten([?b2l(?DBNAME), ".", "black_list_doc", ".",
- "my_index"]),
- config:set("dreyfus_blacklist", Denied, "true"),
- dreyfus_test_util:wait_config_change(Denied, "true"),
- Index = #index{dbname=?DBNAME, name=?INDEX_NAME, ddoc_id=?DDOC_ID},
- State = create_state(?DBNAME, Index, nil, nil, []),
- Msg = "Index Blocked from Updating - db: ~p, ddocid: ~p name: ~p",
- Return = wait_log_message(Msg, fun() ->
- {noreply, _NewState} = dreyfus_index:handle_call({await, 1},
- self(), State)
- end),
- ?assertEqual(Return, ok).
-
-wait_log_message(Fmt, Fun) ->
- ok = meck:reset(couch_log),
- Fun(),
- ok = meck:wait(couch_log, '_', [Fmt, '_'], 5000).
-
-create_state(DbName, Index, UPid, IPid, WList) ->
- {state, DbName, Index, UPid, IPid, WList}.
diff --git a/src/dreyfus/test/dreyfus_blacklist_request_test.erl b/src/dreyfus/test/dreyfus_blacklist_request_test.erl
deleted file mode 100644
index 8e5598ae1..000000000
--- a/src/dreyfus/test/dreyfus_blacklist_request_test.erl
+++ /dev/null
@@ -1,96 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(dreyfus_blacklist_request_test).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_log/include/couch_log.hrl").
--include_lib("dreyfus/include/dreyfus.hrl").
--include_lib("eunit/include/eunit.hrl").
-
--define(TIMEOUT, 1000).
-
-start() ->
- test_util:start_couch([dreyfus]),
- ok = meck:new(fabric, [passthrough]),
- ok = meck:expect(fabric, open_doc, fun(_, _, _) ->
- {ok, ddoc}
- end).
-
-stop(_) ->
- ok = meck:unload(fabric),
- test_util:stop_couch([dreyfus]).
-
-setup() ->
- ok.
-
-teardown(_) ->
- ok.
-
-dreyfus_blacklist_request_test_() ->
- {
- "dreyfus blacklist request tests",
- {
- setup,
- fun start/0, fun stop/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun deny_fabric_requests/0,
- fun allow_fabric_request/0
- ]
- }
- }
- }.
-
-deny_fabric_requests() ->
- Reason = <<"Index <mydb, myddocid, myindexname>, is BlackListed">>,
- QueryArgs = #index_query_args{},
- IndexQueryArgs = #index_query_args{},
- DDoc = #doc{id = <<"_design/myddocid">>},
- Denied = "mydb.myddocid.myindexname",
- config:set("dreyfus_blacklist", Denied, "true"),
- dreyfus_test_util:wait_config_change(Denied, "true"),
- ?assertThrow({bad_request, Reason}, dreyfus_fabric_search:go(<<"mydb">>,
- <<"myddocid">>, <<"myindexname">>, QueryArgs)),
- ?assertThrow({bad_request, Reason}, dreyfus_fabric_group1:go(<<"mydb">>,
- <<"myddocid">>, <<"myindexname">>, QueryArgs)),
- ?assertThrow({bad_request, Reason}, dreyfus_fabric_group2:go(<<"mydb">>,
- <<"myddocid">>, <<"myindexname">>, QueryArgs)),
- ?assertThrow({bad_request, Reason}, dreyfus_fabric_info:go(<<"mydb">>,
- <<"myddocid">>, <<"myindexname">>, QueryArgs)),
- ?assertThrow({bad_request, Reason}, dreyfus_fabric_search:go(<<"mydb">>,
- DDoc, <<"myindexname">>, IndexQueryArgs)),
- ?assertThrow({bad_request, Reason}, dreyfus_fabric_group1:go(<<"mydb">>,
- DDoc, <<"myindexname">>, IndexQueryArgs)),
- ?assertThrow({bad_request, Reason}, dreyfus_fabric_group2:go(<<"mydb">>,
- DDoc, <<"myindexname">>, IndexQueryArgs)),
- ?assertThrow({bad_request, Reason}, dreyfus_fabric_info:go(<<"mydb">>,
- DDoc, <<"myindexname">>, IndexQueryArgs)).
-
-allow_fabric_request() ->
- ok = meck:new(dreyfus_fabric_search, [passthrough]),
- ok = meck:expect(dreyfus_fabric_search, go,
- fun(A, GroupId, B, C) when is_binary(GroupId) ->
- meck:passthrough([A, GroupId, B, C])
- end),
- ok = meck:expect(dreyfus_fabric_search, go, fun(_, _, _, _) ->
- ok
- end),
- Denied = "mydb2.myddocid2.myindexname2",
- QueryArgs = #index_query_args{},
- config:set("dreyfus_blacklist", Denied, "true"),
- dreyfus_test_util:wait_config_change(Denied, "true"),
- ?assertEqual(ok, dreyfus_fabric_search:go(<<"mydb">>,
- <<"myddocid">>, <<"indexnotthere">>, QueryArgs)),
- ok = meck:unload(dreyfus_fabric_search).
diff --git a/src/dreyfus/test/dreyfus_config_test.erl b/src/dreyfus/test/dreyfus_config_test.erl
deleted file mode 100644
index 775e49d7f..000000000
--- a/src/dreyfus/test/dreyfus_config_test.erl
+++ /dev/null
@@ -1,71 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(dreyfus_config_test).
-
-
--include_lib("couch_log/include/couch_log.hrl").
--include_lib("eunit/include/eunit.hrl").
-
--define(TIMEOUT, 1000).
-
-
-start() ->
- test_util:start_couch([dreyfus]).
-
-setup() ->
- ok.
-
-teardown(_) ->
- ok.
-
-dreyfus_config_test_() ->
- {
- "dreyfus config tests",
- {
- setup,
- fun start/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun check_black_list/0,
- fun check_delete_from_blacklist/0
- ]
- }
- }
- }.
-
-check_black_list() ->
- Index = "mydb.myddocid.myindexname",
- Index2 = "mydb2.myddocid2.myindexname2",
- Index3 = "mydb3.myddocid3.myindexname3",
- ok = config:set("dreyfus_blacklist", Index, "true"),
- ok = config:set("dreyfus_blacklist", Index2, "true"),
- ok = config:set("dreyfus_blacklist", Index3, "true"),
- dreyfus_test_util:wait_config_change(Index3, "true"),
- FinalBl = [Index3, Index2, Index],
- lists:foreach(fun (I) ->
- ?assertEqual("true", dreyfus_config:get(I))
- end, FinalBl).
-
-check_delete_from_blacklist() ->
- Index = "mydb.myddocid.myindexname",
- Index2 = "mydb2.myddocid2.myindexname2",
- ok = config:set("dreyfus_blacklist", Index, "true"),
- dreyfus_test_util:wait_config_change(Index, "true"),
- ok = config:delete("dreyfus_blacklist", Index),
- dreyfus_test_util:wait_config_change(Index, undefined),
- ok = config:set("dreyfus_blacklist", Index2, "true"),
- dreyfus_test_util:wait_config_change(Index2, "true"),
- ?assertEqual(undefined, dreyfus_config:get(Index)),
- ?assertEqual("true", dreyfus_config:get(Index2)).
diff --git a/src/dreyfus/test/dreyfus_purge_test.erl b/src/dreyfus/test/dreyfus_purge_test.erl
deleted file mode 100644
index 5fa4bc90f..000000000
--- a/src/dreyfus/test/dreyfus_purge_test.erl
+++ /dev/null
@@ -1,867 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(dreyfus_purge_test).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("dreyfus/include/dreyfus.hrl").
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-
--export([test_purge_single/0, test_purge_multiple/0, test_purge_multiple2/0,
- test_purge_conflict/0, test_purge_conflict2/0, test_purge_conflict3/0, test_purge_conflict4/0,
- test_purge_update/0, test_purge_update2/0,
- test_delete/0, test_delete_purge_conflict/0, test_delete_conflict/0,
- test_all/0]).
--export([test_verify_index_exists1/0, test_verify_index_exists2/0, test_verify_index_exists_failed/0,
- test_local_doc/0, test_delete_local_doc/0, test_purge_search/0]).
-
--compile(export_all).
-
-test_all() ->
- test_purge_single(),
- test_purge_multiple(),
- test_purge_multiple2(),
- test_purge_conflict(),
- test_purge_conflict2(),
- test_purge_conflict3(),
- test_purge_conflict4(),
- test_purge_update(),
- test_purge_update2(),
- test_delete(),
- test_delete_purge_conflict(),
- test_delete_conflict(),
- test_verify_index_exists1(),
- test_verify_index_exists2(),
- test_verify_index_exists_failed(),
- test_delete_local_doc(),
- test_local_doc(),
- test_purge_search(),
- ok.
-
-test_purge_single() ->
- DbName = db_name(),
- create_db_docs(DbName),
- {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>),
- ?assertEqual(HitCount1, 1),
- purge_docs(DbName, [<<"apple">>]),
- {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, <<"apple">>),
- ?assertEqual(HitCount2, 0),
- delete_db(DbName),
- ok.
-
-test_purge_multiple() ->
- Query = <<"color:red">>,
-
- %create the db and docs
- DbName = db_name(),
- create_db_docs(DbName),
-
- %first search request
- {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, Query),
-
- ?assertEqual(HitCount1, 5),
-
- %purge 5 docs
- purge_docs(DbName, [<<"apple">>, <<"tomato">>, <<"cherry">>, <<"haw">>,
- <<"strawberry">>]),
-
- %second search request
- {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, Query),
-
- ?assertEqual(HitCount2, 0),
-
- %delete the db
- delete_db(DbName),
- ok.
-
-test_purge_multiple2() ->
- %create the db and docs
- DbName = db_name(),
- create_db_docs(DbName),
-
- Query = <<"color:red">>,
-
- %first search request
- {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, Query),
-
- ?assertEqual(HitCount1, 5),
-
- %purge 2 docs
- purge_docs(DbName, [<<"apple">>, <<"tomato">>]),
-
- %second search request
- {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, Query),
-
- ?assertEqual(HitCount2, 3),
-
- %purge 2 docs
- purge_docs(DbName, [<<"cherry">>, <<"haw">>]),
-
- %third search request
- {ok, _, HitCount3, _, _, _} = dreyfus_search(DbName, Query),
-
- ?assertEqual(HitCount3, 1),
-
- %delete the db
- delete_db(DbName),
- ok.
-
-test_purge_conflict() ->
- %create dbs and docs
- SourceDbName = db_name(),
- timer:sleep(2000),
- TargetDbName = db_name(),
-
- create_db_docs(SourceDbName),
- create_db_docs(TargetDbName, <<"green">>),
-
- %first search
- {ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
- {ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
-
- ?assertEqual(5, RedHitCount1 + GreenHitCount1),
-
- %do replicate and make conflicted docs
- {ok, _} = fabric:update_doc(<<"_replicator">>, make_replicate_doc(
- SourceDbName, TargetDbName), [?ADMIN_CTX]),
-
- %%check doc version
- wait_for_replicate(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"haw">>, <<"strawberry">>], 2, 5),
-
- %second search
- {ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
- {ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
-
- ?assertEqual(5, RedHitCount2 + GreenHitCount2),
-
- purge_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>, <<"haw">>,
- <<"strawberry">>]),
-
- %third search
- {ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(TargetDbName,
- <<"color:red">>),
- {ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(TargetDbName,
- <<"color:green">>),
-
- ?assertEqual(5, RedHitCount3 + GreenHitCount3),
- ?assertEqual(RedHitCount2, GreenHitCount3),
- ?assertEqual(GreenHitCount2, RedHitCount3),
-
- delete_db(SourceDbName),
- delete_db(TargetDbName),
- ok.
-
-test_purge_conflict2() ->
- %create dbs and docs
- SourceDbName = db_name(),
- timer:sleep(2000),
- TargetDbName = db_name(),
-
- create_db_docs(SourceDbName),
- create_db_docs(TargetDbName, <<"green">>),
-
- %first search
- {ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(TargetDbName,
- <<"color:red">>),
- {ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(TargetDbName,
- <<"color:green">>),
-
- ?assertEqual(5, RedHitCount1 + GreenHitCount1),
-
- %do replicate and make conflicted docs
- {ok, _} = fabric:update_doc(<<"_replicator">>, make_replicate_doc(
- SourceDbName, TargetDbName), [?ADMIN_CTX]),
-
- wait_for_replicate(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"haw">>, <<"strawberry">>], 2, 5),
-
- %second search
- {ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
- {ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
-
- ?assertEqual(5, RedHitCount2 + GreenHitCount2),
-
- purge_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"haw">>, <<"strawberry">>]),
- purge_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"haw">>, <<"strawberry">>]),
-
- %third search
- {ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
- {ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
-
- ?assertEqual(0, RedHitCount3 + GreenHitCount3),
-
- delete_db(SourceDbName),
- delete_db(TargetDbName),
- ok.
-
-
-test_purge_conflict3() ->
- %create dbs and docs
- SourceDbName = db_name(),
- timer:sleep(2000),
- TargetDbName = db_name(),
-
- create_db_docs(SourceDbName),
- create_db_docs(TargetDbName, <<"green">>),
-
- %first search
- {ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
- {ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
-
- ?assertEqual(5, RedHitCount1 + GreenHitCount1),
-
- %do replicate and make conflicted docs
- {ok, _} = fabric:update_doc(<<"_replicator">>, make_replicate_doc(
- SourceDbName, TargetDbName), [?ADMIN_CTX]),
-
- %%check doc version
- wait_for_replicate(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"haw">>, <<"strawberry">>], 2, 5),
-
- %second search
- {ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
- {ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
-
- ?assertEqual(5, RedHitCount2 + GreenHitCount2),
-
- purge_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"haw">>, <<"strawberry">>]),
-
- %third search
- {ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
- {ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
-
- ?assertEqual(5, RedHitCount3 + GreenHitCount3),
- ?assertEqual(RedHitCount2, GreenHitCount3),
- ?assertEqual(GreenHitCount2, RedHitCount3),
-
- purge_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"haw">>, <<"strawberry">>]),
- {ok, _, RedHitCount4, _, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
- {ok, _, GreenHitCount4, _, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
-
- ?assertEqual(0, RedHitCount4 + GreenHitCount4),
-
- delete_db(SourceDbName),
- delete_db(TargetDbName),
- ok.
-
-test_purge_conflict4() ->
- %create dbs and docs
- SourceDbName = db_name(),
- timer:sleep(2000),
- TargetDbName = db_name(),
-
- create_db_docs(SourceDbName, <<"green">>),
- create_db_docs(TargetDbName, <<"red">>),
-
- %first search
- {ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
- {ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
-
- ?assertEqual(5, RedHitCount1 + GreenHitCount1),
-
- %do replicate and make conflicted docs
- {ok, _} = fabric:update_doc(<<"_replicator">>, make_replicate_doc(
- SourceDbName, TargetDbName), [?ADMIN_CTX]),
-
- %%check doc version
- wait_for_replicate(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"haw">>, <<"strawberry">>], 2, 5),
-
- %second search
- {ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
- {ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
-
- ?assertEqual(5, RedHitCount2 + GreenHitCount2),
-
- purge_docs_with_all_revs(TargetDbName, [<<"apple">>, <<"tomato">>,
- <<"cherry">>, <<"haw">>, <<"strawberry">>]),
-
- %third search
- {ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
- {ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
-
- ?assertEqual(0, RedHitCount3 + GreenHitCount3),
-
- delete_db(SourceDbName),
- delete_db(TargetDbName),
- ok.
-
-test_purge_update() ->
- %create the db and docs
- DbName = db_name(),
- create_db_docs(DbName),
-
- QueryRed = <<"color:red">>,
- QueryGreen = <<"color:green">>,
-
- %first search request
- {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, QueryRed),
-
- ?assertEqual(HitCount1, 5),
-
- %update doc
- Rev = get_rev(DbName, <<"apple">>),
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"apple">>},
- {<<"_rev">>, couch_doc:rev_to_str(Rev)},
- {<<"color">>, <<"green">>},
- {<<"size">>, 8}
- ]}),
- {ok, _} = fabric:update_docs(DbName, [Doc], [?ADMIN_CTX]),
-
- %second search request
- {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, QueryRed),
- {ok, _, HitCount3, _, _, _} = dreyfus_search(DbName, QueryGreen),
-
- % 4 red and 1 green
- ?assertEqual(HitCount2, 4),
- ?assertEqual(HitCount3, 1),
-
- % purge 2 docs, 1 red and 1 green
- purge_docs(DbName, [<<"apple">>, <<"tomato">>]),
-
- % third search request
- {ok, _, HitCount4, _, _, _} = dreyfus_search(DbName, QueryRed),
- {ok, _, HitCount5, _, _, _} = dreyfus_search(DbName, QueryGreen),
-
- % 3 red and 0 green
- ?assertEqual(HitCount4, 3),
- ?assertEqual(HitCount5, 0),
-
- delete_db(DbName),
- ok.
-
-test_purge_update2() ->
- %create the db and docs
- DbName = db_name(),
- create_db_docs(DbName),
-
- Query1 = <<"size:1">>,
- Query1000 = <<"size:1000">>,
-
- %first search request
- {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, Query1),
- {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, Query1000),
-
- ?assertEqual(HitCount1, 5),
- ?assertEqual(HitCount2, 0),
-
- %update the doc 999 times; this takes about 30 seconds.
- update_doc(DbName, <<"apple">>, 999),
-
- %second search request
- {ok, _, HitCount3, _, _, _} = dreyfus_search(DbName, Query1),
- {ok, _, HitCount4, _, _, _} = dreyfus_search(DbName, Query1000),
-
- % 4 value(1) and 1 value(1000)
- ?assertEqual(HitCount3, 4),
- ?assertEqual(HitCount4, 1),
-
- % purge doc
- purge_docs(DbName, [<<"apple">>]),
-
- % third search request
- {ok, _, HitCount5, _, _, _} = dreyfus_search(DbName, Query1),
- {ok, _, HitCount6, _, _, _} = dreyfus_search(DbName, Query1000),
-
- % 4 value(1) and 0 value(1000)
- ?assertEqual(HitCount5, 4),
- ?assertEqual(HitCount6, 0),
-
- delete_db(DbName),
- ok.
-
-test_delete() ->
- DbName = db_name(),
- create_db_docs(DbName),
- {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>),
- ?assertEqual(HitCount1, 1),
- ok = delete_docs(DbName, [<<"apple">>]),
- {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, <<"apple">>),
- ?assertEqual(HitCount2, 0),
- delete_db(DbName),
- ok.
-
-test_delete_conflict() ->
- %create dbs and docs
- SourceDbName = db_name(),
- timer:sleep(2000),
- TargetDbName = db_name(),
-
- create_db_docs(SourceDbName),
- create_db_docs(TargetDbName, <<"green">>),
-
- %first search
- {ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
- {ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
-
- ?assertEqual(5, RedHitCount1 + GreenHitCount1),
-
- %do replicate and make conflicted docs
- {ok, _} = fabric:update_doc(<<"_replicator">>, make_replicate_doc(
- SourceDbName, TargetDbName), [?ADMIN_CTX]),
-
- wait_for_replicate(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"haw">>, <<"strawberry">>], 2, 5),
-
- %second search
- {ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
- {ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
-
- ?assertEqual(5, RedHitCount2 + GreenHitCount2),
-
- %delete docs
- delete_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"haw">>, <<"strawberry">>]),
-
- %third search
- {ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
- {ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
-
- ?assertEqual(5, RedHitCount3 + GreenHitCount3),
- ?assertEqual(RedHitCount2, GreenHitCount3),
- ?assertEqual(GreenHitCount2, RedHitCount3),
-
- delete_db(SourceDbName),
- delete_db(TargetDbName),
- ok.
-
-test_delete_purge_conflict() ->
- %create dbs and docs
- SourceDbName = db_name(),
- timer:sleep(2000),
- TargetDbName = db_name(),
-
- create_db_docs(SourceDbName),
- create_db_docs(TargetDbName, <<"green">>),
-
- %first search
- {ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
- {ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
-
- ?assertEqual(5, RedHitCount1 + GreenHitCount1),
-
- %do replicate and make conflicted docs
- {ok, _} = fabric:update_doc(<<"_replicator">>, make_replicate_doc(
- SourceDbName, TargetDbName), [?ADMIN_CTX]),
-
- wait_for_replicate(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"haw">>, <<"strawberry">>], 2, 5),
-
- %second search
- {ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
- {ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
-
- ?assertEqual(5, RedHitCount2 + GreenHitCount2),
-
- %purge docs
- purge_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"haw">>, <<"strawberry">>]),
-
- %delete docs
- delete_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"haw">>, <<"strawberry">>]),
-
- %third search
- {ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
- {ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
-
- ?assertEqual(RedHitCount3, 0),
- ?assertEqual(GreenHitCount3, 0),
-
- delete_db(SourceDbName),
- delete_db(TargetDbName),
- ok.
-
-test_local_doc() ->
- DbName = db_name(),
- create_db_docs(DbName),
-
- {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>),
- ?assertEqual(HitCount1, 1),
- purge_docs(DbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"strawberry">>]),
- {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, <<"apple">>),
- ?assertEqual(HitCount2, 0),
-
- %get local doc
- [Sig|_] = get_sigs(DbName),
- LocalId = dreyfus_util:get_local_purge_doc_id(Sig),
- LocalShards = mem3:local_shards(DbName),
- PurgeSeqs = lists:map(fun(Shard) ->
- {ok, Db} = couch_db:open_int(Shard#shard.name, [?ADMIN_CTX]),
- {ok, LDoc} = couch_db:open_doc(Db, LocalId, []),
- {Props} = couch_doc:to_json_obj(LDoc, []),
- dreyfus_util:get_value_from_options(<<"updated_on">>, Props),
- PurgeSeq = dreyfus_util:get_value_from_options(<<"purge_seq">>, Props),
- Type = dreyfus_util:get_value_from_options(<<"type">>, Props),
- ?assertEqual(<<"dreyfus">>, Type),
- couch_db:close(Db),
- PurgeSeq
- end, LocalShards),
- ?assertEqual(lists:sum(PurgeSeqs), 4),
-
- delete_db(DbName),
- ok.
-
-test_verify_index_exists1() ->
- DbName = db_name(),
- create_db_docs(DbName),
-
- {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>),
- ?assertEqual(HitCount1, 1),
-
- ok = purge_docs(DbName, [<<"apple">>]),
-
- {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, <<"apple">>),
- ?assertEqual(HitCount2, 0),
-
- ShardNames = [Sh || #shard{name = Sh} <- mem3:local_shards(DbName)],
- [ShardDbName | _Rest ] = ShardNames,
- {ok, Db} = couch_db:open(ShardDbName, [?ADMIN_CTX]),
- {ok, LDoc} = couch_db:open_doc(Db,
- dreyfus_util:get_local_purge_doc_id(
- <<"49e82c2a910b1046b55cc45ad058a7ee">>), []
- ),
- #doc{body = {Props}} = LDoc,
- ?assertEqual(true, dreyfus_util:verify_index_exists(ShardDbName, Props)),
- delete_db(DbName),
- ok.
-
-test_verify_index_exists2() ->
- DbName = db_name(),
- create_db_docs(DbName),
-
- {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>),
- ?assertEqual(HitCount1, 1),
-
- ShardNames = [Sh || #shard{name = Sh} <- mem3:local_shards(DbName)],
- [ShardDbName | _Rest ] = ShardNames,
- {ok, Db} = couch_db:open(ShardDbName, [?ADMIN_CTX]),
- {ok, LDoc} = couch_db:open_doc(Db,
- dreyfus_util:get_local_purge_doc_id(
- <<"49e82c2a910b1046b55cc45ad058a7ee">>), []
- ),
- #doc{body = {Props}} = LDoc,
- ?assertEqual(true, dreyfus_util:verify_index_exists(ShardDbName, Props)),
-
- delete_db(DbName),
- ok.
-
-test_verify_index_exists_failed() ->
- DbName = db_name(),
- create_db_docs(DbName),
-
- {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>),
- ?assertEqual(HitCount1, 1),
-
- ShardNames = [Sh || #shard{name = Sh} <- mem3:local_shards(DbName)],
- [ShardDbName | _Rest ] = ShardNames,
- {ok, Db} = couch_db:open(ShardDbName, [?ADMIN_CTX]),
- {ok, LDoc} = couch_db:open_doc(Db,
- dreyfus_util:get_local_purge_doc_id(
- <<"49e82c2a910b1046b55cc45ad058a7ee">>), []
- ),
- #doc{body = {Options}} = LDoc,
- OptionsDbErr = [
- {<<"indexname">>,
- dreyfus_util:get_value_from_options(<<"indexname">>, Options)},
- {<<"ddoc_id">>,
- dreyfus_util:get_value_from_options(<<"ddoc_id">>, Options)},
- {<<"signature">>,
- dreyfus_util:get_value_from_options(<<"signature">>, Options)}
- ],
- ?assertEqual(false, dreyfus_util:verify_index_exists(
- ShardDbName, OptionsDbErr)),
-
- OptionsIdxErr = [
- {<<"indexname">>, <<"someindex">>},
- {<<"ddoc_id">>,
- dreyfus_util:get_value_from_options(<<"ddoc_id">>, Options)},
- {<<"signature">>,
- dreyfus_util:get_value_from_options(<<"signature">>, Options)}
- ],
- ?assertEqual(false, dreyfus_util:verify_index_exists(
- ShardDbName, OptionsIdxErr)),
-
- OptionsDDocErr = [
- {<<"indexname">>,
- dreyfus_util:get_value_from_options(<<"indexname">>, Options)},
- {<<"ddoc_id">>,
- <<"somedesigndoc">>},
- {<<"signature">>,
- dreyfus_util:get_value_from_options(<<"signature">>, Options)}
- ],
- ?assertEqual(false, dreyfus_util:verify_index_exists(
- ShardDbName, OptionsDDocErr)),
-
- OptionsSigErr = [
- {<<"indexname">>,
- dreyfus_util:get_value_from_options(<<"indexname">>, Options)},
- {<<"ddoc_id">>,
- dreyfus_util:get_value_from_options(<<"ddoc_id">>, Options)},
- {<<"signature">>,
- <<"12345678901234567890123456789012">>}
- ],
- ?assertEqual(false, dreyfus_util:verify_index_exists(
- ShardDbName, OptionsSigErr)),
-
- delete_db(DbName),
- ok.
-
-test_delete_local_doc() ->
- DbName = db_name(),
- create_db_docs(DbName),
-
- {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>),
- ?assertEqual(HitCount1, 1),
-
- ok = purge_docs(DbName, [<<"apple">>]),
-
- {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, <<"apple">>),
- ?assertEqual(HitCount2, 0),
-
- LDocId = dreyfus_util:get_local_purge_doc_id(
- <<"49e82c2a910b1046b55cc45ad058a7ee">>),
- ShardNames = [Sh || #shard{name = Sh} <- mem3:local_shards(DbName)],
- [ShardDbName | _Rest ] = ShardNames,
- {ok, Db} = couch_db:open(ShardDbName, [?ADMIN_CTX]),
- {ok, _} = couch_db:open_doc(Db, LDocId, []),
-
- delete_docs(DbName, [<<"_design/search">>]),
-
- dreyfus_fabric_cleanup:go(DbName),
- {ok, Db2} = couch_db:open(ShardDbName, [?ADMIN_CTX]),
- {not_found, _} = couch_db:open_doc(Db2, LDocId, []),
-
- delete_db(DbName),
- ok.
-
-test_purge_search() ->
- DbName = db_name(),
- create_db_docs(DbName),
- purge_docs(DbName, [<<"apple">>, <<"tomato">>, <<"haw">>]),
- {ok, _, HitCount, _, _, _} = dreyfus_search(DbName, <<"color:red">>),
- ?assertEqual(HitCount, 2),
- delete_db(DbName),
- ok.
-
-%private API
-db_name() ->
- Nums = tuple_to_list(erlang:now()),
- Prefix = "test-db",
- Suffix = lists:concat([integer_to_list(Num) || Num <- Nums]),
- list_to_binary(Prefix ++ "-" ++ Suffix).
-
-purge_docs(DBName, DocIds) ->
- IdsRevs = [{DocId, [get_rev(DBName, DocId)]} || DocId <- DocIds],
- {ok, _} = fabric:purge_docs(DBName, IdsRevs, []),
- ok.
-
-purge_docs_with_all_revs(DBName, DocIds) ->
- IdsRevs = [{DocId, get_revs(DBName, DocId)} || DocId <- DocIds],
- {ok, _} = fabric:purge_docs(DBName, IdsRevs, []),
- ok.
-
-dreyfus_search(DbName, KeyWord) ->
- QueryArgs = #index_query_args{q = KeyWord},
- {ok, DDoc} = fabric:open_doc(DbName, <<"_design/search">>, []),
- dreyfus_fabric_search:go(DbName, DDoc, <<"index">>, QueryArgs).
-
-create_db_docs(DbName) ->
- create_db(DbName),
- create_docs(DbName, 5, <<"red">>).
-
-create_db_docs(DbName, Color) ->
- create_db(DbName),
- create_docs(DbName, 5, Color).
-
-create_docs(DbName, Count, Color) ->
- {ok, _} = fabric:update_docs(DbName, make_docs(Count, Color), [?ADMIN_CTX]),
- {ok, _} = fabric:update_doc(DbName, make_design_doc(dreyfus), [?ADMIN_CTX]).
-
-create_db(DbName) ->
- ok = fabric:create_db(DbName, [?ADMIN_CTX, {q, 1}]).
-
-delete_db(DbName) ->
- ok = fabric:delete_db(DbName, [?ADMIN_CTX]).
-
-make_docs(Count, Color) ->
- [make_doc(I, Color) || I <- lists:seq(1, Count)].
-
-make_doc(Id, Color) ->
- couch_doc:from_json_obj({[
- {<<"_id">>, get_value(Id)},
- {<<"color">>, Color},
- {<<"size">>, 1}
- ]}).
-
-get_value(Key) ->
- case Key of
- 1 -> <<"apple">>;
- 2 -> <<"tomato">>;
- 3 -> <<"cherry">>;
- 4 -> <<"strawberry">>;
- 5 -> <<"haw">>;
- 6 -> <<"carrot">>;
- 7 -> <<"pitaya">>;
- 8 -> <<"grape">>;
- 9 -> <<"date">>;
- 10 -> <<"watermelon">>
- end.
-
-make_design_doc(dreyfus) ->
- couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/search">>},
- {<<"language">>, <<"javascript">>},
- {<<"indexes">>, {[
- {<<"index">>, {[
- {<<"analyzer">>, <<"standard">>},
- {<<"index">>, <<
- "function (doc) { \n"
- " index(\"default\", doc._id);\n"
- " if(doc.color) {\n"
- " index(\"color\", doc.color);\n"
- " }\n"
- " if(doc.size) {\n"
- " index(\"size\", doc.size);\n"
- " }\n"
- "}"
- >>}
- ]}}
- ]}}
- ]}).
-
-make_replicate_doc(SourceDbName, TargetDbName) ->
- couch_doc:from_json_obj({[
- {<<"_id">>, list_to_binary("replicate_fm_" ++
- binary_to_list(SourceDbName) ++ "_to_" ++ binary_to_list(TargetDbName))},
- {<<"source">>, list_to_binary("http://localhost:15984/" ++ SourceDbName)},
- {<<"target">>, list_to_binary("http://localhost:15984/" ++ TargetDbName)}
- ]}).
-
-get_rev(DbName, DocId) ->
- FDI = fabric:get_full_doc_info(DbName, DocId, []),
- #doc_info{revs = [#rev_info{} = PrevRev | _]} = couch_doc:to_doc_info(FDI),
- PrevRev#rev_info.rev.
-
-get_revs(DbName, DocId) ->
- FDI = fabric:get_full_doc_info(DbName, DocId, []),
- #doc_info{ revs = Revs } = couch_doc:to_doc_info(FDI),
- [Rev#rev_info.rev || Rev <- Revs].
-
-update_doc(_, _, 0) ->
- ok;
-update_doc(DbName, DocId, Times) ->
- Rev = get_rev(DbName, DocId),
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"apple">>},
- {<<"_rev">>, couch_doc:rev_to_str(Rev)},
- {<<"size">>, 1001 - Times}
- ]}),
- {ok, _} = fabric:update_docs(DbName, [Doc], [?ADMIN_CTX]),
- update_doc(DbName, DocId, Times-1).
-
-delete_docs(DbName, DocIds) ->
- lists:foreach(
- fun(DocId) -> ok = delete_doc(DbName, DocId) end,
- DocIds
- ).
-
-delete_doc(DbName, DocId) ->
- Rev = get_rev(DbName, DocId),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DocId},
- {<<"_rev">>, couch_doc:rev_to_str(Rev)},
- {<<"_deleted">>, true}
- ]}),
- {ok, _} = fabric:update_doc(DbName, DDoc, [?ADMIN_CTX]),
- ok.
-
-wait_for_replicate(_, _, _, 0) ->
- couch_log:notice("[~p] wait time out", [?MODULE]),
- ok;
-wait_for_replicate(DbName, DocIds, ExpectRevCount, TimeOut)
- when is_list(DocIds) ->
- [wait_for_replicate(DbName, DocId, ExpectRevCount, TimeOut) || DocId <- DocIds];
-wait_for_replicate(DbName, DocId, ExpectRevCount, TimeOut) ->
- FDI = fabric:get_full_doc_info(DbName, DocId, []),
- #doc_info{ revs = Revs } = couch_doc:to_doc_info(FDI),
- case erlang:length(Revs) of
- ExpectRevCount ->
- couch_log:notice("[~p] wait end by expect, time used:~p, DocId:~p",
- [?MODULE, 5-TimeOut, DocId]),
- ok;
- _ ->
- timer:sleep(1000),
- wait_for_replicate(DbName, DocId, ExpectRevCount, TimeOut-1)
- end,
- ok.
-
-get_sigs(DbName) ->
- {ok, DesignDocs} = fabric:design_docs(DbName),
- lists:usort(lists:flatmap(fun active_sigs/1,
- [couch_doc:from_json_obj(DD) || DD <- DesignDocs])).
-
-active_sigs(#doc{body={Fields}}=Doc) ->
- {RawIndexes} = couch_util:get_value(<<"indexes">>, Fields, {[]}),
- {IndexNames, _} = lists:unzip(RawIndexes),
- [begin
- {ok, Index} = dreyfus_index:design_doc_to_index(Doc, IndexName),
- Index#index.sig
- end || IndexName <- IndexNames].
diff --git a/src/dreyfus/test/dreyfus_test_util.erl b/src/dreyfus/test/dreyfus_test_util.erl
deleted file mode 100644
index 631bc1047..000000000
--- a/src/dreyfus/test/dreyfus_test_util.erl
+++ /dev/null
@@ -1,13 +0,0 @@
--module(dreyfus_test_util).
-
--compile(export_all).
-
--include_lib("couch/include/couch_db.hrl").
-
-wait_config_change(Key, Value) ->
- test_util:wait(fun() ->
- case dreyfus_config:get(Key) of
- Value -> ok;
- _ -> wait
- end
- end).
diff --git a/src/dreyfus/test/elixir/mix.exs b/src/dreyfus/test/elixir/mix.exs
deleted file mode 100644
index 9b0f642dd..000000000
--- a/src/dreyfus/test/elixir/mix.exs
+++ /dev/null
@@ -1,30 +0,0 @@
-defmodule Foo.Mixfile do
- use Mix.Project
-
- def project do
- [
- app: :foo,
- version: "0.1.0",
- elixir: "~> 1.5",
- start_permanent: Mix.env == :prod,
- deps: deps()
- ]
- end
-
- # Run "mix help compile.app" to learn about applications.
- def application do
- [
- extra_applications: [:logger]
- ]
- end
-
- # Run "mix help deps" to learn about dependencies.
- defp deps do
- [
- # {:dep_from_hexpm, "~> 0.3.0"},
- {:httpotion, "~> 3.0"},
- {:jiffy, "~> 0.14.11"}
- # {:dep_from_git, git: "https://github.com/elixir-lang/my_dep.git", tag: "0.1.0"},
- ]
- end
-end
diff --git a/src/dreyfus/test/elixir/mix.lock b/src/dreyfus/test/elixir/mix.lock
deleted file mode 100644
index ed51e5312..000000000
--- a/src/dreyfus/test/elixir/mix.lock
+++ /dev/null
@@ -1,5 +0,0 @@
-%{
- "httpotion": {:hex, :httpotion, "3.1.0", "14d20d9b0ce4e86e253eb91e4af79e469ad949f57a5d23c0a51b2f86559f6589", [:mix], [{:ibrowse, "~> 4.4", [hex: :ibrowse, repo: "hexpm", optional: false]}], "hexpm"},
- "ibrowse": {:hex, :ibrowse, "4.4.1", "2b7d0637b0f8b9b4182de4bd0f2e826a4da2c9b04898b6e15659ba921a8d6ec2", [:rebar3], [], "hexpm"},
- "jiffy": {:hex, :jiffy, "0.14.13", "225a9a35e26417832c611526567194b4d3adc4f0dfa5f2f7008f4684076f2a01", [:rebar3], [], "hexpm"},
-}
diff --git a/src/dreyfus/test/elixir/run b/src/dreyfus/test/elixir/run
deleted file mode 100755
index 66a5947b7..000000000
--- a/src/dreyfus/test/elixir/run
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash -e
-cd "$(dirname "$0")"
-mix deps.get
-mix test --trace
diff --git a/src/dreyfus/test/elixir/test/partition_search_test.exs b/src/dreyfus/test/elixir/test/partition_search_test.exs
deleted file mode 100644
index 121995449..000000000
--- a/src/dreyfus/test/elixir/test/partition_search_test.exs
+++ /dev/null
@@ -1,247 +0,0 @@
-defmodule PartitionSearchTest do
- use CouchTestCase
-
- @moduletag :search
-
- @moduledoc """
- Test Partition functionality with search
- """
-
- def create_search_docs(db_name, pk1 \\ "foo", pk2 \\ "bar") do
- docs = for i <- 1..10 do
- id = if rem(i, 2) == 0 do
- "#{pk1}:#{i}"
- else
- "#{pk2}:#{i}"
- end
- %{
- :_id => id,
- :value => i,
- :some => "field"
- }
- end
-
- resp = Couch.post("/#{db_name}/_bulk_docs", headers: ["Content-Type": "application/json"], body: %{:docs => docs}, query: %{w: 3})
- assert resp.status_code in [201, 202]
- end
-
- def create_ddoc(db_name, opts \\ %{}) do
- index_fn = "function(doc) {\n if (doc.some) {\n index('some', doc.some);\n }\n}"
- default_ddoc = %{
- indexes: %{
- books: %{
- analyzer: %{name: "standard"},
- index: index_fn
- }
- }
- }
-
- ddoc = Enum.into(opts, default_ddoc)
-
- resp = Couch.put("/#{db_name}/_design/library", body: ddoc)
- assert resp.status_code in [201, 202]
- assert Map.has_key?(resp.body, "ok") == true
- end
-
- def get_ids(resp) do
- %{:body => %{"rows" => rows}} = resp
- Enum.map(rows, fn row -> row["id"] end)
- end
-
- @tag :with_partitioned_db
- test "Simple query returns partitioned search results", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_partition/foo/_design/library/_search/books"
- resp = Couch.get(url, query: %{q: "some:field"})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert ids == ["foo:10", "foo:2", "foo:4", "foo:6", "foo:8"]
-
- url = "/#{db_name}/_partition/bar/_design/library/_search/books"
- resp = Couch.get(url, query: %{q: "some:field"})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert ids == ["bar:1", "bar:3", "bar:5", "bar:7", "bar:9"]
- end
-
- @tag :with_partitioned_db
- test "Only returns docs in partition not those in shard", context do
- db_name = context[:db_name]
- create_search_docs(db_name, "foo", "bar42")
- create_ddoc(db_name)
-
- url = "/#{db_name}/_partition/foo/_design/library/_search/books"
- resp = Couch.get(url, query: %{q: "some:field"})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert ids == ["foo:10", "foo:2", "foo:4", "foo:6", "foo:8"]
- end
-
- @tag :with_partitioned_db
- test "Works with bookmarks and limit", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_partition/foo/_design/library/_search/books"
- resp = Couch.get(url, query: %{q: "some:field", limit: 3})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert ids == ["foo:10", "foo:2", "foo:4"]
-
- %{:body => %{"bookmark" => bookmark}} = resp
-
- resp = Couch.get(url, query: %{q: "some:field", limit: 3, bookmark: bookmark})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert ids == ["foo:6", "foo:8"]
-
- resp = Couch.get(url, query: %{q: "some:field", limit: 2000, bookmark: bookmark})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert ids == ["foo:6", "foo:8"]
-
- resp = Couch.get(url, query: %{q: "some:field", limit: 2001, bookmark: bookmark})
- assert resp.status_code == 400
- end
-
- @tag :with_db
- test "Works with limit using POST for on non-partitioned db", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/library/_search/books"
- resp = Couch.post(url, body: %{:q => "some:field", :limit => 1})
- assert resp.status_code == 200
- end
-
- @tag :with_partitioned_db
- test "Works with limit using POST for partitioned db", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_partition/foo/_design/library/_search/books"
- resp = Couch.post(url, body: %{:q => "some:field", :limit => 1})
- assert resp.status_code == 200
- end
-
- @tag :with_partitioned_db
- test "Cannot do global query with partition view", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/library/_search/books"
- resp = Couch.get(url, query: %{q: "some:field"})
- assert resp.status_code == 400
- %{:body => %{"reason" => reason}} = resp
- assert Regex.match?(~r/mandatory for queries to this index./, reason)
- end
-
- @tag :with_partitioned_db
- test "Cannot do partition query with global search ddoc", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name, options: %{partitioned: false})
-
- url = "/#{db_name}/_partition/foo/_design/library/_search/books"
- resp = Couch.get(url, query: %{q: "some:field"})
- assert resp.status_code == 400
- %{:body => %{"reason" => reason}} = resp
- assert reason == "`partition` not supported on this index"
- end
-
- @tag :with_db
- test "normal search on non-partitioned dbs still work", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/library/_search/books"
- resp = Couch.get(url, query: %{q: "some:field"})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert Enum.sort(ids) == Enum.sort(["bar:1", "bar:5", "bar:9", "foo:2", "bar:3", "foo:4", "foo:6", "bar:7", "foo:8", "foo:10"])
- end
-
- @tag :with_db
- test "normal search on non-partitioned dbs without limit", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/library/_search/books"
- resp = Couch.get(url, query: %{q: "some:field"})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert Enum.sort(ids) == Enum.sort(["bar:1", "bar:5", "bar:9", "foo:2", "bar:3", "foo:4", "foo:6", "bar:7", "foo:8", "foo:10"])
- end
-
- @tag :with_db
- test "normal search on non-partitioned dbs with limit", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/library/_search/books"
- resp = Couch.get(url, query: %{q: "some:field", limit: 3})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert Enum.sort(ids) == Enum.sort(["bar:1", "bar:5", "bar:9"])
- end
-
- @tag :with_db
- test "normal search on non-partitioned dbs with over limit", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/library/_search/books"
- resp = Couch.get(url, query: %{q: "some:field", limit: 201})
- assert resp.status_code == 400
- end
-
- @tag :with_partitioned_db
- test "rejects conflicting partition values", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_partition/foo/_design/library/_search/books"
- resp = Couch.post(url, body: %{q: "some:field", partition: "bar"})
- assert resp.status_code == 400
- end
-
- @tag :with_partitioned_db
- test "restricted parameters are not allowed in query or body", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- body = %{q: "some:field", partition: "foo"}
-
- Enum.each(
- [
- {:counts, "[\"type\"]"},
- {:group_field, "some"},
- {:ranges, :jiffy.encode(%{price: %{cheap: "[0 TO 100]"}})},
- {:drilldown, "[\"key\",\"a\"]"},
- ],
- fn {key, value} ->
- url = "/#{db_name}/_partition/foo/_design/library/_search/books"
- bannedparam = Map.put(body, key, value)
- get_resp = Couch.get(url, query: bannedparam)
- %{:body => %{"reason" => get_reason}} = get_resp
- assert Regex.match?(~r/are incompatible/, get_reason)
- post_resp = Couch.post(url, body: bannedparam)
- %{:body => %{"reason" => post_reason}} = post_resp
- assert Regex.match?(~r/are incompatible/, post_reason)
- end
- )
- end
-end
diff --git a/src/dreyfus/test/elixir/test/search_test.exs b/src/dreyfus/test/elixir/test/search_test.exs
deleted file mode 100644
index 829b3395f..000000000
--- a/src/dreyfus/test/elixir/test/search_test.exs
+++ /dev/null
@@ -1,226 +0,0 @@
-defmodule SearchTest do
- use CouchTestCase
-
- @moduletag :search
-
- @moduledoc """
- Test search
- """
-
- def create_search_docs(db_name) do
- resp = Couch.post("/#{db_name}/_bulk_docs",
- headers: ["Content-Type": "application/json"],
- body: %{:docs => [
- %{"item" => "apple", "place" => "kitchen", "state" => "new"},
- %{"item" => "banana", "place" => "kitchen", "state" => "new"},
- %{"item" => "carrot", "place" => "kitchen", "state" => "old"},
- %{"item" => "date", "place" => "lobby", "state" => "unknown"},
- ]}
- )
- assert resp.status_code in [201, 202]
- end
-
- def create_ddoc(db_name, opts \\ %{}) do
- default_ddoc = %{
- indexes: %{
- fruits: %{
- analyzer: %{name: "standard"},
- index: "function (doc) {\n index(\"item\", doc.item, {facet: true});\n index(\"place\", doc.place, {facet: true});\n index(\"state\", doc.state, {facet: true});\n}"
- }
- }
- }
-
- ddoc = Enum.into(opts, default_ddoc)
-
- resp = Couch.put("/#{db_name}/_design/inventory", body: ddoc)
- assert resp.status_code in [201, 202]
- assert Map.has_key?(resp.body, "ok") == true
- end
-
- def create_invalid_ddoc(db_name, opts \\ %{}) do
- invalid_ddoc = %{
- :indexes => [
- %{"name" => "foo", "ddoc" => "bar", "type" => "text"},
- ]
- }
-
- ddoc = Enum.into(opts, invalid_ddoc)
-
- resp = Couch.put("/#{db_name}/_design/search", body: ddoc)
- assert resp.status_code in [201, 202]
- assert Map.has_key?(resp.body, "ok") == true
- end
-
- def get_items(resp) do
- %{:body => %{"rows" => rows}} = resp
- Enum.map(rows, fn row -> row["doc"]["item"] end)
- end
-
- @tag :with_db
- test "search returns all items for GET", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/inventory/_search/fruits"
- resp = Couch.get(url, query: %{q: "*:*", include_docs: true})
- assert resp.status_code == 200
- ids = get_items(resp)
- assert Enum.sort(ids) == Enum.sort(["apple", "banana", "carrot", "date"])
- end
-
- @tag :with_db
- test "drilldown single key single value for GET", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/inventory/_search/fruits"
- resp = Couch.get(url, query: %{q: "*:*", drilldown: :jiffy.encode(["place", "kitchen"]), include_docs: true})
- assert resp.status_code == 200
- ids = get_items(resp)
- assert Enum.sort(ids) == Enum.sort(["apple", "banana", "carrot"])
- end
-
- @tag :with_db
- test "drilldown single key multiple values for GET", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/inventory/_search/fruits"
- resp = Couch.get(url, query: %{q: "*:*", drilldown: :jiffy.encode(["state", "new", "unknown"]), include_docs: true})
- assert resp.status_code == 200
- ids = get_items(resp)
- assert Enum.sort(ids) == Enum.sort(["apple", "banana", "date"])
- end
-
- @tag :with_db
- test "drilldown multiple keys single values for GET", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/inventory/_search/fruits"
- resp = Couch.get(url, query: %{q: "*:*", drilldown: :jiffy.encode([["state", "old"], ["item", "apple"]]), include_docs: true})
- assert resp.status_code == 200
- ids = get_items(resp)
- assert Enum.sort(ids) == []
- end
-
- @tag :with_db
- test "drilldown multiple query definitions for GET", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/inventory/_search/fruits?q=*:*&drilldown=[\"state\",\"old\"]&drilldown=[\"item\",\"apple\"]&include_docs=true"
- resp = Couch.get(url)
- assert resp.status_code == 200
- ids = get_items(resp)
- assert Enum.sort(ids) == []
- end
-
-
- @tag :with_db
- test "search returns all items for POST", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/inventory/_search/fruits"
- resp = Couch.post(url, body: %{q: "*:*", include_docs: true})
- assert resp.status_code == 200
- ids = get_items(resp)
- assert Enum.sort(ids) == Enum.sort(["apple", "banana", "carrot", "date"])
- end
-
- @tag :with_db
- test "drilldown single key single value for POST", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/inventory/_search/fruits"
- resp = Couch.post(url, body: %{query: "*:*", drilldown: ["place", "kitchen"], include_docs: true})
- assert resp.status_code == 200
- ids = get_items(resp)
- assert Enum.sort(ids) == Enum.sort(["apple", "banana", "carrot"])
- end
-
- @tag :with_db
- test "drilldown single key multiple values for POST", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/inventory/_search/fruits"
- resp = Couch.post(url, body: %{query: "*:*", drilldown: ["state", "new", "unknown"], include_docs: true})
- assert resp.status_code == 200
- ids = get_items(resp)
- assert Enum.sort(ids) == Enum.sort(["apple", "banana", "date"])
- end
-
- @tag :with_db
- test "drilldown multiple keys single values for POST", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/inventory/_search/fruits"
- resp = Couch.post(url, body: %{q: "*:*", drilldown: [["state", "old"], ["item", "apple"]], include_docs: true})
- assert resp.status_code == 200
- ids = get_items(resp)
- assert Enum.sort(ids) == []
- end
-
- @tag :with_db
- test "drilldown three keys single values for POST", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/inventory/_search/fruits"
- resp = Couch.post(url, body: %{q: "*:*", drilldown: [["place", "kitchen"], ["state", "new"], ["item", "apple"]], include_docs: true})
- assert resp.status_code == 200
- ids = get_items(resp)
- assert Enum.sort(ids) == ["apple"]
- end
-
- @tag :with_db
- test "drilldown multiple keys multiple values for POST", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/inventory/_search/fruits"
- resp = Couch.post(url, body: %{q: "*:*", drilldown: [["state", "old", "new"], ["item", "apple"]], include_docs: true})
- assert resp.status_code == 200
- ids = get_items(resp)
- assert Enum.sort(ids) == ["apple"]
- end
-
- @tag :with_db
- test "drilldown multiple query definitions for POST", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
-
- url = "/#{db_name}/_design/inventory/_search/fruits"
- resp = Couch.post(url, body: "{\"include_docs\": true, \"q\": \"*:*\", \"drilldown\": [\"state\", \"old\"], \"drilldown\": [\"item\", \"apple\"]}")
- assert resp.status_code == 200
- ids = get_items(resp)
- assert Enum.sort(ids) == ["apple"]
- end
-
- @tag :with_db
- test "clean up search index with invalid design document", context do
- db_name = context[:db_name]
- create_search_docs(db_name)
- create_ddoc(db_name)
- create_invalid_ddoc(db_name)
-
- resp = Couch.post("/#{db_name}/_search_cleanup")
- assert resp.status_code in [201, 202]
- end
-end
diff --git a/src/dreyfus/test/elixir/test/test_helper.exs b/src/dreyfus/test/elixir/test/test_helper.exs
deleted file mode 100644
index 6eb20e242..000000000
--- a/src/dreyfus/test/elixir/test/test_helper.exs
+++ /dev/null
@@ -1,4 +0,0 @@
-Code.require_file "../../../../couchdb/test/elixir/lib/couch.ex", __DIR__
-Code.require_file "../../../../couchdb/test/elixir/test/test_helper.exs", __DIR__
-Code.require_file "../../../../couchdb/test/elixir/test/support/couch_test_case.ex", __DIR__
-Code.require_file "../../../../couchdb/test/elixir/lib/couch/db_test.ex", __DIR__
diff --git a/src/ebtree/README.md b/src/ebtree/README.md
index 9ce79a0c6..8babe85b1 100644
--- a/src/ebtree/README.md
+++ b/src/ebtree/README.md
@@ -11,9 +11,3 @@ intermediate reduction values on the inner nodes for performance.
The FoundationDB keys start with a user defined prefix and the opaque
node id.
-
-TODO
-
-1. Rewrite inner node ids (non-root, non-leaf) so we can safely cache
- them outside of a transaction. (see "immutable" branch)
-2. Chunkify large values over multiple rows?
diff --git a/src/ebtree/src/ebtree.erl b/src/ebtree/src/ebtree.erl
index 97a820304..a62074ca0 100644
--- a/src/ebtree/src/ebtree.erl
+++ b/src/ebtree/src/ebtree.erl
@@ -49,15 +49,14 @@
collate_fun,
reduce_fun,
encode_fun,
- persist_fun,
- cache_fun
+ persist_fun
}).
-define(META, 0).
-define(META_ORDER, 0).
-define(NODE, 1).
--define(NODE_ROOT_ID, <<0>>).
+-define(NODE_ROOT_ID, <<0:128>>).
-define(underflow(Tree, Node), Tree#tree.min > length(Node#node.members)).
-define(at_min(Tree, Node), Tree#tree.min == length(Node#node.members)).
@@ -93,15 +92,13 @@ open(Db, Prefix, Order, Options) when is_binary(Prefix), is_integer(Order), Orde
CollateFun = proplists:get_value(collate_fun, Options, fun collate_raw/2),
EncodeFun = proplists:get_value(encode_fun, Options, fun encode_erlang/3),
PersistFun = proplists:get_value(persist_fun, Options, fun simple_persist/3),
- CacheFun = proplists:get_value(cache_fun, Options, fun cache_noop/2),
Tree = #tree{
prefix = Prefix,
reduce_fun = ReduceFun,
collate_fun = CollateFun,
encode_fun = EncodeFun,
- persist_fun = PersistFun,
- cache_fun = CacheFun
+ persist_fun = PersistFun
},
erlfdb:transactional(Db, fun(Tx) ->
@@ -135,13 +132,13 @@ max() ->
{Key :: term(), Value :: term()} | false.
lookup(Db, #tree{} = Tree, Key) ->
Fun = fun
- ({visit, K, V}, _Acc) when K =:= Key ->
- {stop, {K, V}};
- ({visit, K, _V}, Acc) ->
- case collate(Tree, K, Key, [gt]) of
- true ->
+ ({visit, K, V}, Acc) ->
+ case collate(Tree, K, Key) of
+ eq ->
+ {stop, {K, V}};
+ gt ->
{stop, Acc};
- false ->
+ lt ->
{ok, Acc}
end;
({traverse, F, L, _R}, Acc) ->
@@ -176,18 +173,17 @@ lookup_multi_fold(_, {_, [], _} = Acc) ->
{stop, Acc};
lookup_multi_fold({visit, Key1, Value}, {Tree, [Key2 | Rest], Acc}) ->
- {NewKeys, NewAcc} = case collate(Tree, Key1, Key2) of
+ case collate(Tree, Key1, Key2) of
lt ->
% Still looking for the next user key
- {[Key2 | Rest], Acc};
+ {ok, {Tree, [Key2 | Rest], Acc}};
eq ->
% Found a requested key
- {Rest, [{Key2, Value} | Acc]};
+ {ok, {Tree, Rest, [{Key2, Value} | Acc]}};
gt ->
% The user key wasn't found so we drop it
- {Rest, Acc}
- end,
- {ok, {Tree, NewKeys, NewAcc}};
+ lookup_multi_fold({visit, Key1, Value}, {Tree, Rest, Acc})
+ end;
lookup_multi_fold({traverse, FKey, LKey, R}, {Tree, [UKey | Rest], Acc}) ->
case collate(Tree, FKey, UKey, [gt]) of
@@ -607,10 +603,9 @@ split_child(Tx, #tree{} = Tree, #node{} = Parent0, #node{} = Child) ->
umerge_members(Tree, Parent0#node.level, [{FirstRightKey, LastRightKey, RightId, RightReduction}],
lists:keydelete(Child#node.id, 3, Parent0#node.members)))
},
- Parent2 = new_node_id_if_cacheable(Tx, Tree, Parent0, Parent1),
clear_node(Tx, Tree, Child),
- set_nodes(Tx, Tree, [LeftChild, RightChild, Parent2]),
- {Parent2, LeftChild, RightChild}.
+ set_nodes(Tx, Tree, [LeftChild, RightChild, Parent1]),
+ {Parent1, LeftChild, RightChild}.
update_prev_neighbour(_Tx, #tree{} = _Tree, #node{prev = undefined} = _Node) ->
@@ -634,7 +629,7 @@ insert_nonfull(Tx, #tree{} = Tree, #node{level = 0} = Node0, Key, Value) ->
members = umerge_members(Tree, 0, [{Key, Value}], Node0#node.members)
},
set_node(Tx, Tree, Node0, Node1),
- {Node1#node.id, reduce_node(Tree, Node1)};
+ reduce_node(Tree, Node1);
insert_nonfull(Tx, #tree{} = Tree, #node{} = Node0, Key, Value) ->
ChildId0 = find_child_id(Tree, Node0, Key),
@@ -654,17 +649,16 @@ insert_nonfull(Tx, #tree{} = Tree, #node{} = Node0, Key, Value) ->
{Node0, Child0}
end,
ChildId1 = Child1#node.id,
- {ChildId2, NewReduction} = insert_nonfull(Tx, Tree, Child1, Key, Value),
+ NewReduction = insert_nonfull(Tx, Tree, Child1, Key, Value),
{CurrentFirstKey, CurrentLastKey, ChildId1, _OldReduction} = lists:keyfind(ChildId1, 3, Node1#node.members),
[NewFirstKey, _] = sort_keys(Tree, [Key, CurrentFirstKey]),
[_, NewLastKey] = sort_keys(Tree, [Key, CurrentLastKey]),
Node2 = Node1#node{
members = lists:keyreplace(ChildId1, 3, Node1#node.members,
- {NewFirstKey, NewLastKey, ChildId2, NewReduction})
+ {NewFirstKey, NewLastKey, ChildId1, NewReduction})
},
- Node3 = new_node_id_if_cacheable(Tx, Tree, Node0, Node2),
- set_node(Tx, Tree, Node0, Node3),
- {Node3#node.id, reduce_node(Tree, Node2)}.
+ set_node(Tx, Tree, Node0, Node2),
+ reduce_node(Tree, Node2).
%% @doc Inserts or updates multiple values in the ebtree
@@ -724,14 +718,8 @@ split_node_multi(Tx, Tree, Node) ->
true when Node#node.id == ?NODE_ROOT_ID ->
Node#node.members;
true ->
- NewNode = case node_is_cacheable(Node) of
- true ->
- Node#node{id = new_node_id()};
- false ->
- Node
- end,
- set_node(Tx, Tree, NewNode),
- [to_member(Tree, NewNode)];
+ set_node(Tx, Tree, Node),
+ [to_member(Tree, Node)];
false ->
clear_node(Tx, Tree, Node),
Nodes0 = create_nodes(Tx, Tree, Node),
@@ -883,18 +871,16 @@ delete(Tx, #tree{} = Tree, #node{} = Parent0, Key) ->
Parent1 = Parent0#node{
members = Members3
},
- Parent2 = new_node_id_if_cacheable(Tx, Tree, Parent0, Parent1),
clear_nodes(Tx, Tree, [Child0, Sibling]),
set_nodes(Tx, Tree, NewNodes),
- Parent2;
+ Parent1;
false ->
set_node(Tx, Tree, Child0, Child1),
{_OldFirstKey, _OldLastKey, ChildId0, _OldReduction} = lists:keyfind(ChildId0, 3, Parent0#node.members),
- Parent1 = Parent0#node{
+ Parent0#node{
members = lists:keyreplace(ChildId0, 3, Parent0#node.members,
{first_key(Child1), last_key(Child1), Child1#node.id, reduce_node(Tree, Child1)})
- },
- new_node_id_if_cacheable(Tx, Tree, Parent0, Parent1)
+ }
end.
@@ -989,16 +975,10 @@ meta_key(Prefix, MetaKey) when is_binary(Prefix) ->
%% node persistence functions
get_node(Tx, #tree{} = Tree, Id) ->
- case cache(Tree, get, Id) of
- undefined ->
- Key = node_key(Tree#tree.prefix, Id),
- Value = persist(Tree, Tx, get, Key),
- Node = decode_node(Tree, Id, Key, Value),
- cache(Tree, set, [Id, Node]),
- Node;
- #node{} = Node ->
- Node
- end.
+ Key = node_key(Tree#tree.prefix, Id),
+ Value = persist(Tree, Tx, get, Key),
+ decode_node(Tree, Id, Key, Value).
+
clear_nodes(Tx, #tree{} = Tree, Nodes) ->
lists:foreach(fun(Node) ->
@@ -1008,7 +988,6 @@ clear_nodes(Tx, #tree{} = Tree, Nodes) ->
clear_node(Tx, #tree{} = Tree, #node{} = Node) ->
Key = node_key(Tree#tree.prefix, Node#node.id),
- cache(Tree, clear, Node#node.id),
persist(Tree, Tx, clear, Key).
@@ -1029,11 +1008,10 @@ set_node(Tx, #tree{} = Tree, #node{} = Node) ->
?validate_node(Tree, Node),
Key = node_key(Tree#tree.prefix, Node#node.id),
Value = encode_node(Tree, Key, Node),
- cache(Tree, set, [Node#node.id, Node]),
persist(Tree, Tx, set, [Key, Value]).
-node_key(Prefix, Id) when is_binary(Prefix), is_binary(Id) ->
+node_key(Prefix, Id) when is_binary(Prefix), is_binary(Id), bit_size(Id) =:= 128 ->
erlfdb_tuple:pack({?NODE, Id}, Prefix).
@@ -1263,38 +1241,6 @@ simple_persist(Tx, get, Key) ->
simple_persist(Tx, clear, Key) ->
erlfdb:clear(Tx, Key).
-
-%% cache functions
-
-cache_noop(set, _) ->
- ok;
-cache_noop(clear, _) ->
- ok;
-cache_noop(get, _) ->
- undefined.
-
-
-cache(#tree{} = Tree, set, [Id, #node{} = Node]) ->
- #tree{cache_fun = CacheFun} = Tree,
- case node_is_cacheable(Node) of
- true ->
- CacheFun(set, [Id, Node]);
- false ->
- ok
- end;
-
-cache(#tree{} = Tree, clear, Id) ->
- #tree{cache_fun = CacheFun} = Tree,
- CacheFun(clear, Id);
-
-cache(#tree{} = _Tree, get, ?NODE_ROOT_ID) ->
- undefined;
-
-cache(#tree{} = Tree, get, Id) ->
- #tree{cache_fun = CacheFun} = Tree,
- CacheFun(get, Id).
-
-
%% private functions
init_order(#tree{} = Tree, Order)
@@ -1324,28 +1270,6 @@ last_key(Members) when is_list(Members) ->
end.
-new_node_id_if_cacheable(Tx, #tree{} = Tree, #node{} = Old, #node{} = New) ->
- MembersChanged = Old#node.members /= New#node.members,
- NodeIsCacheable = node_is_cacheable(New),
- if
- MembersChanged andalso NodeIsCacheable ->
- clear_node(Tx, Tree, New),
- New#node{id = new_node_id()};
- true ->
- New
- end.
-
-
-node_is_cacheable(#node{id = ?NODE_ROOT_ID}) ->
- false;
-
-node_is_cacheable(#node{level = 0}) ->
- false;
-
-node_is_cacheable(#node{}) ->
- true.
-
-
new_node_id() ->
crypto:strong_rand_bytes(16).
@@ -1469,7 +1393,8 @@ lookup_multi_test() ->
validate_tree(Db, Tree),
?assertEqual([{1, 2}], lookup_multi(Db, Tree, [1])),
?assertEqual([{15, 16}, {2, 3}], lookup_multi(Db, Tree, [2, 15])),
- ?assertEqual([{15, 16}, {4, 5}, {2, 3}], lookup_multi(Db, Tree, [2, 101, 15, 4, -3])).
+ ?assertEqual([{15, 16}, {4, 5}, {2, 3}], lookup_multi(Db, Tree, [2, 101, 15, 4, -3])),
+ ?assertEqual([{2, 3}], lookup_multi(Db, Tree, [1.5, 2])).
insert_multi_test() ->
@@ -1782,25 +1707,6 @@ validate_node_test_() ->
].
-cache_test_() ->
- {spawn, [fun() ->
- Db = erlfdb_util:get_test_db([empty]),
- CacheFun = fun
- (set, [Id, Node]) ->
- erlang:put(Id, Node);
- (clear, Id) ->
- erlang:erase(Id);
- (get, Id) ->
- erlang:get(Id)
- end,
- Tree = open(Db, <<1,2,3>>, 4, [{cache_fun, CacheFun}]),
- [ebtree:insert(Db, Tree, I, I) || I <- lists:seq(1, 16)],
- ?assertEqual({1, 1}, ebtree:lookup(Db, Tree, 1)),
- NodeCache = [V || {_K, V} <- erlang:get(), is_record(V, node)],
- ?assertEqual(3, length(NodeCache))
- end]}.
-
-
umerge_members_test() ->
Tree = #tree{collate_fun = fun collate_raw/2},
NewList = fun() ->
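The lookup/3 rewrite above drops the boolean collate check in favour of a single three-way comparison; a minimal sketch of that visit callback in isolation (function and variable names are illustrative):

    %% Sketch of the new lookup fold step: collate/3 is assumed to return
    %% lt | eq | gt when comparing the visited key against the search key.
    lookup_visit(Tree, SearchKey, {visit, K, V}, Acc) ->
        case collate(Tree, K, SearchKey) of
            eq -> {stop, {K, V}};  % exact match, stop with the pair
            gt -> {stop, Acc};     % walked past it, the key is absent
            lt -> {ok, Acc}        % keep folding
        end.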
diff --git a/src/fabric/include/fabric.hrl b/src/fabric/include/fabric.hrl
deleted file mode 100644
index 2a4da8bcf..000000000
--- a/src/fabric/include/fabric.hrl
+++ /dev/null
@@ -1,46 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--include_lib("eunit/include/eunit.hrl").
-
--record(collector, {
- db_name=nil,
- query_args,
- callback,
- counters,
- buffer_size,
- blocked = [],
- total_rows = 0,
- offset = 0,
- rows = [],
- skip,
- limit,
- keys,
- os_proc,
- reducer,
- collation,
- lang,
- sorted,
- user_acc,
- update_seq
-}).
-
--record(stream_acc, {
- workers,
- ready,
- start_fun,
- replacements,
- ring_opts
-}).
-
--record(view_row, {key, id, value, doc, worker}).
--record(change, {key, id, value, deleted=false, doc, worker}).
diff --git a/src/fabric/include/fabric2.hrl b/src/fabric/include/fabric2.hrl
index ebbb7c7c5..f0d789940 100644
--- a/src/fabric/include/fabric2.hrl
+++ b/src/fabric/include/fabric2.hrl
@@ -11,6 +11,9 @@
% the License.
+-include_lib("erlfdb/include/erlfdb.hrl").
+
+
-define(uint2bin(I), binary:encode_unsigned(I, little)).
-define(bin2uint(I), binary:decode_unsigned(I, little)).
-define(bin2int(V), binary_to_integer(V)).
@@ -77,12 +80,4 @@
-define(PDICT_TX_RES_KEY, '$fabric_tx_result').
-define(PDICT_FOLD_ACC_STATE, '$fabric_fold_acc_state').
-% Let's keep these in ascending order
--define(TRANSACTION_TOO_OLD, 1007).
--define(FUTURE_VERSION, 1009).
--define(COMMIT_UNKNOWN_RESULT, 1021).
--define(TRANSACTION_CANCELLED, 1025).
--define(TRANSACTION_TOO_LARGE, 2101).
-
-
-define(DEFAULT_BINARY_CHUNK_SIZE, 100000).
diff --git a/src/fabric/priv/stats_descriptions.cfg b/src/fabric/priv/stats_descriptions.cfg
deleted file mode 100644
index d12aa0c84..000000000
--- a/src/fabric/priv/stats_descriptions.cfg
+++ /dev/null
@@ -1,28 +0,0 @@
-{[fabric, worker, timeouts], [
- {type, counter},
- {desc, <<"number of worker timeouts">>}
-]}.
-{[fabric, open_shard, timeouts], [
- {type, counter},
- {desc, <<"number of open shard timeouts">>}
-]}.
-{[fabric, read_repairs, success], [
- {type, counter},
- {desc, <<"number of successful read repair operations">>}
-]}.
-{[fabric, read_repairs, failure], [
- {type, counter},
- {desc, <<"number of failed read repair operations">>}
-]}.
-{[fabric, doc_update, errors], [
- {type, counter},
- {desc, <<"number of document update errors">>}
-]}.
-{[fabric, doc_update, mismatched_errors], [
- {type, counter},
- {desc, <<"number of document update errors with multiple error types">>}
-]}.
-{[fabric, doc_update, write_quorum_errors], [
- {type, counter},
- {desc, <<"number of write quorum errors">>}
-]}.
diff --git a/src/fabric/src/fabric.app.src b/src/fabric/src/fabric.app.src
index a7059fd10..1974f8ae6 100644
--- a/src/fabric/src/fabric.app.src
+++ b/src/fabric/src/fabric.app.src
@@ -24,8 +24,6 @@
couch_epi,
couch,
ctrace,
- rexi,
- mem3,
couch_log,
couch_stats,
erlfdb,
diff --git a/src/fabric/src/fabric.erl b/src/fabric/src/fabric.erl
deleted file mode 100644
index bb538e2db..000000000
--- a/src/fabric/src/fabric.erl
+++ /dev/null
@@ -1,720 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric).
-
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-% DBs
--export([all_dbs/0, all_dbs/1, create_db/1, create_db/2, delete_db/1,
- delete_db/2, get_db_info/1, get_doc_count/1, get_doc_count/2,
- set_revs_limit/3, set_security/2, set_security/3,
- get_revs_limit/1, get_security/1, get_security/2,
- get_all_security/1, get_all_security/2,
- get_purge_infos_limit/1, set_purge_infos_limit/3,
- compact/1, compact/2, get_partition_info/2]).
-
-% Documents
--export([open_doc/3, open_revs/4, get_doc_info/3, get_full_doc_info/3,
- get_missing_revs/2, get_missing_revs/3, update_doc/3, update_docs/3,
- purge_docs/3, att_receiver/2]).
-
-% Views
--export([all_docs/4, all_docs/5, changes/4, query_view/3, query_view/4,
- query_view/6, query_view/7, get_view_group_info/2, end_changes/0]).
-
-% miscellany
--export([design_docs/1, reset_validation_funs/1, cleanup_index_files/0,
- cleanup_index_files/1, cleanup_index_files_all_nodes/1, dbname/1,
- inactive_index_files/1]).
-
--include_lib("fabric/include/fabric.hrl").
-
--type dbname() :: (iodata() | tuple()).
--type docid() :: iodata().
--type revision() :: {integer(), binary()}.
--type callback() :: fun((any(), any()) -> {ok | stop, any()}).
--type json_obj() :: {[{binary() | atom(), any()}]}.
--type option() :: atom() | {atom(), any()}.
-
-%% db operations
-%% @equiv all_dbs(<<>>)
-all_dbs() ->
- all_dbs(<<>>).
-
-%% @doc returns a list of all database names
--spec all_dbs(Prefix::iodata()) -> {ok, [binary()]}.
-all_dbs(Prefix) when is_binary(Prefix) ->
- Length = byte_size(Prefix),
- MatchingDbs = mem3:fold_shards(fun(#shard{dbname=DbName}, Acc) ->
- case DbName of
- <<Prefix:Length/binary, _/binary>> ->
- [DbName | Acc];
- _ ->
- Acc
- end
- end, []),
- {ok, lists:usort(MatchingDbs)};
-
-%% @equiv all_dbs(list_to_binary(Prefix))
-all_dbs(Prefix) when is_list(Prefix) ->
- all_dbs(list_to_binary(Prefix)).
-
-%% @doc returns a property list of interesting properties
-%% about the database such as `doc_count', `disk_size',
-%% etc.
--spec get_db_info(dbname()) ->
- {ok, [
- {instance_start_time, binary()} |
- {doc_count, non_neg_integer()} |
- {doc_del_count, non_neg_integer()} |
- {purge_seq, non_neg_integer()} |
- {compact_running, boolean()} |
- {disk_size, non_neg_integer()} |
- {disk_format_version, pos_integer()}
- ]}.
-get_db_info(DbName) ->
- fabric_db_info:go(dbname(DbName)).
-
-%% @doc returns the size of a given partition
--spec get_partition_info(dbname(), Partition::binary()) ->
- {ok, [
- {db_name, binary()} |
- {partition, binary()} |
- {doc_count, non_neg_integer()} |
- {doc_del_count, non_neg_integer()} |
- {sizes, json_obj()}
- ]}.
-get_partition_info(DbName, Partition) ->
- fabric_db_partition_info:go(dbname(DbName), Partition).
-
-
-%% @doc the number of docs in a database
-%% @equiv get_doc_count(DbName, <<"_all_docs">>)
-get_doc_count(DbName) ->
- get_doc_count(DbName, <<"_all_docs">>).
-
-%% @doc the number of design docs in a database
--spec get_doc_count(dbname(), Namespace::binary()) ->
- {ok, non_neg_integer() | null} |
- {error, atom()} |
- {error, atom(), any()}.
-get_doc_count(DbName, <<"_all_docs">>) ->
- fabric_db_doc_count:go(dbname(DbName));
-get_doc_count(DbName, <<"_design">>) ->
- fabric_design_doc_count:go(dbname(DbName));
-get_doc_count(_DbName, <<"_local">>) ->
- {ok, null}.
-
-%% @equiv create_db(DbName, [])
-create_db(DbName) ->
- create_db(DbName, []).
-
-%% @doc creates a database with the given name.
-%%
-%% Options can include values for q and n,
-%% for example `{q, "8"}' and `{n, "3"}', which
-%% control how many shards to split a database into
-%% and how many nodes each doc is copied to respectively.
-%%
--spec create_db(dbname(), [option()]) -> ok | accepted | {error, atom()}.
-create_db(DbName, Options) ->
- fabric_db_create:go(dbname(DbName), opts(Options)).
-
-%% @equiv delete_db([])
-delete_db(DbName) ->
- delete_db(DbName, []).
-
-%% @doc delete a database
--spec delete_db(dbname(), [option()]) -> ok | accepted | {error, atom()}.
-delete_db(DbName, Options) ->
- fabric_db_delete:go(dbname(DbName), opts(Options)).
-
-%% @doc provide an upper bound for the number of tracked document revisions
--spec set_revs_limit(dbname(), pos_integer(), [option()]) -> ok.
-set_revs_limit(DbName, Limit, Options) when is_integer(Limit), Limit > 0 ->
- fabric_db_meta:set_revs_limit(dbname(DbName), Limit, opts(Options)).
-
-%% @doc retrieves the maximum number of document revisions
--spec get_revs_limit(dbname()) -> pos_integer() | no_return().
-get_revs_limit(DbName) ->
- {ok, Db} = fabric_util:get_db(dbname(DbName), [?ADMIN_CTX]),
- try couch_db:get_revs_limit(Db) after catch couch_db:close(Db) end.
-
-%% @doc sets the readers/writers/admin permissions for a database
--spec set_security(dbname(), SecObj::json_obj()) -> ok.
-set_security(DbName, SecObj) ->
- fabric_db_meta:set_security(dbname(DbName), SecObj, [?ADMIN_CTX]).
-
-%% @doc sets the readers/writers/admin permissions for a database
--spec set_security(dbname(), SecObj::json_obj(), [option()]) -> ok.
-set_security(DbName, SecObj, Options) ->
- fabric_db_meta:set_security(dbname(DbName), SecObj, opts(Options)).
-
-%% @doc sets the upper bound for the number of stored purge requests
--spec set_purge_infos_limit(dbname(), pos_integer(), [option()]) -> ok.
-set_purge_infos_limit(DbName, Limit, Options)
- when is_integer(Limit), Limit > 0 ->
- fabric_db_meta:set_purge_infos_limit(dbname(DbName), Limit, opts(Options)).
-
-%% @doc retrieves the upper bound for the number of stored purge requests
--spec get_purge_infos_limit(dbname()) -> pos_integer() | no_return().
-get_purge_infos_limit(DbName) ->
- {ok, Db} = fabric_util:get_db(dbname(DbName), [?ADMIN_CTX]),
- try couch_db:get_purge_infos_limit(Db) after catch couch_db:close(Db) end.
-
-get_security(DbName) ->
- get_security(DbName, [?ADMIN_CTX]).
-
-%% @doc retrieve the security object for a database
--spec get_security(dbname()) -> json_obj() | no_return().
-get_security(DbName, Options) ->
- {ok, Db} = fabric_util:get_db(dbname(DbName), opts(Options)),
- try couch_db:get_security(Db) after catch couch_db:close(Db) end.
-
-%% @doc retrieve the security object for all shards of a database
--spec get_all_security(dbname()) ->
- {ok, [{#shard{}, json_obj()}]} |
- {error, no_majority | timeout} |
- {error, atom(), any()}.
-get_all_security(DbName) ->
- get_all_security(DbName, []).
-
-%% @doc retrieve the security object for all shards of a database
--spec get_all_security(dbname(), [option()]) ->
- {ok, [{#shard{}, json_obj()}]} |
- {error, no_majority | timeout} |
- {error, atom(), any()}.
-get_all_security(DbName, Options) ->
- fabric_db_meta:get_all_security(dbname(DbName), opts(Options)).
-
-compact(DbName) ->
- [rexi:cast(Node, {fabric_rpc, compact, [Name]}) ||
- #shard{node=Node, name=Name} <- mem3:shards(dbname(DbName))],
- ok.
-
-compact(DbName, DesignName) ->
- [rexi:cast(Node, {fabric_rpc, compact, [Name, DesignName]}) ||
- #shard{node=Node, name=Name} <- mem3:shards(dbname(DbName))],
- ok.
-
-% doc operations
-
-%% @doc retrieve the doc with a given id
--spec open_doc(dbname(), docid(), [option()]) ->
- {ok, #doc{}} |
- {not_found, missing | deleted} |
- {timeout, any()} |
- {error, any()} |
- {error, any() | any()}.
-open_doc(DbName, Id, Options) ->
- case proplists:get_value(doc_info, Options) of
- undefined ->
- fabric_doc_open:go(dbname(DbName), docid(Id), opts(Options));
- Else ->
- {error, {invalid_option, {doc_info, Else}}}
- end.
-
-%% @doc retrieve a collection of revisions, possibly all
--spec open_revs(dbname(), docid(), [revision()] | all, [option()]) ->
- {ok, [{ok, #doc{}} | {{not_found,missing}, revision()}]} |
- {timeout, any()} |
- {error, any()} |
- {error, any(), any()}.
-open_revs(DbName, Id, Revs, Options) ->
- fabric_doc_open_revs:go(dbname(DbName), docid(Id), Revs, opts(Options)).
-
-%% @doc Retrieves information about a document with a given id
--spec get_doc_info(dbname(), docid(), [options()]) ->
- {ok, #doc_info{}} |
- {not_found, missing} |
- {timeout, any()} |
- {error, any()} |
- {error, any() | any()}.
-get_doc_info(DbName, Id, Options) ->
- Options1 = [doc_info|Options],
- fabric_doc_open:go(dbname(DbName), docid(Id), opts(Options1)).
-
-%% @doc Retrieves full information about a document with a given id
--spec get_full_doc_info(dbname(), docid(), [options()]) ->
- {ok, #full_doc_info{}} |
- {not_found, missing | deleted} |
- {timeout, any()} |
- {error, any()} |
- {error, any() | any()}.
-get_full_doc_info(DbName, Id, Options) ->
- Options1 = [{doc_info, full}|Options],
- fabric_doc_open:go(dbname(DbName), docid(Id), opts(Options1)).
-
-%% @equiv get_missing_revs(DbName, IdsRevs, [])
-get_missing_revs(DbName, IdsRevs) ->
- get_missing_revs(DbName, IdsRevs, []).
-
-%% @doc retrieve missing revisions for a list of `{Id, Revs}'
--spec get_missing_revs(dbname(),[{docid(), [revision()]}], [option()]) ->
- {ok, [{docid(), any(), [any()]}]}.
-get_missing_revs(DbName, IdsRevs, Options) when is_list(IdsRevs) ->
- Sanitized = [idrevs(IdR) || IdR <- IdsRevs],
- fabric_doc_missing_revs:go(dbname(DbName), Sanitized, opts(Options)).
-
-%% @doc update a single doc
-%% @equiv update_docs(DbName,[Doc],Options)
--spec update_doc(dbname(), #doc{} | json_obj(), [option()]) ->
- {ok, any()} | any().
-update_doc(DbName, Doc, Options) ->
- case update_docs(DbName, [Doc], opts(Options)) of
- {ok, [{ok, NewRev}]} ->
- {ok, NewRev};
- {accepted, [{accepted, NewRev}]} ->
- {accepted, NewRev};
- {ok, [{{_Id, _Rev}, Error}]} ->
- throw(Error);
- {ok, [Error]} ->
- throw(Error);
- {ok, []} ->
- % replication success
- #doc{revs = {Pos, [RevId | _]}} = doc(DbName, Doc),
- {ok, {Pos, RevId}};
- {error, [Error]} ->
- throw(Error)
- end.
-
-%% @doc update a list of docs
--spec update_docs(dbname(), [#doc{} | json_obj()], [option()]) ->
- {ok, any()} | any().
-update_docs(DbName, Docs0, Options) ->
- try
- Docs1 = docs(DbName, Docs0),
- fabric_doc_update:go(dbname(DbName), Docs1, opts(Options)) of
- {ok, Results} ->
- {ok, Results};
- {accepted, Results} ->
- {accepted, Results};
- {error, Error} ->
- {error, Error};
- Error ->
- throw(Error)
- catch {aborted, PreCommitFailures} ->
- {aborted, PreCommitFailures}
- end.
-
-
-%% @doc purge revisions for a list of `{Id, Revs}'
-%% returns {ok, {PurgeSeq, Results}}
--spec purge_docs(dbname(), [{docid(), [revision()]}], [option()]) ->
- {ok, [{Health, [revision()]}] | {error, any()}} when
- Health :: ok | accepted.
-purge_docs(DbName, IdsRevs, Options) when is_list(IdsRevs) ->
- IdsRevs2 = [idrevs(IdRs) || IdRs <- IdsRevs],
- fabric_doc_purge:go(dbname(DbName), IdsRevs2, opts(Options)).
-
-
-%% @doc spawns a process to upload attachment data and
-%% returns a fabric attachment receiver context tuple
-%% with the spawned middleman process, an empty binary,
-%% or exits with an error tuple {Error, Arg}
--spec att_receiver(#httpd{}, Length :: undefined | chunked | pos_integer() |
- {unknown_transfer_encoding, any()}) ->
- {fabric_attachment_receiver, pid(), chunked | pos_integer()} | binary().
-att_receiver(Req, Length) ->
- fabric_doc_atts:receiver(Req, Length).
-
-%% @equiv all_docs(DbName, [], Callback, Acc0, QueryArgs)
-all_docs(DbName, Callback, Acc, QueryArgs) ->
- all_docs(DbName, [], Callback, Acc, QueryArgs).
-
-%% @doc retrieves all docs. Additional query parameters, such as `limit',
-%% `start_key' and `end_key', `descending', and `include_docs', can
-%% also be passed to further constrain the query. See <a href=
-%% "http://wiki.apache.org/couchdb/HTTP_Document_API#All_Documents">
-%% all_docs</a> for details
--spec all_docs(
- dbname(), [{atom(), any()}], callback(), [] | tuple(),
- #mrargs{} | [option()]) ->
- {ok, any()} | {error, Reason :: term()}.
-
-all_docs(DbName, Options, Callback, Acc0, #mrargs{} = QueryArgs) when
- is_function(Callback, 2) ->
- fabric_view_all_docs:go(dbname(DbName), opts(Options), QueryArgs, Callback, Acc0);
-
-%% @doc convenience function that takes a keylist rather than a record
-%% @equiv all_docs(DbName, Callback, Acc0, kl_to_query_args(QueryArgs))
-all_docs(DbName, Options, Callback, Acc0, QueryArgs) ->
- all_docs(DbName, Options, Callback, Acc0, kl_to_query_args(QueryArgs)).
-
-
--spec changes(dbname(), callback(), any(), #changes_args{} | [{atom(),any()}]) ->
- {ok, any()}.
-changes(DbName, Callback, Acc0, #changes_args{}=Options) ->
- Feed = Options#changes_args.feed,
- fabric_view_changes:go(dbname(DbName), Feed, Options, Callback, Acc0);
-
-%% @doc convenience function, takes keylist instead of record
-%% @equiv changes(DbName, Callback, Acc0, kl_to_changes_args(Options))
-changes(DbName, Callback, Acc0, Options) ->
- changes(DbName, Callback, Acc0, kl_to_changes_args(Options)).
-
-%% @equiv query_view(DbName, DesignName, ViewName, #mrargs{})
-query_view(DbName, DesignName, ViewName) ->
- query_view(DbName, DesignName, ViewName, #mrargs{}).
-
-%% @equiv query_view(DbName, DesignName,
-%% ViewName, fun default_callback/2, [], QueryArgs)
-query_view(DbName, DesignName, ViewName, QueryArgs) ->
- Callback = fun default_callback/2,
- query_view(DbName, DesignName, ViewName, Callback, [], QueryArgs).
-
-
-%% @equiv query_view(DbName, DesignName, [],
-%% ViewName, fun default_callback/2, [], QueryArgs)
-query_view(DbName, DDoc, ViewName, Callback, Acc, QueryArgs) ->
- query_view(DbName, [], DDoc, ViewName, Callback, Acc, QueryArgs).
-
-
-%% @doc execute a given view.
-%% There are many additional query args that can be passed to a view,
-%% see <a href="http://wiki.apache.org/couchdb/HTTP_view_API#Querying_Options">
-%% query args</a> for details.
--spec query_view(dbname(), [{atom(), any()}] | [],
- #doc{} | binary(), iodata(), callback(), any(), #mrargs{}) ->
- any().
-query_view(Db, Options, GroupId, ViewName, Callback, Acc0, QueryArgs)
- when is_binary(GroupId) ->
- DbName = dbname(Db),
- {ok, DDoc} = ddoc_cache:open(DbName, <<"_design/", GroupId/binary>>),
- query_view(Db, Options, DDoc, ViewName, Callback, Acc0, QueryArgs);
-query_view(Db, Options, DDoc, ViewName, Callback, Acc0, QueryArgs0) ->
- DbName = dbname(Db),
- View = name(ViewName),
- case fabric_util:is_users_db(DbName) of
- true ->
- FakeDb = fabric_util:open_cluster_db(DbName, Options),
- couch_users_db:after_doc_read(DDoc, FakeDb);
- false ->
- ok
- end,
- {ok, #mrst{views=Views, language=Lang}} =
- couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
- QueryArgs1 = couch_mrview_util:set_view_type(QueryArgs0, View, Views),
- QueryArgs2 = fabric_util:validate_args(Db, DDoc, QueryArgs1),
- VInfo = couch_mrview_util:extract_view(Lang, QueryArgs2, View, Views),
- case is_reduce_view(QueryArgs2) of
- true ->
- fabric_view_reduce:go(
- Db,
- DDoc,
- View,
- QueryArgs2,
- Callback,
- Acc0,
- VInfo
- );
- false ->
- fabric_view_map:go(
- Db,
- Options,
- DDoc,
- View,
- QueryArgs2,
- Callback,
- Acc0,
- VInfo
- )
- end.
-
-%% @doc retrieve info about a view group, disk size, language, whether compaction
-%% is running and so forth
--spec get_view_group_info(dbname(), #doc{} | docid()) ->
- {ok, [
- {signature, binary()} |
- {language, binary()} |
- {disk_size, non_neg_integer()} |
- {compact_running, boolean()} |
- {updater_running, boolean()} |
- {waiting_commit, boolean()} |
- {waiting_clients, non_neg_integer()} |
- {update_seq, pos_integer()} |
- {purge_seq, non_neg_integer()} |
- {sizes, [
- {active, non_neg_integer()} |
- {external, non_neg_integer()} |
- {file, non_neg_integer()}
- ]} |
- {updates_pending, [
- {minimum, non_neg_integer()} |
- {preferred, non_neg_integer()} |
- {total, non_neg_integer()}
- ]}
- ]}.
-get_view_group_info(DbName, DesignId) ->
- fabric_group_info:go(dbname(DbName), design_doc(DesignId)).
-
--spec end_changes() -> ok.
-end_changes() ->
- fabric_view_changes:increment_changes_epoch().
-
-%% @doc retrieve all the design docs from a database
--spec design_docs(dbname()) -> {ok, [json_obj()]} | {error, Reason :: term()}.
-design_docs(DbName) ->
- Extra = case get(io_priority) of
- undefined -> [];
- Else -> [{io_priority, Else}]
- end,
- QueryArgs0 = #mrargs{
- include_docs=true,
- extra=Extra
- },
- QueryArgs = set_namespace(<<"_design">>, QueryArgs0),
- Callback = fun({meta, _}, []) ->
- {ok, []};
- ({row, Props}, Acc) ->
- {ok, [couch_util:get_value(doc, Props) | Acc]};
- (complete, Acc) ->
- {ok, lists:reverse(Acc)};
- ({error, Reason}, _Acc) ->
- {error, Reason}
- end,
- fabric:all_docs(dbname(DbName), [?ADMIN_CTX], Callback, [], QueryArgs).
-
-%% @doc forces a reload of validation functions; this is performed after
-%% design docs are updated
-%% NOTE: This function probably doesn't belong here as part of the API
--spec reset_validation_funs(dbname()) -> [reference()].
-reset_validation_funs(DbName) ->
- [rexi:cast(Node, {fabric_rpc, reset_validation_funs, [Name]}) ||
- #shard{node=Node, name=Name} <- mem3:shards(DbName)].
-
-%% @doc clean up index files for all Dbs
--spec cleanup_index_files() -> [ok].
-cleanup_index_files() ->
- {ok, Dbs} = fabric:all_dbs(),
- [cleanup_index_files(Db) || Db <- Dbs].
-
-%% @doc clean up index files for a specific db
--spec cleanup_index_files(dbname()) -> ok.
-cleanup_index_files(DbName) ->
- lists:foreach(fun(File) ->
- file:delete(File)
- end, inactive_index_files(DbName)).
-
-%% @doc inactive index files for a specific db
--spec inactive_index_files(dbname()) -> ok.
-inactive_index_files(DbName) ->
- {ok, DesignDocs} = fabric:design_docs(DbName),
-
- ActiveSigs = maps:from_list(lists:map(fun(#doc{id = GroupId}) ->
- {ok, Info} = fabric:get_view_group_info(DbName, GroupId),
- {binary_to_list(couch_util:get_value(signature, Info)), nil}
- end, [couch_doc:from_json_obj(DD) || DD <- DesignDocs])),
-
- FileList = lists:flatmap(fun(#shard{name = ShardName}) ->
- IndexDir = couch_index_util:index_dir(mrview, ShardName),
- filelib:wildcard([IndexDir, "/*"])
- end, mem3:local_shards(dbname(DbName))),
-
- if ActiveSigs =:= [] -> FileList; true ->
- %% <sig>.view and <sig>.compact.view where <sig> is in ActiveSigs
- %% will be excluded from FileList because they are active view
- %% files and should not be deleted.
- lists:filter(fun(FilePath) ->
- not maps:is_key(get_view_sig_from_filename(FilePath), ActiveSigs)
- end, FileList)
- end.
-
-%% @doc clean up index files for a specific db on all nodes
--spec cleanup_index_files_all_nodes(dbname()) -> [reference()].
-cleanup_index_files_all_nodes(DbName) ->
- lists:foreach(fun(Node) ->
- rexi:cast(Node, {?MODULE, cleanup_index_files, [DbName]})
- end, mem3:nodes()).
-
-%% some simple type validation and transcoding
-dbname(DbName) when is_list(DbName) ->
- list_to_binary(DbName);
-dbname(DbName) when is_binary(DbName) ->
- DbName;
-dbname(Db) ->
- try
- couch_db:name(Db)
- catch error:badarg ->
- erlang:error({illegal_database_name, Db})
- end.
-
-name(Thing) ->
- couch_util:to_binary(Thing).
-
-docid(DocId) when is_list(DocId) ->
- list_to_binary(DocId);
-docid(DocId) ->
- DocId.
-
-docs(Db, Docs) when is_list(Docs) ->
- [doc(Db, D) || D <- Docs];
-docs(_Db, Docs) ->
- erlang:error({illegal_docs_list, Docs}).
-
-doc(_Db, #doc{} = Doc) ->
- Doc;
-doc(Db0, {_} = Doc) ->
- Db = case couch_db:is_db(Db0) of
- true ->
- Db0;
- false ->
- Shard = hd(mem3:shards(Db0)),
- Props = couch_util:get_value(props, Shard#shard.opts, []),
- {ok, Db1} = couch_db:clustered_db(Db0, [{props, Props}]),
- Db1
- end,
- couch_db:doc_from_json_obj_validate(Db, Doc);
-doc(_Db, Doc) ->
- erlang:error({illegal_doc_format, Doc}).
-
-design_doc(#doc{} = DDoc) ->
- DDoc;
-design_doc(DocId) when is_list(DocId) ->
- design_doc(list_to_binary(DocId));
-design_doc(<<"_design/", _/binary>> = DocId) ->
- DocId;
-design_doc(GroupName) ->
- <<"_design/", GroupName/binary>>.
-
-idrevs({Id, Revs}) when is_list(Revs) ->
- {docid(Id), [rev(R) || R <- Revs]}.
-
-rev(Rev) when is_list(Rev); is_binary(Rev) ->
- couch_doc:parse_rev(Rev);
-rev({Seq, Hash} = Rev) when is_integer(Seq), is_binary(Hash) ->
- Rev.
-
-%% @doc convenience method, useful when testing or calling fabric from the shell
-opts(Options) ->
- add_option(user_ctx, add_option(io_priority, Options)).
-
-add_option(Key, Options) ->
- case couch_util:get_value(Key, Options) of
- undefined ->
- case erlang:get(Key) of
- undefined ->
- Options;
- Value ->
- [{Key, Value} | Options]
- end;
- _ ->
- Options
- end.
-
-default_callback(complete, Acc) ->
- {ok, lists:reverse(Acc)};
-default_callback(Row, Acc) ->
- {ok, [Row | Acc]}.
-
-is_reduce_view(#mrargs{view_type=ViewType}) ->
- ViewType =:= red;
-is_reduce_view({Reduce, _, _}) ->
- Reduce =:= red.
-
-%% @doc convenience method for use in the shell, converts a keylist
-%% to a `changes_args' record
-kl_to_changes_args(KeyList) ->
- kl_to_record(KeyList, changes_args).
-
-%% @doc convenience method for use in the shell, converts a keylist
-%% to a `mrargs' record
-kl_to_query_args(KeyList) ->
- kl_to_record(KeyList, mrargs).
-
-%% @doc finds the index of the given Key in the record.
-%% note that record_info is only known at compile time
-%% so the code must be written in this way. For each new
-%% record type add a case clause
-lookup_index(Key,RecName) ->
- Indexes =
- case RecName of
- changes_args ->
- lists:zip(record_info(fields, changes_args),
- lists:seq(2, record_info(size, changes_args)));
- mrargs ->
- lists:zip(record_info(fields, mrargs),
- lists:seq(2, record_info(size, mrargs)))
- end,
- couch_util:get_value(Key, Indexes).
-
-%% @doc convert a keylist to record with given `RecName'
-%% @see lookup_index
-kl_to_record(KeyList,RecName) ->
- Acc0 = case RecName of
- changes_args -> #changes_args{};
- mrargs -> #mrargs{}
- end,
- lists:foldl(fun({Key, Value}, Acc) ->
- Index = lookup_index(couch_util:to_existing_atom(Key),RecName),
- setelement(Index, Acc, Value)
- end, Acc0, KeyList).
-
-set_namespace(NS, #mrargs{extra = Extra} = Args) ->
- Args#mrargs{extra = [{namespace, NS} | Extra]}.
-
-get_view_sig_from_filename(FilePath) ->
- filename:basename(filename:basename(FilePath, ".view"), ".compact").
-
-%% -ifdef(TEST).
-%% -include_lib("eunit/include/eunit.hrl").
-%%
-%% update_doc_test_() ->
-%% {
-%% "Update doc tests", {
-%% setup, fun setup/0, fun teardown/1,
-%% fun(Ctx) -> [
-%% should_throw_conflict(Ctx)
-%% ] end
-%% }
-%% }.
-%%
-%% should_throw_conflict(Doc) ->
-%% ?_test(begin
-%% ?assertThrow(conflict, update_doc(<<"test-db">>, Doc, []))
-%% end).
-%%
-%%
-%% setup() ->
-%% Doc = #doc{
-%% id = <<"test_doc">>,
-%% revs = {3, [<<5,68,252,180,43,161,216,223,26,119,71,219,212,229,
-%% 159,113>>]},
-%% body = {[{<<"foo">>,<<"asdf">>},{<<"author">>,<<"tom">>}]},
-%% atts = [], deleted = false, meta = []
-%% },
-%% ok = application:ensure_started(config),
-%% ok = meck:expect(mem3, shards, fun(_, _) -> [] end),
-%% ok = meck:expect(mem3, quorum, fun(_) -> 1 end),
-%% ok = meck:expect(rexi, cast, fun(_, _) -> ok end),
-%% ok = meck:expect(rexi_utils, recv,
-%% fun(_, _, _, _, _, _) ->
-%% {ok, {error, [{Doc, conflict}]}}
-%% end),
-%% ok = meck:expect(couch_util, reorder_results,
-%% fun(_, [{_, Res}]) ->
-%% [Res]
-%% end),
-%% ok = meck:expect(fabric_util, create_monitors, fun(_) -> ok end),
-%% ok = meck:expect(rexi_monitor, stop, fun(_) -> ok end),
-%% Doc.
-%%
-%%
-%% teardown(_) ->
-%% meck:unload(),
-%% ok = application:stop(config).
-%%
-%%
-%% -endif.
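For context on the API removed above, the module accepted keylist arguments from a shell and converted them to #mrargs{} internally; a hedged usage sketch of that convenience path (database name and options are illustrative):

    %% Illustrative only: keylists were converted with kl_to_query_args/1
    %% before dispatch to fabric_view_all_docs:go/5.
    Callback = fun
        (complete, Acc) -> {ok, lists:reverse(Acc)};
        (Row, Acc) -> {ok, [Row | Acc]}
    end,
    {ok, Rows} = fabric:all_docs(<<"mydb">>, [], Callback, [],
        [{limit, 10}, {include_docs, true}]).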
diff --git a/src/fabric/src/fabric2_db.erl b/src/fabric/src/fabric2_db.erl
index b3e510b2e..d4f15c5b0 100644
--- a/src/fabric/src/fabric2_db.erl
+++ b/src/fabric/src/fabric2_db.erl
@@ -128,7 +128,6 @@
%% wait_for_compaction/2,
dbname_suffix/1,
- normalize_dbname/1,
validate_dbname/1,
%% make_doc/5,
@@ -140,6 +139,7 @@
-include_lib("couch/include/couch_db.hrl").
-include("fabric2.hrl").
+-include_lib("kernel/include/logger.hrl").
% Default max database name length is based on CouchDb < 4.x compatibility. See
@@ -173,7 +173,7 @@
create(DbName, Options) ->
case validate_dbname(DbName) of
ok ->
- Result = fabric2_fdb:transactional(DbName, Options, fun(TxDb) ->
+ Result = fabric2_fdb:transactional(DbName, fun(TxDb) ->
case fabric2_fdb:exists(TxDb) of
true ->
{error, file_exists};
@@ -205,7 +205,7 @@ open(DbName, Options) ->
Db2 = maybe_set_interactive(Db1, Options),
{ok, require_member_check(Db2)};
undefined ->
- Result = fabric2_fdb:transactional(DbName, Options, fun(TxDb) ->
+ Result = fabric2_fdb:transactional(DbName, fun(TxDb) ->
fabric2_fdb:open(TxDb, Options)
end),
% Cache outside the transaction retry loop
@@ -227,7 +227,7 @@ delete(DbName, Options) ->
Options1 = lists:keystore(user_ctx, 1, Options, ?ADMIN_CTX),
case lists:keyfind(deleted_at, 1, Options1) of
{deleted_at, TimeStamp} ->
- fabric2_fdb:transactional(DbName, Options1, fun(TxDb) ->
+ fabric2_fdb:transactional(DbName, fun(TxDb) ->
fabric2_fdb:remove_deleted_db(TxDb, TimeStamp)
end);
false ->
@@ -245,7 +245,7 @@ delete(DbName, Options) ->
undelete(DbName, TgtDbName, TimeStamp, Options) ->
case validate_dbname(TgtDbName) of
ok ->
- Resp = fabric2_fdb:transactional(DbName, Options,
+ Resp = fabric2_fdb:transactional(DbName,
fun(TxDb) ->
fabric2_fdb:undelete(TxDb, TgtDbName, TimeStamp)
end
@@ -276,20 +276,15 @@ list_dbs(UserFun, UserAcc0, Options) ->
FoldFun = fun
(DbName, Acc) -> maybe_stop(UserFun({row, [{id, DbName}]}, Acc))
end,
- fabric2_fdb:transactional(fun(Tx) ->
- try
- UserAcc1 = maybe_stop(UserFun({meta, []}, UserAcc0)),
- UserAcc2 = fabric2_fdb:list_dbs(
- Tx,
- FoldFun,
- UserAcc1,
- Options
- ),
- {ok, maybe_stop(UserFun(complete, UserAcc2))}
- catch throw:{stop, FinalUserAcc} ->
- {ok, FinalUserAcc}
- end
- end).
+ try
+ UserAcc1 = maybe_stop(UserFun({meta, []}, UserAcc0)),
+ UserAcc2 = fabric2_fdb:transactional(fun(Tx) ->
+ fabric2_fdb:list_dbs(Tx, FoldFun, UserAcc1, Options)
+ end),
+ {ok, maybe_stop(UserFun(complete, UserAcc2))}
+ catch throw:{stop, FinalUserAcc} ->
+ {ok, FinalUserAcc}
+ end.
list_dbs_info() ->
@@ -314,22 +309,22 @@ list_dbs_info(UserFun, UserAcc0, Options) ->
NewFutureQ = queue:in({DbName, InfoFuture}, FutureQ),
drain_info_futures(NewFutureQ, Count + 1, UserFun, Acc)
end,
- fabric2_fdb:transactional(fun(Tx) ->
- try
- UserAcc1 = maybe_stop(UserFun({meta, []}, UserAcc0)),
- InitAcc = {queue:new(), 0, UserAcc1},
+ try
+ UserAcc1 = maybe_stop(UserFun({meta, []}, UserAcc0)),
+ InitAcc = {queue:new(), 0, UserAcc1},
+ UserAcc3 = fabric2_fdb:transactional(fun(Tx) ->
{FinalFutureQ, _, UserAcc2} = fabric2_fdb:list_dbs_info(
Tx,
FoldFun,
InitAcc,
Options
),
- UserAcc3 = drain_all_info_futures(FinalFutureQ, UserFun, UserAcc2),
- {ok, maybe_stop(UserFun(complete, UserAcc3))}
- catch throw:{stop, FinalUserAcc} ->
- {ok, FinalUserAcc}
- end
- end).
+ drain_all_info_futures(FinalFutureQ, UserFun, UserAcc2)
+ end),
+ {ok, maybe_stop(UserFun(complete, UserAcc3))}
+ catch throw:{stop, FinalUserAcc} ->
+ {ok, FinalUserAcc}
+ end.
list_deleted_dbs_info() ->
@@ -391,26 +386,22 @@ list_deleted_dbs_info(UserFun, UserAcc0, Options0) ->
NewFutureQ = queue:in({DbName, TimeStamp, InfoFuture}, FutureQ),
drain_deleted_info_futures(NewFutureQ, Count + 1, UserFun, Acc)
end,
- fabric2_fdb:transactional(fun(Tx) ->
- try
- UserAcc1 = maybe_stop(UserFun({meta, []}, UserAcc0)),
- InitAcc = {queue:new(), 0, UserAcc1},
+ try
+ UserAcc1 = maybe_stop(UserFun({meta, []}, UserAcc0)),
+ InitAcc = {queue:new(), 0, UserAcc1},
+ UserAcc3 = fabric2_fdb:transactional(fun(Tx) ->
{FinalFutureQ, _, UserAcc2} = fabric2_fdb:list_deleted_dbs_info(
Tx,
FoldFun,
InitAcc,
Options2
),
- UserAcc3 = drain_all_deleted_info_futures(
- FinalFutureQ,
- UserFun,
- UserAcc2
- ),
- {ok, maybe_stop(UserFun(complete, UserAcc3))}
- catch throw:{stop, FinalUserAcc} ->
- {ok, FinalUserAcc}
- end
- end).
+ drain_all_deleted_info_futures(FinalFutureQ, UserFun, UserAcc2)
+ end),
+ {ok, maybe_stop(UserFun(complete, UserAcc3))}
+ catch throw:{stop, FinalUserAcc} ->
+ {ok, FinalUserAcc}
+ end.
is_admin(Db, {SecProps}) when is_list(SecProps) ->
@@ -851,6 +842,7 @@ validate_docid(Id) when is_binary(Id) ->
_Else -> ok
end;
validate_docid(Id) ->
+ ?LOG_DEBUG(#{what => illegal_docid, docid => Id}),
couch_log:debug("Document id is not a string: ~p", [Id]),
throw({illegal_docid, <<"Document id must be a string">>}).
@@ -1093,14 +1085,18 @@ fold_changes(Db, SinceSeq, UserFun, UserAcc, Options) ->
end,
StartKey = get_since_seq(TxDb, Dir, SinceSeq),
- EndKey = case Dir of
- rev -> fabric2_util:seq_zero_vs();
- _ -> fabric2_util:seq_max_vs()
+ EndKey = case fabric2_util:get_value(end_key, Options) of
+ undefined when Dir == rev ->
+ fabric2_util:seq_zero_vs();
+ undefined ->
+ fabric2_util:seq_max_vs();
+ EK when is_binary(EK) ->
+ fabric2_fdb:seq_to_vs(EK);
+ EK when is_tuple(EK), element(1, EK) == versionstamp ->
+ EK
end,
- FoldOpts = [
- {start_key, StartKey},
- {end_key, EndKey}
- ] ++ RestartTx ++ Options,
+ BaseOpts = [{start_key, StartKey}] ++ RestartTx ++ Options,
+ FoldOpts = lists:keystore(end_key, 1, BaseOpts, {end_key, EndKey}),
{ok, fabric2_fdb:fold_range(TxDb, Prefix, fun({K, V}, Acc) ->
{SeqVS} = erlfdb_tuple:unpack(K, Prefix),
@@ -1122,27 +1118,19 @@ fold_changes(Db, SinceSeq, UserFun, UserAcc, Options) ->
dbname_suffix(DbName) ->
- filename:basename(normalize_dbname(DbName)).
-
-
-normalize_dbname(DbName) ->
- % Remove in the final cleanup. We don't need to handle shards prefix or
- % remove .couch suffixes anymore. Keep it for now to pass all the existing
- % tests.
- couch_db:normalize_dbname(DbName).
+ filename:basename(DbName).
validate_dbname(DbName) when is_list(DbName) ->
validate_dbname(?l2b(DbName));
validate_dbname(DbName) when is_binary(DbName) ->
- Normalized = normalize_dbname(DbName),
fabric2_db_plugin:validate_dbname(
- DbName, Normalized, fun validate_dbname_int/2).
+ DbName, DbName, fun validate_dbname_int/2).
-validate_dbname_int(DbName, Normalized) when is_binary(DbName) ->
+validate_dbname_int(DbName, DbName) when is_binary(DbName) ->
case validate_dbname_length(DbName) of
- ok -> validate_dbname_pat(DbName, Normalized);
+ ok -> validate_dbname_pat(DbName);
{error, _} = Error -> Error
end.
@@ -1156,13 +1144,12 @@ validate_dbname_length(DbName) ->
end.
-validate_dbname_pat(DbName, Normalized) ->
- DbNoExt = couch_util:drop_dot_couch_ext(DbName),
- case re:run(DbNoExt, ?DBNAME_REGEX, [{capture,none}, dollar_endonly]) of
+validate_dbname_pat(DbName) ->
+ case re:run(DbName, ?DBNAME_REGEX, [{capture,none}, dollar_endonly]) of
match ->
ok;
nomatch ->
- case is_system_db_name(Normalized) of
+ case is_system_db_name(DbName) of
true -> ok;
false -> {error, {illegal_database_name, DbName}}
end
@@ -2150,7 +2137,7 @@ validate_doc_update(Db, Doc, PrevDoc) ->
validate_ddoc(Db, DDoc) ->
try
- ok = couch_index_server:validate(Db, couch_doc:with_ejson_body(DDoc))
+ ok = couch_views_validate:validate_ddoc(Db, DDoc)
catch
throw:{invalid_design_doc, Reason} ->
throw({bad_request, invalid_design_doc, Reason});
diff --git a/src/fabric/src/fabric2_db_expiration.erl b/src/fabric/src/fabric2_db_expiration.erl
index 92f22e749..b8a063c08 100644
--- a/src/fabric/src/fabric2_db_expiration.erl
+++ b/src/fabric/src/fabric2_db_expiration.erl
@@ -34,6 +34,7 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("fabric/include/fabric2.hrl").
+-include_lib("kernel/include/logger.hrl").
-define(JOB_TYPE, <<"db_expiration">>).
-define(JOB_ID, <<"db_expiration_job">>).
@@ -81,7 +82,9 @@ handle_info(timeout, #st{job = undefined} = St) ->
handle_info({'EXIT', Pid, Exit}, #st{job = Pid} = St) ->
case Exit of
normal -> ok;
- Error -> couch_log:error("~p : job error ~p", [?MODULE, Error])
+ Error ->
+ ?LOG_ERROR(#{what => job_error, details => Error}),
+ couch_log:error("~p : job error ~p", [?MODULE, Error])
end,
NewPid = spawn_link(?MODULE, cleanup, [is_enabled()]),
{noreply, St#st{job = NewPid}};
@@ -131,8 +134,12 @@ cleanup(true) ->
{ok, Job1, Data1} = ?MODULE:process_expirations(Job, Data),
ok = resubmit_job(Job1, Data1, schedule_sec())
catch
- _Tag:Error ->
- Stack = erlang:get_stacktrace(),
+ _Tag:Error:Stack ->
+ ?LOG_ERROR(#{
+ what => process_expirations_error,
+ job => Job,
+ details => Error
+ }),
couch_log:error("~p : processing error ~p ~p ~p",
[?MODULE, Job, Error, Stack]),
ok = resubmit_job(Job, Data, ?ERROR_RESCHEDULE_SEC),
@@ -180,6 +187,11 @@ process_row(DbInfo) ->
Since = Now - Retention,
case Since >= timestamp_to_sec(TimeStamp) of
true ->
+ ?LOG_NOTICE(#{
+ what => expire_db,
+ db => DbName,
+ deleted_at => TimeStamp
+ }),
couch_log:notice("Permanently deleting ~s database with"
" timestamp ~s", [DbName, TimeStamp]),
ok = fabric2_db:delete(DbName, [{deleted_at, TimeStamp}]);
diff --git a/src/fabric/src/fabric2_fdb.erl b/src/fabric/src/fabric2_fdb.erl
index 36fa451ab..e86b03778 100644
--- a/src/fabric/src/fabric2_fdb.erl
+++ b/src/fabric/src/fabric2_fdb.erl
@@ -15,8 +15,10 @@
-export([
transactional/1,
- transactional/3,
transactional/2,
+ transactional/3,
+
+ with_snapshot/2,
create/2,
open/2,
@@ -89,9 +91,6 @@
-include("fabric2.hrl").
--define(MAX_FOLD_RANGE_RETRIES, 3).
-
-
-record(fold_acc, {
db,
restart_tx,
@@ -116,18 +115,25 @@
transactional(Fun) ->
- do_transaction(Fun, undefined).
+ do_transaction(Fun, undefined, #{}).
-transactional(DbName, Options, Fun) when is_binary(DbName) ->
- with_span(Fun, #{'db.name' => DbName}, fun() ->
- transactional(fun(Tx) ->
- Fun(init_db(Tx, DbName, Options))
- end)
- end).
+transactional(DbName, Fun) when is_binary(DbName), is_function(Fun) ->
+ transactional(DbName, #{}, Fun);
+
+transactional(#{} = Db, Fun) when is_function(Fun) ->
+ transactional(Db, #{}, Fun).
+
-transactional(#{tx := undefined} = Db, Fun) ->
+transactional(DbName, #{} = TxOptions, Fun) when is_binary(DbName) ->
+ with_span(Fun, #{'db.name' => DbName}, fun() ->
+ do_transaction(fun(Tx) ->
+ Fun(init_db(Tx, DbName))
+ end, undefined, TxOptions)
+ end);
+
+transactional(#{tx := undefined} = Db, #{} = TxOptions, Fun) ->
DbName = maps:get(name, Db, undefined),
try
Db1 = refresh(Db),
@@ -143,25 +149,31 @@ transactional(#{tx := undefined} = Db, Fun) ->
true -> Fun(reopen(Db2#{tx => Tx}));
false -> Fun(Db2#{tx => Tx})
end
- end, LayerPrefix)
+ end, LayerPrefix, TxOptions)
end)
catch throw:{?MODULE, reopen} ->
with_span('db.reopen', #{'db.name' => DbName}, fun() ->
transactional(Db#{reopen => true}, Fun)
end)
end;
+transactional(#{tx := {erlfdb_snapshot, _}} = Db, #{} = _TxOptions, Fun) ->
+ DbName = maps:get(name, Db, undefined),
+ with_span(Fun, #{'db.name' => DbName}, fun() ->
+ Fun(Db)
+ end);
-transactional(#{tx := {erlfdb_transaction, _}} = Db, Fun) ->
+transactional(#{tx := {erlfdb_transaction, _}} = Db, #{} = _TxOptions, Fun) ->
DbName = maps:get(name, Db, undefined),
with_span(Fun, #{'db.name' => DbName}, fun() ->
Fun(Db)
end).
-do_transaction(Fun, LayerPrefix) when is_function(Fun, 1) ->
+do_transaction(Fun, LayerPrefix, #{} = TxOptions) when is_function(Fun, 1) ->
Db = get_db_handle(),
try
erlfdb:transactional(Db, fun(Tx) ->
+ apply_tx_options(Tx, TxOptions),
case get(erlfdb_trace) of
Name when is_binary(Name) ->
UId = erlang:unique_integer([positive]),
@@ -183,6 +195,20 @@ do_transaction(Fun, LayerPrefix) when is_function(Fun, 1) ->
end.
+apply_tx_options(Tx, #{} = TxOptions) ->
+ maps:map(fun(K, V) ->
+ erlfdb:set_option(Tx, K, V)
+ end, TxOptions).
+
+
+with_snapshot(#{tx := {erlfdb_transaction, _} = Tx} = TxDb, Fun) ->
+ SSDb = TxDb#{tx := erlfdb:snapshot(Tx)},
+ Fun(SSDb);
+
+with_snapshot(#{tx := {erlfdb_snapshot, _}} = SSDb, Fun) ->
+ Fun(SSDb).
+
+
create(#{} = Db0, Options) ->
#{
name := DbName,
@@ -350,7 +376,7 @@ reopen(#{} = OldDb) ->
interactive := Interactive
} = OldDb,
Options1 = lists:keystore(user_ctx, 1, Options, {user_ctx, UserCtx}),
- NewDb = open(init_db(Tx, DbName, Options1), Options1),
+ NewDb = open(init_db(Tx, DbName), Options1),
% Check if database was re-created
case {Interactive, maps:get(uuid, NewDb)} of
@@ -495,7 +521,9 @@ get_info(#{} = Db) ->
tx := Tx,
db_prefix := DbPrefix
} = ensure_current(Db),
- get_info_wait(get_info_future(Tx, DbPrefix)).
+ DbInfo = get_info_wait(get_info_future(Tx, DbPrefix)),
+ AegisProps = aegis:get_db_info(Db),
+ [{encryption, {AegisProps}} | DbInfo].
get_info_future(Tx, DbPrefix) ->
@@ -536,10 +564,10 @@ get_info_wait(#info_future{tx = Tx, retries = Retries} = Future) ->
try
get_info_wait_int(Future)
catch
- error:{erlfdb_error, ?TRANSACTION_CANCELLED} ->
+ error:{erlfdb_error, ?ERLFDB_TRANSACTION_CANCELLED} ->
Future1 = get_info_future(Tx, Future#info_future.db_prefix),
get_info_wait(Future1#info_future{retries = Retries + 1});
- error:{erlfdb_error, ?TRANSACTION_TOO_OLD} ->
+ error:{erlfdb_error, Error} when ?ERLFDB_IS_RETRYABLE(Error) ->
ok = erlfdb:reset(Tx),
Future1 = get_info_future(Tx, Future#info_future.db_prefix),
get_info_wait(Future1#info_future{retries = Retries + 1})
@@ -1130,7 +1158,8 @@ fold_range(Tx, FAcc) ->
user_acc = FinalUserAcc
} = erlfdb:fold_range(Tx, Start, End, Callback, FAcc, Opts),
FinalUserAcc
- catch error:{erlfdb_error, ?TRANSACTION_TOO_OLD} when DoRestart ->
+ catch error:{erlfdb_error, Error} when
+ ?ERLFDB_IS_RETRYABLE(Error) andalso DoRestart ->
% Possibly handle cluster_version_changed and future_version as well to
         % continue iteration instead of falling back to transactional and retrying
% from the beginning which is bound to fail when streaming data out to a
@@ -1154,18 +1183,27 @@ seq_to_vs(Seq) when is_binary(Seq) ->
next_vs({versionstamp, VS, Batch, TxId}) ->
- {V, B, T} = case TxId =< 65535 of
+ {V, B, T} = case TxId < 16#FFFF of
true ->
{VS, Batch, TxId + 1};
false ->
- case Batch =< 65535 of
+ case Batch < 16#FFFF of
true ->
{VS, Batch + 1, 0};
false ->
{VS + 1, 0, 0}
end
end,
- {versionstamp, V, B, T}.
+ {versionstamp, V, B, T};
+
+next_vs({versionstamp, VS, Batch}) ->
+ {V, B} = case Batch < 16#FFFF of
+ true ->
+ {VS, Batch + 1};
+ false ->
+ {VS + 1, 0}
+ end,
+ {versionstamp, V, B}.
new_versionstamp(Tx) ->
@@ -1209,7 +1247,7 @@ debug_cluster(Start, End) ->
end).
-init_db(Tx, DbName, Options) ->
+init_db(Tx, DbName) ->
Prefix = get_dir(Tx),
Version = erlfdb:wait(erlfdb:get(Tx, ?METADATA_VERSION_KEY)),
#{
@@ -1219,7 +1257,7 @@ init_db(Tx, DbName, Options) ->
md_version => Version,
security_fun => undefined,
- db_options => Options
+ db_options => []
}.
@@ -1841,10 +1879,11 @@ restart_fold(Tx, #fold_acc{} = Acc) ->
ok = erlfdb:reset(Tx),
+ MaxRetries = fabric2_server:get_retry_limit(),
case {erase(?PDICT_FOLD_ACC_STATE), Acc#fold_acc.retries} of
{#fold_acc{db = Db} = Acc1, _} ->
Acc1#fold_acc{db = check_db_instance(Db), retries = 0};
- {undefined, Retries} when Retries < ?MAX_FOLD_RANGE_RETRIES ->
+ {undefined, Retries} when Retries < MaxRetries ->
Db = check_db_instance(Acc#fold_acc.db),
Acc#fold_acc{db = Db, retries = Retries + 1};
{undefined, _} ->
@@ -1863,6 +1902,8 @@ get_db_handle() ->
end.
+require_transaction(#{tx := {erlfdb_snapshot, _}} = _Db) ->
+ ok;
require_transaction(#{tx := {erlfdb_transaction, _}} = _Db) ->
ok;
require_transaction(#{} = _Db) ->
@@ -1958,7 +1999,7 @@ clear_transaction() ->
is_commit_unknown_result() ->
- erlfdb:get_last_error() == ?COMMIT_UNKNOWN_RESULT.
+ erlfdb:get_last_error() == ?ERLFDB_COMMIT_UNKNOWN_RESULT.
has_transaction_id() ->
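The new transactional/3 clause shown above takes a map of FDB transaction options that apply_tx_options/2 forwards to erlfdb:set_option/3 before the body runs; a hedged sketch (the timeout option name and value are assumptions, not taken from this change):

    %% Sketch only: every key/value in the options map is forwarded to
    %% erlfdb:set_option(Tx, Key, Value) for this transaction.
    db_info(DbName) when is_binary(DbName) ->
        fabric2_fdb:transactional(DbName, #{timeout => 5000}, fun(TxDb) ->
            fabric2_fdb:get_info(TxDb)
        end).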
diff --git a/src/fabric/src/fabric2_index.erl b/src/fabric/src/fabric2_index.erl
index 25c31a8c8..8d52e8b19 100644
--- a/src/fabric/src/fabric2_index.erl
+++ b/src/fabric/src/fabric2_index.erl
@@ -34,6 +34,7 @@
-include_lib("couch/include/couch_db.hrl").
+-include_lib("kernel/include/logger.hrl").
-callback build_indices(Db :: map(), DDocs :: list(#doc{})) ->
@@ -67,9 +68,15 @@ cleanup(Db) ->
catch
error:database_does_not_exist ->
ok;
- Tag:Reason ->
- Stack = erlang:get_stacktrace(),
+ Tag:Reason:Stack ->
DbName = fabric2_db:name(Db),
+ ?LOG_ERROR(#{
+ what => index_cleanup_failure,
+ db => DbName,
+ tag => Tag,
+ details => Reason,
+ stacktrace => Stack
+ }),
LogMsg = "~p failed to cleanup indices for `~s` ~p:~p ~p",
couch_log:error(LogMsg, [?MODULE, DbName, Tag, Reason, Stack])
end.
@@ -168,8 +175,14 @@ process_updates_iter([Db | Rest], Cont) ->
catch
error:database_does_not_exist ->
ok;
- Tag:Reason ->
- Stack = erlang:get_stacktrace(),
+ Tag:Reason:Stack ->
+ ?LOG_ERROR(#{
+ what => index_build_failure,
+ db => Db,
+ tag => Tag,
+ details => Reason,
+ stacktrace => Stack
+ }),
LogMsg = "~p failed to build indices for `~s` ~p:~p ~p",
couch_log:error(LogMsg, [?MODULE, Db, Tag, Reason, Stack])
end,
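Several hunks here swap the deprecated erlang:get_stacktrace/0 for the three-part catch pattern available since OTP 21; a minimal standalone sketch of the idiom:

    %% The stacktrace is bound in the clause head (Tag:Reason:Stack) rather
    %% than fetched afterwards with erlang:get_stacktrace/0.
    safe_run(Fun) when is_function(Fun, 0) ->
        try
            Fun()
        catch
            Tag:Reason:Stack ->
                couch_log:error("~p failed ~p:~p ~p", [?MODULE, Tag, Reason, Stack]),
                {error, Reason}
        end.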
diff --git a/src/fabric/src/fabric2_server.erl b/src/fabric/src/fabric2_server.erl
index be674b10e..0da2b79e9 100644
--- a/src/fabric/src/fabric2_server.erl
+++ b/src/fabric/src/fabric2_server.erl
@@ -27,7 +27,8 @@
maybe_remove/1,
fdb_directory/0,
- fdb_cluster/0
+ fdb_cluster/0,
+ get_retry_limit/0
]).
@@ -42,9 +43,12 @@
-include_lib("couch/include/couch_db.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("kernel/include/logger.hrl").
-
--define(CLUSTER_FILE, "/usr/local/etc/foundationdb/fdb.cluster").
+-define(CLUSTER_FILE_MACOS, "/usr/local/etc/foundationdb/fdb.cluster").
+-define(CLUSTER_FILE_LINUX, "/etc/foundationdb/fdb.cluster").
+-define(CLUSTER_FILE_WIN32, "C:/ProgramData/foundationdb/fdb.cluster").
-define(FDB_DIRECTORY, fdb_directory).
-define(FDB_CLUSTER, fdb_cluster).
-define(DEFAULT_FDB_DIRECTORY, <<"couchdb">>).
@@ -192,6 +196,12 @@ fdb_directory() ->
fdb_cluster() ->
get_env(?FDB_CLUSTER).
+
+get_retry_limit() ->
+ Default = list_to_integer(?DEFAULT_RETRY_LIMIT),
+ config:get_integer(?TX_OPTIONS_SECTION, "retry_limit", Default).
+
+
get_env(Key) ->
case get(Key) of
undefined ->
@@ -212,7 +222,7 @@ get_db_and_cluster(EunitDbOpts) ->
{ok, true} ->
{<<"eunit_test">>, erlfdb_util:get_test_db(EunitDbOpts)};
undefined ->
- ClusterFileStr = config:get("erlfdb", "cluster_file", ?CLUSTER_FILE),
+ ClusterFileStr = get_cluster_file_path(),
{ok, ConnectionStr} = file:read_file(ClusterFileStr),
DbHandle = erlfdb:open(iolist_to_binary(ClusterFileStr)),
{string:trim(ConnectionStr), DbHandle}
@@ -220,6 +230,106 @@ get_db_and_cluster(EunitDbOpts) ->
apply_tx_options(Db, config:get(?TX_OPTIONS_SECTION)),
{Cluster, Db}.
+get_cluster_file_path() ->
+ Locations = [
+ {custom, config:get("erlfdb", "cluster_file")},
+ {custom, os:getenv("FDB_CLUSTER_FILE", undefined)}
+ ] ++ default_locations(os:type()),
+ case find_cluster_file(Locations) of
+ {ok, Location} ->
+ Location;
+ {error, Reason} ->
+ erlang:error(Reason)
+ end.
+
+
+default_locations({unix, _}) ->
+ [
+ {default, ?CLUSTER_FILE_MACOS},
+ {default, ?CLUSTER_FILE_LINUX}
+ ];
+
+default_locations({win32, _}) ->
+ [
+ {default, ?CLUSTER_FILE_WIN32}
+ ].
+
+
+find_cluster_file([]) ->
+ {error, cluster_file_missing};
+
+find_cluster_file([{custom, undefined} | Rest]) ->
+ find_cluster_file(Rest);
+
+find_cluster_file([{Type, Location} | Rest]) ->
+ Msg = #{
+ what => fdb_connection_setup,
+ configuration_type => Type,
+ cluster_file => Location
+ },
+ case file:read_file_info(Location, [posix]) of
+ {ok, #file_info{access = read_write}} ->
+ ?LOG_INFO(Msg#{status => ok}),
+ couch_log:info(
+ "Using ~s FDB cluster file: ~s",
+ [Type, Location]
+ ),
+ {ok, Location};
+ {ok, #file_info{access = read}} ->
+ ?LOG_WARNING(Msg#{
+ status => read_only_file,
+ details => "If coordinators are changed without updating this "
+ "file CouchDB may be unable to connect to the FDB cluster!"
+ }),
+ couch_log:warning(
+ "Using read-only ~s FDB cluster file: ~s -- if coordinators "
+ "are changed without updating this file CouchDB may be unable "
+ "to connect to the FDB cluster!",
+ [Type, Location]
+ ),
+ {ok, Location};
+ {ok, _} ->
+ ?LOG_ERROR(Msg#{
+ status => permissions_error,
+ details => "CouchDB needs read/write access to FDB cluster file"
+ }),
+ couch_log:error(
+ "CouchDB needs read/write access to FDB cluster file: ~s",
+ [Location]
+ ),
+ {error, cluster_file_permissions};
+ {error, Reason} when Type =:= custom ->
+ ?LOG_ERROR(Msg#{
+ status => Reason,
+ details => file:format_error(Reason)
+ }),
+ couch_log:error(
+ "Encountered ~p error looking for FDB cluster file: ~s",
+ [Reason, Location]
+ ),
+ {error, Reason};
+ {error, enoent} when Type =:= default ->
+ ?LOG_INFO(Msg#{
+ status => enoent,
+ details => file:format_error(enoent)
+ }),
+ couch_log:info(
+ "No FDB cluster file found at ~s",
+ [Location]
+ ),
+ find_cluster_file(Rest);
+ {error, Reason} when Type =:= default ->
+ ?LOG_WARNING(Msg#{
+ status => Reason,
+ details => file:format_error(Reason)
+ }),
+ couch_log:warning(
+ "Encountered ~p error looking for FDB cluster file: ~s",
+ [Reason, Location]
+ ),
+ find_cluster_file(Rest)
+ end.
+
apply_tx_options(Db, Cfg) ->
maps:map(fun(Option, {Type, Default}) ->
@@ -240,6 +350,11 @@ apply_tx_option(Db, Option, Val, integer) ->
set_option(Db, Option, list_to_integer(Val))
catch
error:badarg ->
+ ?LOG_ERROR(#{
+ what => invalid_transaction_option_value,
+ option => Option,
+ value => Val
+ }),
Msg = "~p : Invalid integer tx option ~p = ~p",
couch_log:error(Msg, [?MODULE, Option, Val])
end;
@@ -250,6 +365,12 @@ apply_tx_option(Db, Option, Val, binary) ->
true ->
set_option(Db, Option, BinVal);
false ->
+ ?LOG_ERROR(#{
+ what => invalid_transaction_option_value,
+ option => Option,
+ value => Val,
+ details => "string transaction option must be less than 16 bytes"
+ }),
Msg = "~p : String tx option ~p is larger than 16 bytes",
couch_log:error(Msg, [?MODULE, Option])
end.
@@ -262,6 +383,11 @@ set_option(Db, Option, Val) ->
% This could happen if the option is not supported by erlfdb or
% fdbserver.
error:badarg ->
+ ?LOG_ERROR(#{
+ what => transaction_option_error,
+ option => Option,
+ value => Val
+ }),
Msg = "~p : Could not set fdb tx option ~p = ~p",
couch_log:error(Msg, [?MODULE, Option, Val])
end.
@@ -274,3 +400,72 @@ sanitize(#{} = Db) ->
security_fun := undefined,
interactive := false
}.
+
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
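+% These tests mock file:read_file_info/2 so that fixed fake paths simulate
+% the access outcomes handled by find_cluster_file/1 above.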
+setup() ->
+ meck:new(file, [unstick, passthrough]),
+ meck:expect(file, read_file_info, fun
+ ("ok.cluster", _) ->
+ {ok, #file_info{access = read_write}};
+ ("readonly.cluster", _) ->
+ {ok, #file_info{access = read}};
+ ("noaccess.cluster", _) ->
+ {ok, #file_info{access = none}};
+ ("missing.cluster", _) ->
+ {error, enoent};
+ (Path, Options) ->
+ meck:passthrough([Path, Options])
+ end).
+
+teardown(_) ->
+ meck:unload().
+
+find_cluster_file_test_() ->
+ {setup,
+ fun setup/0,
+ fun teardown/1,
+ [
+ {"ignore unspecified config", ?_assertEqual(
+ {ok, "ok.cluster"},
+ find_cluster_file([
+ {custom, undefined},
+ {custom, "ok.cluster"}
+ ])
+ )},
+
+ {"allow read-only file", ?_assertEqual(
+ {ok, "readonly.cluster"},
+ find_cluster_file([
+ {custom, "readonly.cluster"}
+ ])
+ )},
+
+ {"fail if no access to configured cluster file", ?_assertEqual(
+ {error, cluster_file_permissions},
+ find_cluster_file([
+ {custom, "noaccess.cluster"}
+ ])
+ )},
+
+ {"fail if configured cluster file is missing", ?_assertEqual(
+ {error, enoent},
+ find_cluster_file([
+ {custom, "missing.cluster"},
+ {default, "ok.cluster"}
+ ])
+ )},
+
+ {"check multiple default locations", ?_assertEqual(
+ {ok, "ok.cluster"},
+ find_cluster_file([
+ {default, "missing.cluster"},
+ {default, "ok.cluster"}
+ ])
+ )}
+ ]
+ }.
+
+-endif.
diff --git a/src/fabric/src/fabric2_users_db.erl b/src/fabric/src/fabric2_users_db.erl
index 9a8a462c3..3714d341e 100644
--- a/src/fabric/src/fabric2_users_db.erl
+++ b/src/fabric/src/fabric2_users_db.erl
@@ -19,6 +19,7 @@
]).
-include_lib("couch/include/couch_db.hrl").
+-include_lib("kernel/include/logger.hrl").
-define(NAME, <<"name">>).
-define(PASSWORD, <<"password">>).
@@ -30,6 +31,8 @@
-define(ITERATIONS, <<"iterations">>).
-define(SALT, <<"salt">>).
-define(replace(L, K, V), lists:keystore(K, 1, L, {K, V})).
+-define(REQUIREMENT_ERROR, "Password does not conform to requirements.").
+-define(PASSWORD_SERVER_ERROR, "Server cannot hash passwords at this time.").
-define(
DDOCS_ADMIN_ONLY,
@@ -76,6 +79,7 @@ save_doc(#doc{body={Body}} = Doc) ->
{undefined, _} ->
Doc;
{ClearPassword, "simple"} -> % deprecated
+ ok = validate_password(ClearPassword),
Salt = couch_uuids:random(),
PasswordSha = couch_passwords:simple(ClearPassword, Salt),
Body0 = ?replace(Body, ?PASSWORD_SCHEME, ?SIMPLE),
@@ -84,6 +88,7 @@ save_doc(#doc{body={Body}} = Doc) ->
Body3 = proplists:delete(?PASSWORD, Body2),
Doc#doc{body={Body3}};
{ClearPassword, "pbkdf2"} ->
+ ok = validate_password(ClearPassword),
Iterations = list_to_integer(config:get("couch_httpd_auth", "iterations", "1000")),
Salt = couch_uuids:random(),
DerivedKey = couch_passwords:pbkdf2(ClearPassword, Salt, Iterations),
@@ -94,8 +99,89 @@ save_doc(#doc{body={Body}} = Doc) ->
Body4 = proplists:delete(?PASSWORD, Body3),
Doc#doc{body={Body4}};
{_ClearPassword, Scheme} ->
+ ?LOG_ERROR(#{
+ what => invalid_config_setting,
+ section => couch_httpd_auth,
+ key => password_scheme,
+ value => Scheme,
+ details => "password_scheme must be one of (simple, pbkdf2)"
+ }),
couch_log:error("[couch_httpd_auth] password_scheme value of '~p' is invalid.", [Scheme]),
- throw({forbidden, "Server cannot hash passwords at this time."})
+ throw({forbidden, ?PASSWORD_SERVER_ERROR})
+ end.
+
+% Validate that a new password matches every RegExp in the password_regexp
+% setting. Throws if it does not.
+% The [couch_httpd_auth] password_regexp config value is parsed here.
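+% The config value is an Erlang term; an illustrative (hypothetical) example:
+%   password_regexp = [{".{10,}", "Minimum length is 10 characters."}, "[A-Z]+"]
+% Each entry may be a RegExp string, a {RegExp} tuple, or a
+% {RegExp, Reason} tuple (see get_password_regexp_and_error_msg/1 below).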
+validate_password(ClearPassword) ->
+ case config:get("couch_httpd_auth", "password_regexp", "") of
+ "" ->
+ ok;
+ "[]" ->
+ ok;
+ ValidateConfig ->
+ RequirementList = case couch_util:parse_term(ValidateConfig) of
+ {ok, RegExpList} when is_list(RegExpList) ->
+ RegExpList;
+ {ok, NonListValue} ->
+ couch_log:error(
+ "[couch_httpd_auth] password_regexp value of '~p'"
+ " is not a list.",
+ [NonListValue]
+ ),
+ throw({forbidden, ?PASSWORD_SERVER_ERROR});
+ {error, ErrorInfo} ->
+ couch_log:error(
+ "[couch_httpd_auth] password_regexp value of '~p'"
+ " could not be parsed: ~p",
+ [ValidateConfig, ErrorInfo]
+ ),
+ throw({forbidden, ?PASSWORD_SERVER_ERROR})
+ end,
+ % Check the password against every RegExp.
+ lists:foreach(fun(RegExpTuple) ->
+ case get_password_regexp_and_error_msg(RegExpTuple) of
+ {ok, RegExp, PasswordErrorMsg} ->
+ check_password(ClearPassword, RegExp, PasswordErrorMsg);
+ {error} ->
+ couch_log:error(
+ "[couch_httpd_auth] password_regexp entry '~p' "
+ "is neither a RegExp string nor "
+ "a {RegExp, Reason} tuple.",
+ [RegExpTuple]
+ ),
+ throw({forbidden, ?PASSWORD_SERVER_ERROR})
+ end
+ end, RequirementList),
+ ok
+ end.
+
+% Get the RegExp out of the tuple and build the error message.
+% First clause: a tuple with a Reason string.
+get_password_regexp_and_error_msg({RegExp, Reason})
+ when is_list(RegExp) andalso is_list(Reason)
+ andalso length(Reason) > 0 ->
+ {ok, RegExp, lists:concat([?REQUIREMENT_ERROR, " ", Reason])};
+% A tuple whose Reason is not a valid (non-empty) string.
+get_password_regexp_and_error_msg({RegExp, _Reason}) when is_list(RegExp) ->
+ {ok, RegExp, ?REQUIREMENT_ERROR};
+% Without a Reason string.
+get_password_regexp_and_error_msg({RegExp}) when is_list(RegExp) ->
+ {ok, RegExp, ?REQUIREMENT_ERROR};
+% If the RegExp is only a list/string.
+get_password_regexp_and_error_msg(RegExp) when is_list(RegExp) ->
+ {ok, RegExp, ?REQUIREMENT_ERROR};
+% Anything else is not a valid RegExp value.
+get_password_regexp_and_error_msg(_) ->
+ {error}.
+
+% Check whether the password matches the RegExp; throw a bad_request if not.
+check_password(Password, RegExp, ErrorMsg) ->
+ case re:run(Password, RegExp, [{capture, none}]) of
+ match ->
+ ok;
+ _ ->
+ throw({bad_request, ErrorMsg})
end.
diff --git a/src/fabric/src/fabric2_util.erl b/src/fabric/src/fabric2_util.erl
index 136762b34..cd22778ef 100644
--- a/src/fabric/src/fabric2_util.erl
+++ b/src/fabric/src/fabric2_util.erl
@@ -50,7 +50,7 @@
-include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
revinfo_to_revs(RevInfo) ->
@@ -383,23 +383,9 @@ pmap(Fun, Args, Opts) ->
end, Refs).
-% OTP_RELEASE is defined in OTP 21+ only
--ifdef(OTP_RELEASE).
-
pmap_exec(Fun, Arg) ->
try
{'$res', Fun(Arg)}
catch Tag:Reason:Stack ->
{'$err', Tag, Reason, Stack}
end.
-
--else.
-
-pmap_exec(Fun, Arg) ->
- try
- {'$res', Fun(Arg)}
- catch Tag:Reason ->
- {'$err', Tag, Reason, erlang:get_stacktrace()}
- end.
-
--endif.
diff --git a/src/fabric/src/fabric_db_create.erl b/src/fabric/src/fabric_db_create.erl
deleted file mode 100644
index a2833e6aa..000000000
--- a/src/fabric/src/fabric_db_create.erl
+++ /dev/null
@@ -1,228 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_create).
--export([go/2]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
-%% @doc Create a new database, and all its partition files across the cluster
-%% Options is proplist with user_ctx, n, q, validate_name
-go(DbName, Options) ->
- case validate_dbname(DbName, Options) of
- ok ->
- couch_partition:validate_dbname(DbName, Options),
- case db_exists(DbName) of
- true ->
- {error, file_exists};
- false ->
- {Shards, Doc} = generate_shard_map(DbName, Options),
- CreateShardResult = create_shard_files(Shards, Options),
- case CreateShardResult of
- enametoolong ->
- {error, {database_name_too_long, DbName}};
- _ ->
- case {CreateShardResult, create_shard_db_doc(Doc)} of
- {ok, {ok, Status}} ->
- Status;
- {file_exists, {ok, _}} ->
- {error, file_exists};
- {_, Error} ->
- Error
- end
- end
- end;
- Error ->
- Error
- end.
-
-validate_dbname(DbName, Options) ->
- case couch_util:get_value(validate_name, Options, true) of
- false ->
- ok;
- true ->
- couch_db:validate_dbname(DbName)
- end.
-
-generate_shard_map(DbName, Options) ->
- {MegaSecs, Secs, _} = os:timestamp(),
- Suffix = "." ++ integer_to_list(MegaSecs*1000000 + Secs),
- Shards = mem3:choose_shards(DbName, [{shard_suffix,Suffix} | Options]),
- case mem3_util:open_db_doc(DbName) of
- {ok, Doc} ->
- % the DB already exists, and may have a different Suffix
- ok;
- {not_found, _} ->
- Doc = make_document(Shards, Suffix, Options)
- end,
- {Shards, Doc}.
-
-create_shard_files(Shards, Options) ->
- Workers = fabric_util:submit_jobs(Shards, create_db, [Options]),
- RexiMon = fabric_util:create_monitors(Shards),
- try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Workers) of
- {error, file_exists} ->
- file_exists;
- {error, enametoolong} ->
- enametoolong;
- {timeout, DefunctWorkers} ->
- fabric_util:log_timeout(DefunctWorkers, "create_db"),
- {error, timeout};
- _ ->
- ok
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_message({error, enametoolong}, _, _) ->
- {error, enametoolong};
-
-handle_message(file_exists, _, _) ->
- {error, file_exists};
-
-handle_message({rexi_DOWN, _, {_, Node}, _}, _, Workers) ->
- case lists:filter(fun(S) -> S#shard.node =/= Node end, Workers) of
- [] ->
- {stop, ok};
- RemainingWorkers ->
- {ok, RemainingWorkers}
- end;
-
-handle_message(_, Worker, Workers) ->
- case lists:delete(Worker, Workers) of
- [] ->
- {stop, ok};
- RemainingWorkers ->
- {ok, RemainingWorkers}
- end.
-
-create_shard_db_doc(Doc) ->
- Shards = [#shard{node=N} || N <- mem3:nodes()],
- RexiMon = fabric_util:create_monitors(Shards),
- Workers = fabric_util:submit_jobs(Shards, create_shard_db_doc, [Doc]),
- Acc0 = {length(Shards), fabric_dict:init(Workers, nil)},
- try fabric_util:recv(Workers, #shard.ref, fun handle_db_update/3, Acc0) of
- {timeout, {_, WorkersDict}} ->
- DefunctWorkers = fabric_util:remove_done_workers(WorkersDict, nil),
- fabric_util:log_timeout(
- DefunctWorkers,
- "create_shard_db_doc"
- ),
- {error, timeout};
- Else ->
- Else
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_db_update({rexi_DOWN, _, {_, Node}, _}, _Worker, {W, Counters}) ->
- New = fabric_dict:filter(fun(S, _) -> S#shard.node =/= Node end, Counters),
- maybe_stop(W, New);
-
-handle_db_update({rexi_EXIT, _Reason}, Worker, {W, Counters}) ->
- maybe_stop(W, fabric_dict:erase(Worker, Counters));
-
-handle_db_update(conflict, _, _) ->
- % just fail when we get any conflicts
- {error, conflict};
-
-handle_db_update(Msg, Worker, {W, Counters}) ->
- maybe_stop(W, fabric_dict:store(Worker, Msg, Counters)).
-
-maybe_stop(W, Counters) ->
- case fabric_dict:any(nil, Counters) of
- true ->
- {ok, {W, Counters}};
- false ->
- case lists:sum([1 || {_, ok} <- Counters]) of
- NumOk when NumOk >= (W div 2 +1) ->
- {stop, ok};
- NumOk when NumOk > 0 ->
- {stop, accepted};
- _ ->
- {error, internal_server_error}
- end
- end.
-
-make_document([#shard{dbname=DbName}|_] = Shards, Suffix, Options) ->
- {RawOut, ByNodeOut, ByRangeOut} =
- lists:foldl(fun(#shard{node=N, range=[B,E]}, {Raw, ByNode, ByRange}) ->
- Range = ?l2b([couch_util:to_hex(<<B:32/integer>>), "-",
- couch_util:to_hex(<<E:32/integer>>)]),
- Node = couch_util:to_binary(N),
- {[[<<"add">>, Range, Node] | Raw], orddict:append(Node, Range, ByNode),
- orddict:append(Range, Node, ByRange)}
- end, {[], [], []}, Shards),
- EngineProp = case couch_util:get_value(engine, Options) of
- E when is_binary(E) -> [{<<"engine">>, E}];
- _ -> []
- end,
- DbProps = case couch_util:get_value(props, Options) of
- Props when is_list(Props) -> [{<<"props">>, {Props}}];
- _ -> []
- end,
- #doc{
- id = DbName,
- body = {[
- {<<"shard_suffix">>, Suffix},
- {<<"changelog">>, lists:sort(RawOut)},
- {<<"by_node">>, {[{K,lists:sort(V)} || {K,V} <- ByNodeOut]}},
- {<<"by_range">>, {[{K,lists:sort(V)} || {K,V} <- ByRangeOut]}}
- ] ++ EngineProp ++ DbProps}
- }.
-
-db_exists(DbName) -> is_list(catch mem3:shards(DbName)).
-
-%% -ifdef(TEST).
-%% -include_lib("eunit/include/eunit.hrl").
-%%
-%% db_exists_test_() ->
-%% {
-%% setup,
-%% fun setup_all/0,
-%% fun teardown_all/1,
-%% [
-%% fun db_exists_for_existing_db/0,
-%% fun db_exists_for_missing_db/0
-%% ]
-%% }.
-%%
-%%
-%% setup_all() ->
-%% meck:new(mem3).
-%%
-%%
-%% teardown_all(_) ->
-%% meck:unload().
-%%
-%%
-%% db_exists_for_existing_db() ->
-%% Mock = fun(DbName) when is_binary(DbName) ->
-%% [#shard{dbname = DbName, range = [0,100]}]
-%% end,
-%% ok = meck:expect(mem3, shards, Mock),
-%% ?assertEqual(true, db_exists(<<"foobar">>)),
-%% ?assertEqual(true, meck:validate(mem3)).
-%%
-%%
-%% db_exists_for_missing_db() ->
-%% Mock = fun(DbName) ->
-%% erlang:error(database_does_not_exist, DbName)
-%% end,
-%% ok = meck:expect(mem3, shards, Mock),
-%% ?assertEqual(false, db_exists(<<"foobar">>)),
-%% ?assertEqual(false, meck:validate(mem3)).
-%%
-%% -endif.
diff --git a/src/fabric/src/fabric_db_delete.erl b/src/fabric/src/fabric_db_delete.erl
deleted file mode 100644
index c146cb6cd..000000000
--- a/src/fabric/src/fabric_db_delete.erl
+++ /dev/null
@@ -1,98 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_delete).
--export([go/2]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-%% @doc Options aren't used at all now in couch on delete but are left here
-%% to be consistent with fabric_db_create for possible future use
-%% @see couch_server:delete/2
-%%
-go(DbName, _Options) ->
- Shards = mem3:shards(DbName),
- % delete doc from shard_db
- try delete_shard_db_doc(DbName) of
- {ok, ok} ->
- ok;
- {ok, accepted} ->
- accepted;
- {ok, not_found} ->
- erlang:error(database_does_not_exist, DbName);
- Error ->
- Error
- after
- % delete the shard files
- fabric_util:submit_jobs(Shards, delete_db, [])
- end.
-
-delete_shard_db_doc(Doc) ->
- Shards = [#shard{node=N} || N <- mem3:nodes()],
- RexiMon = fabric_util:create_monitors(Shards),
- Workers = fabric_util:submit_jobs(Shards, delete_shard_db_doc, [Doc]),
- Acc0 = {length(Shards), fabric_dict:init(Workers, nil)},
- try fabric_util:recv(Workers, #shard.ref, fun handle_db_update/3, Acc0) of
- {timeout, {_, WorkersDict}} ->
- DefunctWorkers = fabric_util:remove_done_workers(WorkersDict, nil),
- fabric_util:log_timeout(
- DefunctWorkers,
- "delete_shard_db_doc"
- ),
- {error, timeout};
- Else ->
- Else
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_db_update({rexi_DOWN, _, {_, Node}, _}, _Worker, {W, Counters}) ->
- New = fabric_dict:filter(fun(S, _) -> S#shard.node =/= Node end, Counters),
- maybe_stop(W, New);
-
-handle_db_update({rexi_EXIT, _Reason}, Worker, {W, Counters}) ->
- maybe_stop(W, fabric_dict:erase(Worker, Counters));
-
-handle_db_update(conflict, _, _) ->
- % just fail when we get any conflicts
- {error, conflict};
-
-handle_db_update(Msg, Worker, {W, Counters}) ->
- maybe_stop(W, fabric_dict:store(Worker, Msg, Counters)).
-
-maybe_stop(W, Counters) ->
- case fabric_dict:any(nil, Counters) of
- true ->
- {ok, {W, Counters}};
- false ->
- {Ok,NotFound} = fabric_dict:fold(fun count_replies/3, {0,0}, Counters),
- case {Ok + NotFound, Ok, NotFound} of
- {W, 0, W} ->
- {#shard{dbname=Name}, _} = hd(Counters),
- couch_log:warning("~p not_found ~d", [?MODULE, Name]),
- {stop, not_found};
- {W, _, _} ->
- {stop, ok};
- {_, M, _} when M > 0 ->
- {stop,accepted};
- _ ->
- {error, internal_server_error}
- end
- end.
-
-count_replies(_, ok, {Ok, NotFound}) ->
- {Ok+1, NotFound};
-count_replies(_, not_found, {Ok, NotFound}) ->
- {Ok, NotFound+1};
-count_replies(_, _, Acc) ->
- Acc.
diff --git a/src/fabric/src/fabric_db_doc_count.erl b/src/fabric/src/fabric_db_doc_count.erl
deleted file mode 100644
index a91014b7c..000000000
--- a/src/fabric/src/fabric_db_doc_count.erl
+++ /dev/null
@@ -1,62 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_doc_count).
-
--export([go/1]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-go(DbName) ->
- Shards = mem3:shards(DbName),
- Workers = fabric_util:submit_jobs(Shards, get_doc_count, []),
- RexiMon = fabric_util:create_monitors(Shards),
- Acc0 = {fabric_dict:init(Workers, nil), []},
- try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0) of
- {timeout, {WorkersDict, _}} ->
- DefunctWorkers = fabric_util:remove_done_workers(WorkersDict, nil),
- fabric_util:log_timeout(DefunctWorkers, "get_doc_count"),
- {error, timeout};
- Else ->
- Else
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Shard, {Counters, Resps}) ->
- case fabric_ring:node_down(NodeRef, Counters, Resps) of
- {ok, Counters1} -> {ok, {Counters1, Resps}};
- error -> {error, {nodedown, <<"progress not possible">>}}
- end;
-
-handle_message({rexi_EXIT, Reason}, Shard, {Counters, Resps}) ->
- case fabric_ring:handle_error(Shard, Counters, Resps) of
- {ok, Counters1} -> {ok, {Counters1, Resps}};
- error -> {error, Reason}
- end;
-
-handle_message({ok, Count}, Shard, {Counters, Resps}) ->
- case fabric_ring:handle_response(Shard, Count, Counters, Resps) of
- {ok, {Counters1, Resps1}} ->
- {ok, {Counters1, Resps1}};
- {stop, Resps1} ->
- Total = fabric_dict:fold(fun(_, C, A) -> A + C end, 0, Resps1),
- {stop, Total}
- end;
-
-handle_message(Reason, Shard, {Counters, Resps}) ->
- case fabric_ring:handle_error(Shard, Counters, Resps) of
- {ok, Counters1} -> {ok, {Counters1, Resps}};
- error -> {error, Reason}
- end.
diff --git a/src/fabric/src/fabric_db_info.erl b/src/fabric/src/fabric_db_info.erl
deleted file mode 100644
index 6c7d2d177..000000000
--- a/src/fabric/src/fabric_db_info.erl
+++ /dev/null
@@ -1,171 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_info).
-
--export([go/1]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-go(DbName) ->
- Shards = mem3:shards(DbName),
- Workers = fabric_util:submit_jobs(Shards, get_db_info, []),
- RexiMon = fabric_util:create_monitors(Shards),
- Fun = fun handle_message/3,
- {ok, ClusterInfo} = get_cluster_info(Shards),
- CInfo = [{cluster, ClusterInfo}],
- Acc0 = {fabric_dict:init(Workers, nil), [], CInfo},
- try
- case fabric_util:recv(Workers, #shard.ref, Fun, Acc0) of
-
- {ok, Acc} ->
- {ok, Acc};
- {timeout, {WorkersDict, _, _}} ->
- DefunctWorkers = fabric_util:remove_done_workers(
- WorkersDict,
- nil
- ),
- fabric_util:log_timeout(
- DefunctWorkers,
- "get_db_info"
- ),
- {error, timeout};
- {error, Error} ->
- throw(Error)
- end
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-
-handle_message({rexi_DOWN, _, {_,NodeRef},_}, _, {Counters, Resps, CInfo}) ->
- case fabric_ring:node_down(NodeRef, Counters, Resps) of
- {ok, Counters1} -> {ok, {Counters1, Resps, CInfo}};
- error -> {error, {nodedown, <<"progress not possible">>}}
- end;
-
-handle_message({rexi_EXIT, Reason}, Shard, {Counters, Resps, CInfo}) ->
- case fabric_ring:handle_error(Shard, Counters, Resps) of
- {ok, Counters1} -> {ok, {Counters1, Resps, CInfo}};
- error -> {error, Reason}
- end;
-
-handle_message({ok, Info}, Shard, {Counters, Resps, CInfo}) ->
- case fabric_ring:handle_response(Shard, Info, Counters, Resps) of
- {ok, {Counters1, Resps1}} ->
- {ok, {Counters1, Resps1, CInfo}};
- {stop, Resps1} ->
- {stop, build_final_response(CInfo, Shard#shard.dbname, Resps1)}
- end;
-
-handle_message(Reason, Shard, {Counters, Resps, CInfo}) ->
- case fabric_ring:handle_error(Shard, Counters, Resps) of
- {ok, Counters1} -> {ok, {Counters1, Resps, CInfo}};
- error -> {error, Reason}
- end.
-
-
-build_final_response(CInfo, DbName, Responses) ->
- AccF = fabric_dict:fold(fun(Shard, Info, {Seqs, PSeqs, Infos}) ->
- Seq = couch_util:get_value(update_seq, Info),
- PSeq = couch_util:get_value(purge_seq, Info),
- {[{Shard, Seq} | Seqs], [{Shard, PSeq} | PSeqs], [Info | Infos]}
- end, {[], [], []}, Responses),
- {Seqs, PSeqs, Infos} = AccF,
- PackedSeq = fabric_view_changes:pack_seqs(Seqs),
- PackedPSeq = fabric_view_changes:pack_seqs(PSeqs),
- MergedInfos = merge_results(lists:flatten([CInfo | Infos])),
- Sequences = [{purge_seq, PackedPSeq}, {update_seq, PackedSeq}],
- [{db_name, DbName}] ++ Sequences ++ MergedInfos.
-
-
-merge_results(Info) ->
- Dict = lists:foldl(fun({K,V},D0) -> orddict:append(K,V,D0) end,
- orddict:new(), Info),
- orddict:fold(fun
- (doc_count, X, Acc) ->
- [{doc_count, lists:sum(X)} | Acc];
- (doc_del_count, X, Acc) ->
- [{doc_del_count, lists:sum(X)} | Acc];
- (compact_running, X, Acc) ->
- [{compact_running, lists:member(true, X)} | Acc];
- (sizes, X, Acc) ->
- [{sizes, {merge_object(X)}} | Acc];
- (disk_format_version, X, Acc) ->
- [{disk_format_version, lists:max(X)} | Acc];
- (cluster, [X], Acc) ->
- [{cluster, {X}} | Acc];
- (props, Xs, Acc) ->
- [{props, {merge_object(Xs)}} | Acc];
- (_K, _V, Acc) ->
- Acc
- end, [{instance_start_time, <<"0">>}], Dict).
-
-merge_object(Objects) ->
- Dict = lists:foldl(fun({Props}, D) ->
- lists:foldl(fun({K,V},D0) -> orddict:append(K,V,D0) end, D, Props)
- end, orddict:new(), Objects),
- orddict:fold(fun
- (Key, [X | _] = Xs, Acc) when is_integer(X) ->
- [{Key, lists:sum(Xs)} | Acc];
- (Key, [X | _] = Xs, Acc) when is_boolean(X) ->
- [{Key, lists:all(fun all_true/1, Xs)} | Acc];
- (_Key, _Xs, Acc) ->
- Acc
- end, [], Dict).
-
-all_true(true) -> true;
-all_true(_) -> false.
-
-get_cluster_info(Shards) ->
- Dict = lists:foldl(fun(#shard{range = R}, Acc) ->
- dict:update_counter(R, 1, Acc)
- end, dict:new(), Shards),
- Q = dict:size(Dict),
- N = dict:fold(fun(_, X, Acc) -> max(X, Acc) end, 0, Dict),
- %% defaults as per mem3:quorum/1
- WR = N div 2 + 1,
- {ok, [{q, Q}, {n, N}, {w, WR}, {r, WR}]}.
-
-
-%% -ifdef(TEST).
-%% -include_lib("eunit/include/eunit.hrl").
-%%
-%% get_cluster_info_test_() ->
-%% {
-%% setup,
-%% fun setup/0,
-%% fun get_cluster_info_test_generator/1
-%% }.
-%%
-%%
-%% setup() ->
-%% Quorums = [1, 2, 3],
-%% Shards = [1, 3, 5, 8, 12, 24],
-%% [{N, Q} || N <- Quorums, Q <- Shards].
-%%
-%% get_cluster_info_test_generator([]) ->
-%% [];
-%% get_cluster_info_test_generator([{N, Q} | Rest]) ->
-%% {generator,
-%% fun() ->
-%% Nodes = lists:seq(1, 8),
-%% Shards = mem3_util:create_partition_map(<<"foo">>, N, Q, Nodes),
-%% {ok, Info} = get_cluster_info(Shards),
-%% [
-%% ?_assertEqual(N, couch_util:get_value(n, Info)),
-%% ?_assertEqual(Q, couch_util:get_value(q, Info))
-%% ] ++ get_cluster_info_test_generator(Rest)
-%% end}.
-%%
-%% -endif.
diff --git a/src/fabric/src/fabric_db_meta.erl b/src/fabric/src/fabric_db_meta.erl
deleted file mode 100644
index 348b06d51..000000000
--- a/src/fabric/src/fabric_db_meta.erl
+++ /dev/null
@@ -1,198 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_meta).
-
--export([set_revs_limit/3, set_security/3, get_all_security/2,
- set_purge_infos_limit/3]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--record(acc, {
- workers,
- finished,
- num_workers
-}).
-
-
-set_revs_limit(DbName, Limit, Options) ->
- Shards = mem3:shards(DbName),
- Workers = fabric_util:submit_jobs(Shards, set_revs_limit, [Limit, Options]),
- Handler = fun handle_revs_message/3,
- Acc0 = {Workers, length(Workers) - 1},
- case fabric_util:recv(Workers, #shard.ref, Handler, Acc0) of
- {ok, ok} ->
- ok;
- {timeout, {DefunctWorkers, _}} ->
- fabric_util:log_timeout(DefunctWorkers, "set_revs_limit"),
- {error, timeout};
- Error ->
- Error
- end.
-
-handle_revs_message(ok, _, {_Workers, 0}) ->
- {stop, ok};
-handle_revs_message(ok, Worker, {Workers, Waiting}) ->
- {ok, {lists:delete(Worker, Workers), Waiting - 1}};
-handle_revs_message(Error, _, _Acc) ->
- {error, Error}.
-
-
-set_purge_infos_limit(DbName, Limit, Options) ->
- Shards = mem3:shards(DbName),
- Workers = fabric_util:submit_jobs(Shards, set_purge_infos_limit, [Limit, Options]),
- Handler = fun handle_purge_message/3,
- Acc0 = {Workers, length(Workers) - 1},
- case fabric_util:recv(Workers, #shard.ref, Handler, Acc0) of
- {ok, ok} ->
- ok;
- {timeout, {DefunctWorkers, _}} ->
- fabric_util:log_timeout(DefunctWorkers, "set_purged_docs_limit"),
- {error, timeout};
- Error ->
- Error
- end.
-
-handle_purge_message(ok, _, {_Workers, 0}) ->
- {stop, ok};
-handle_purge_message(ok, Worker, {Workers, Waiting}) ->
- {ok, {lists:delete(Worker, Workers), Waiting - 1}};
-handle_purge_message(Error, _, _Acc) ->
- {error, Error}.
-
-
-set_security(DbName, SecObj, Options) ->
- Shards = mem3:shards(DbName),
- RexiMon = fabric_util:create_monitors(Shards),
- Workers = fabric_util:submit_jobs(Shards, set_security, [SecObj, Options]),
- Handler = fun handle_set_message/3,
- Acc = #acc{
- workers=Workers,
- finished=[],
- num_workers=length(Workers)
- },
- try fabric_util:recv(Workers, #shard.ref, Handler, Acc) of
- {ok, #acc{finished=Finished}} ->
- case check_sec_set(length(Workers), Finished) of
- ok -> ok;
- Error -> Error
- end;
- {timeout, #acc{workers=DefunctWorkers}} ->
- fabric_util:log_timeout(DefunctWorkers, "set_security"),
- {error, timeout};
- Error ->
- Error
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_set_message({rexi_DOWN, _, {_, Node}, _}, _, #acc{workers=Wrkrs}=Acc) ->
- RemWorkers = lists:filter(fun(S) -> S#shard.node =/= Node end, Wrkrs),
- maybe_finish_set(Acc#acc{workers=RemWorkers});
-handle_set_message(ok, W, Acc) ->
- NewAcc = Acc#acc{
- workers = (Acc#acc.workers -- [W]),
- finished = [W | Acc#acc.finished]
- },
- maybe_finish_set(NewAcc);
-handle_set_message({rexi_EXIT, {maintenance_mode, _}}, W, Acc) ->
- NewAcc = Acc#acc{workers = (Acc#acc.workers -- [W])},
- maybe_finish_set(NewAcc);
-handle_set_message(Error, W, Acc) ->
- Dst = {W#shard.node, W#shard.name},
- couch_log:error("Failed to set security object on ~p :: ~p", [Dst, Error]),
- NewAcc = Acc#acc{workers = (Acc#acc.workers -- [W])},
- maybe_finish_set(NewAcc).
-
-maybe_finish_set(#acc{workers=[]}=Acc) ->
- {stop, Acc};
-maybe_finish_set(#acc{finished=Finished, num_workers=NumWorkers}=Acc) ->
- case check_sec_set(NumWorkers, Finished) of
- ok -> {stop, Acc};
- _ -> {ok, Acc}
- end.
-
-check_sec_set(NumWorkers, SetWorkers) ->
- try
- check_sec_set_int(NumWorkers, SetWorkers)
- catch throw:Reason ->
- {error, Reason}
- end.
-
-check_sec_set_int(NumWorkers, SetWorkers) ->
- case length(SetWorkers) < ((NumWorkers div 2) + 1) of
- true -> throw(no_majority);
- false -> ok
- end,
- % Hack to reuse fabric_ring:is_progress_possible/1
- FakeCounters = [{S, 0} || S <- SetWorkers],
- case fabric_ring:is_progress_possible(FakeCounters) of
- false -> throw(no_ring);
- true -> ok
- end,
- ok.
-
-
-get_all_security(DbName, Options) ->
- Shards = case proplists:get_value(shards, Options) of
- Shards0 when is_list(Shards0) -> Shards0;
- _ -> mem3:shards(DbName)
- end,
- RexiMon = fabric_util:create_monitors(Shards),
- Workers = fabric_util:submit_jobs(Shards, get_all_security, [[?ADMIN_CTX]]),
- Handler = fun handle_get_message/3,
- Acc = #acc{
- workers=Workers,
- finished=[],
- num_workers=length(Workers)
- },
- try fabric_util:recv(Workers, #shard.ref, Handler, Acc) of
- {ok, #acc{finished=SecObjs}} when length(SecObjs) > length(Workers) / 2 ->
- {ok, SecObjs};
- {ok, _} ->
- {error, no_majority};
- {timeout, #acc{workers=DefunctWorkers}} ->
- fabric_util:log_timeout(
- DefunctWorkers,
- "get_all_security"
- ),
- {error, timeout};
- Error ->
- Error
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_get_message({rexi_DOWN, _, {_, Node}, _}, _, #acc{workers=Wrkrs}=Acc) ->
- RemWorkers = lists:filter(fun(S) -> S#shard.node =/= Node end, Wrkrs),
- maybe_finish_get(Acc#acc{workers=RemWorkers});
-handle_get_message({Props}=SecObj, W, Acc) when is_list(Props) ->
- NewAcc = Acc#acc{
- workers = (Acc#acc.workers -- [W]),
- finished = [{W, SecObj} | Acc#acc.finished]
- },
- maybe_finish_get(NewAcc);
-handle_get_message({rexi_EXIT, {maintenance_mode, _}}, W, Acc) ->
- NewAcc = Acc#acc{workers = (Acc#acc.workers -- [W])},
- maybe_finish_get(NewAcc);
-handle_get_message(Error, W, Acc) ->
- Dst = {W#shard.node, W#shard.name},
- couch_log:error("Failed to get security object on ~p :: ~p", [Dst, Error]),
- NewAcc = Acc#acc{workers = (Acc#acc.workers -- [W])},
- maybe_finish_get(NewAcc).
-
-maybe_finish_get(#acc{workers=[]}=Acc) ->
- {stop, Acc};
-maybe_finish_get(Acc) ->
- {ok, Acc}.
diff --git a/src/fabric/src/fabric_db_partition_info.erl b/src/fabric/src/fabric_db_partition_info.erl
deleted file mode 100644
index 954c52db2..000000000
--- a/src/fabric/src/fabric_db_partition_info.erl
+++ /dev/null
@@ -1,155 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_partition_info).
-
--export([go/2]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-
--record(acc, {
- counters,
- replies,
- ring_opts
-}).
-
-
-go(DbName, Partition) ->
- Shards = mem3:shards(DbName, couch_partition:shard_key(Partition)),
- Workers = fabric_util:submit_jobs(Shards, get_partition_info, [Partition]),
- RexiMon = fabric_util:create_monitors(Shards),
- Fun = fun handle_message/3,
- Acc0 = #acc{
- counters = fabric_dict:init(Workers, nil),
- replies = [],
- ring_opts = [{any, Shards}]
- },
- try
- case fabric_util:recv(Workers, #shard.ref, Fun, Acc0) of
- {ok, Res} -> {ok, Res};
- {timeout, {WorkersDict, _}} ->
- DefunctWorkers = fabric_util:remove_done_workers(
- WorkersDict,
- nil
- ),
- fabric_util:log_timeout(
- DefunctWorkers,
- "get_partition_info"
- ),
- {error, timeout};
- {error, Error} -> throw(Error)
- end
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Shard, #acc{} = Acc) ->
- #acc{counters = Counters, ring_opts = RingOpts} = Acc,
- case fabric_util:remove_down_workers(Counters, NodeRef, RingOpts) of
- {ok, NewCounters} ->
- {ok, Acc#acc{counters = NewCounters}};
- error ->
- {error, {nodedown, <<"progress not possible">>}}
- end;
-
-handle_message({rexi_EXIT, Reason}, Shard, #acc{} = Acc) ->
- #acc{counters = Counters, ring_opts = RingOpts} = Acc,
- NewCounters = fabric_dict:erase(Shard, Counters),
- case fabric_ring:is_progress_possible(NewCounters, RingOpts) of
- true ->
- {ok, Acc#acc{counters = NewCounters}};
- false ->
- {error, Reason}
- end;
-
-handle_message({ok, Info}, #shard{dbname=Name} = Shard, #acc{} = Acc) ->
- #acc{counters = Counters, replies = Replies} = Acc,
- Replies1 = [Info | Replies],
- Counters1 = fabric_dict:erase(Shard, Counters),
- case fabric_dict:size(Counters1) =:= 0 of
- true ->
- [FirstInfo | RestInfos] = Replies1,
- PartitionInfo = get_max_partition_size(FirstInfo, RestInfos),
- {stop, [{db_name, Name} | format_partition(PartitionInfo)]};
- false ->
- {ok, Acc#acc{counters = Counters1, replies = Replies1}}
- end;
-
-handle_message(_, _, #acc{} = Acc) ->
- {ok, Acc}.
-
-
-get_max_partition_size(Max, []) ->
- Max;
-get_max_partition_size(MaxInfo, [NextInfo | Rest]) ->
- {sizes, MaxSize} = lists:keyfind(sizes, 1, MaxInfo),
- {sizes, NextSize} = lists:keyfind(sizes, 1, NextInfo),
-
- {external, MaxExtSize} = lists:keyfind(external, 1, MaxSize),
- {external, NextExtSize} = lists:keyfind(external, 1, NextSize),
- case NextExtSize > MaxExtSize of
- true ->
- get_max_partition_size(NextInfo, Rest);
- false ->
- get_max_partition_size(MaxInfo, Rest)
- end.
-
-
-% for JS to work nicely we need to convert the size list
-% to a jiffy object
-format_partition(PartitionInfo) ->
- {value, {sizes, Size}, PartitionInfo1} = lists:keytake(sizes, 1, PartitionInfo),
- [{sizes, {Size}} | PartitionInfo1].
-
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
-
-node_down_test() ->
- [S1, S2] = [mk_shard("n1", [0, 4]), mk_shard("n2", [0, 8])],
- Acc1 = #acc{
- counters = fabric_dict:init([S1, S2], nil),
- ring_opts = [{any, [S1, S2]}]
- },
-
- N1 = S1#shard.node,
- {ok, Acc2} = handle_message({rexi_DOWN, nil, {nil, N1}, nil}, nil, Acc1),
- ?assertEqual([{S2, nil}], Acc2#acc.counters),
-
- N2 = S2#shard.node,
- ?assertEqual({error, {nodedown, <<"progress not possible">>}},
- handle_message({rexi_DOWN, nil, {nil, N2}, nil}, nil, Acc2)).
-
-
-worker_exit_test() ->
- [S1, S2] = [mk_shard("n1", [0, 4]), mk_shard("n2", [0, 8])],
- Acc1 = #acc{
- counters = fabric_dict:init([S1, S2], nil),
- ring_opts = [{any, [S1, S2]}]
- },
-
- {ok, Acc2} = handle_message({rexi_EXIT, boom}, S1, Acc1),
- ?assertEqual([{S2, nil}], Acc2#acc.counters),
-
- ?assertEqual({error, bam}, handle_message({rexi_EXIT, bam}, S2, Acc2)).
-
-
-mk_shard(Name, Range) ->
- Node = list_to_atom(Name),
- BName = list_to_binary(Name),
- #shard{name = BName, node = Node, range = Range}.
-
--endif.
diff --git a/src/fabric/src/fabric_db_update_listener.erl b/src/fabric/src/fabric_db_update_listener.erl
deleted file mode 100644
index fb2937be1..000000000
--- a/src/fabric/src/fabric_db_update_listener.erl
+++ /dev/null
@@ -1,177 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_update_listener).
-
--export([go/4, start_update_notifier/1, stop/1, wait_db_updated/1]).
--export([handle_db_event/3]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--record(worker, {
- ref,
- node,
- pid
-}).
-
--record(cb_state, {
- client_pid,
- client_ref,
- notify
-}).
-
--record(acc, {
- parent,
- state,
- shards
-}).
-
-go(Parent, ParentRef, DbName, Timeout) ->
- Shards = mem3:shards(DbName),
- Notifiers = start_update_notifiers(Shards),
- MonRefs = lists:usort([rexi_utils:server_pid(N) || #worker{node = N} <- Notifiers]),
- RexiMon = rexi_monitor:start(MonRefs),
- MonPid = start_cleanup_monitor(self(), Notifiers),
- %% This is not a common pattern for rexi but to enable the calling
- %% process to communicate via handle_message/3 we "fake" it as a
- %% a spawned worker.
- Workers = [#worker{ref=ParentRef, pid=Parent} | Notifiers],
- Acc = #acc{
- parent = Parent,
- state = unset,
- shards = Shards
- },
- Resp = try
- receive_results(Workers, Acc, Timeout)
- after
- rexi_monitor:stop(RexiMon),
- stop_cleanup_monitor(MonPid)
- end,
- case Resp of
- {ok, _} -> ok;
- {error, Error} -> erlang:error(Error);
- Error -> erlang:error(Error)
- end.
-
-start_update_notifiers(Shards) ->
- EndPointDict = lists:foldl(fun(#shard{node=Node, name=Name}, Acc) ->
- dict:append(Node, Name, Acc)
- end, dict:new(), Shards),
- lists:map(fun({Node, DbNames}) ->
- Ref = rexi:cast(Node, {?MODULE, start_update_notifier, [DbNames]}),
- #worker{ref=Ref, node=Node}
- end, dict:to_list(EndPointDict)).
-
-% rexi endpoint
-start_update_notifier(DbNames) ->
- {Caller, Ref} = get(rexi_from),
- Notify = config:get("couchdb", "maintenance_mode", "false") /= "true",
- State = #cb_state{client_pid = Caller, client_ref = Ref, notify = Notify},
- Options = [{parent, Caller}, {dbnames, DbNames}],
- couch_event:listen(?MODULE, handle_db_event, State, Options).
-
-handle_db_event(_DbName, updated, #cb_state{notify = true} = St) ->
- erlang:send(St#cb_state.client_pid, {St#cb_state.client_ref, db_updated}),
- {ok, St};
-handle_db_event(_DbName, deleted, St) ->
- erlang:send(St#cb_state.client_pid, {St#cb_state.client_ref, db_deleted}),
- stop;
-handle_db_event(_DbName, _Event, St) ->
- {ok, St}.
-
-start_cleanup_monitor(Parent, Notifiers) ->
- spawn(fun() ->
- Ref = erlang:monitor(process, Parent),
- cleanup_monitor(Parent, Ref, Notifiers)
- end).
-
-stop_cleanup_monitor(MonPid) ->
- MonPid ! {self(), stop}.
-
-cleanup_monitor(Parent, Ref, Notifiers) ->
- receive
- {'DOWN', Ref, _, _, _} ->
- stop_update_notifiers(Notifiers);
- {Parent, stop} ->
- stop_update_notifiers(Notifiers);
- Else ->
- couch_log:error("Unkown message in ~w :: ~w", [?MODULE, Else]),
- stop_update_notifiers(Notifiers),
- exit(Parent, {unknown_message, Else})
- end.
-
-stop_update_notifiers(Notifiers) ->
- rexi:kill_all([{N, Ref} || #worker{node = N, ref = Ref} <- Notifiers]).
-
-stop({Pid, Ref}) ->
- erlang:send(Pid, {Ref, done}).
-
-wait_db_updated({Pid, Ref}) ->
- MonRef = erlang:monitor(process, Pid),
- erlang:send(Pid, {Ref, get_state}),
- receive
- {state, Pid, State} ->
- erlang:demonitor(MonRef, [flush]),
- State;
- {'DOWN', MonRef, process, Pid, _Reason} ->
- changes_feed_died
- after 300000 ->
- ?MODULE:wait_db_updated({Pid, Ref})
- end.
-
-receive_results(Workers, Acc0, Timeout) ->
- Fun = fun handle_message/3,
- case rexi_utils:recv(Workers, #worker.ref, Fun, Acc0, infinity, Timeout) of
- {timeout, #acc{state=updated}=Acc} ->
- receive_results(Workers, Acc, Timeout);
- {timeout, #acc{state=waiting}=Acc} ->
- erlang:send(Acc#acc.parent, {state, self(), timeout}),
- receive_results(Workers, Acc#acc{state=unset}, Timeout);
- {timeout, Acc} ->
- receive_results(Workers, Acc#acc{state=timeout}, Timeout);
- {_, Acc} ->
- {ok, Acc}
- end.
-
-
-handle_message({rexi_DOWN, _, {_, Node}, _}, _Worker, Acc) ->
- handle_error(Node, {nodedown, Node}, Acc);
-handle_message({rexi_EXIT, _Reason}, Worker, Acc) ->
- handle_error(Worker#worker.node, {worker_exit, Worker}, Acc);
-handle_message({gen_event_EXIT, Node, Reason}, _Worker, Acc) ->
- handle_error(Node, {gen_event_EXIT, Node, Reason}, Acc);
-handle_message(db_updated, _Worker, #acc{state=waiting}=Acc) ->
- % propagate message to calling controller
- erlang:send(Acc#acc.parent, {state, self(), updated}),
- {ok, Acc#acc{state=unset}};
-handle_message(db_updated, _Worker, Acc) ->
- {ok, Acc#acc{state=updated}};
-handle_message(db_deleted, _Worker, _Acc) ->
- {stop, ok};
-handle_message(get_state, _Worker, #acc{state=unset}=Acc) ->
- {ok, Acc#acc{state=waiting}};
-handle_message(get_state, _Worker, Acc) ->
- erlang:send(Acc#acc.parent, {state, self(), Acc#acc.state}),
- {ok, Acc#acc{state=unset}};
-handle_message(done, _, _) ->
- {stop, ok}.
-
-
-handle_error(Node, Reason, #acc{shards = Shards} = Acc) ->
- Rest = lists:filter(fun(#shard{node = N}) -> N /= Node end, Shards),
- case fabric_ring:is_progress_possible([{R, nil} || R <- Rest]) of
- true ->
- {ok, Acc#acc{shards = Rest}};
- false ->
- {error, Reason}
- end.
diff --git a/src/fabric/src/fabric_design_doc_count.erl b/src/fabric/src/fabric_design_doc_count.erl
deleted file mode 100644
index b0efc3007..000000000
--- a/src/fabric/src/fabric_design_doc_count.erl
+++ /dev/null
@@ -1,62 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_design_doc_count).
-
--export([go/1]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-go(DbName) ->
- Shards = mem3:shards(DbName),
- Workers = fabric_util:submit_jobs(Shards, get_design_doc_count, []),
- RexiMon = fabric_util:create_monitors(Shards),
- Acc0 = {fabric_dict:init(Workers, nil), []},
- try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0) of
- {timeout, {WorkersDict, _}} ->
- DefunctWorkers = fabric_util:remove_done_workers(WorkersDict, nil),
- fabric_util:log_timeout(DefunctWorkers, "get_design_doc_count"),
- {error, timeout};
- Else ->
- Else
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Shard, {Counters, Resps}) ->
- case fabric_ring:node_down(NodeRef, Counters, Resps) of
- {ok, Counters1} -> {ok, {Counters1, Resps}};
- error -> {error, {nodedown, <<"progress not possible">>}}
- end;
-
-handle_message({rexi_EXIT, Reason}, Shard, {Counters, Resps}) ->
- case fabric_ring:handle_error(Shard, Counters, Resps) of
- {ok, Counters1} -> {ok, {Counters1, Resps}};
- error -> {error, Reason}
- end;
-
-handle_message({ok, Count}, Shard, {Counters, Resps}) ->
- case fabric_ring:handle_response(Shard, Count, Counters, Resps) of
- {ok, {Counters1, Resps1}} ->
- {ok, {Counters1, Resps1}};
- {stop, Resps1} ->
- Total = fabric_dict:fold(fun(_, C, A) -> A + C end, 0, Resps1),
- {stop, Total}
- end;
-
-handle_message(Reason, Shard, {Counters, Resps}) ->
- case fabric_ring:handle_error(Shard, Counters, Resps) of
- {ok, Counters1} -> {ok, {Counters1, Resps}};
- error -> {error, Reason}
- end.
diff --git a/src/fabric/src/fabric_dict.erl b/src/fabric/src/fabric_dict.erl
deleted file mode 100644
index b63ed2095..000000000
--- a/src/fabric/src/fabric_dict.erl
+++ /dev/null
@@ -1,61 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_dict).
--compile(export_all).
--compile(nowarn_export_all).
-
-% Instead of ets, let's use an ordered keylist. We'll need to revisit if we
-% have >> 100 shards, so a private interface is a good idea. - APK June 2010
-
-init(Keys, InitialValue) ->
- orddict:from_list([{Key, InitialValue} || Key <- Keys]).
-
-is_key(Key, Dict) ->
- orddict:is_key(Key, Dict).
-
-fetch_keys(Dict) ->
- orddict:fetch_keys(Dict).
-
-decrement_all(Dict) ->
- [{K,V-1} || {K,V} <- Dict].
-
-store(Key, Value, Dict) ->
- orddict:store(Key, Value, Dict).
-
-erase(Key, Dict) ->
- orddict:erase(Key, Dict).
-
-update_counter(Key, Incr, Dict0) ->
- orddict:update_counter(Key, Incr, Dict0).
-
-
-lookup_element(Key, Dict) ->
- couch_util:get_value(Key, Dict).
-
-size(Dict) ->
- orddict:size(Dict).
-
-any(Value, Dict) ->
- lists:keymember(Value, 2, Dict).
-
-filter(Fun, Dict) ->
- orddict:filter(Fun, Dict).
-
-fold(Fun, Acc0, Dict) ->
- orddict:fold(Fun, Acc0, Dict).
-
-to_list(Dict) ->
- orddict:to_list(Dict).
-
-from_list(KVList) when is_list(KVList) ->
- orddict:from_list(KVList).
diff --git a/src/fabric/src/fabric_doc_attachments.erl b/src/fabric/src/fabric_doc_attachments.erl
deleted file mode 100644
index 723b9e804..000000000
--- a/src/fabric/src/fabric_doc_attachments.erl
+++ /dev/null
@@ -1,160 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_doc_attachments).
-
--compile(tuple_calls).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-%% couch api calls
--export([receiver/2]).
-
-receiver(_Req, undefined) ->
- <<"">>;
-receiver(_Req, {unknown_transfer_encoding, Unknown}) ->
- exit({unknown_transfer_encoding, Unknown});
-receiver(Req, chunked) ->
- MiddleMan = spawn(fun() -> middleman(Req, chunked) end),
- fun(4096, ChunkFun, State) ->
- write_chunks(MiddleMan, ChunkFun, State)
- end;
-receiver(_Req, 0) ->
- <<"">>;
-receiver(Req, Length) when is_integer(Length) ->
- maybe_send_continue(Req),
- Middleman = spawn(fun() -> middleman(Req, Length) end),
- fun() ->
- Middleman ! {self(), gimme_data},
- Timeout = fabric_util:attachments_timeout(),
- receive
- {Middleman, Data} ->
- rexi:reply(attachment_chunk_received),
- Data
- after Timeout ->
- exit(timeout)
- end
- end;
-receiver(_Req, Length) ->
- exit({length_not_integer, Length}).
-
-%%
-%% internal
-%%
-
-maybe_send_continue(#httpd{mochi_req = MochiReq} = Req) ->
- case couch_httpd:header_value(Req, "expect") of
- undefined ->
- ok;
- Expect ->
- case string:to_lower(Expect) of
- "100-continue" ->
- MochiReq:start_raw_response({100, gb_trees:empty()});
- _ ->
- ok
- end
- end.
-
-write_chunks(MiddleMan, ChunkFun, State) ->
- MiddleMan ! {self(), gimme_data},
- Timeout = fabric_util:attachments_timeout(),
- receive
- {MiddleMan, ChunkRecordList} ->
- rexi:reply(attachment_chunk_received),
- case flush_chunks(ChunkRecordList, ChunkFun, State) of
- {continue, NewState} ->
- write_chunks(MiddleMan, ChunkFun, NewState);
- {done, NewState} ->
- NewState
- end
- after Timeout ->
- exit(timeout)
- end.
-
-flush_chunks([], _ChunkFun, State) ->
- {continue, State};
-flush_chunks([{0, _}], _ChunkFun, State) ->
- {done, State};
-flush_chunks([Chunk | Rest], ChunkFun, State) ->
- NewState = ChunkFun(Chunk, State),
- flush_chunks(Rest, ChunkFun, NewState).
-
-receive_unchunked_attachment(_Req, 0) ->
- ok;
-receive_unchunked_attachment(Req, Length) ->
- receive {MiddleMan, go} ->
- Data = couch_httpd:recv(Req, 0),
- MiddleMan ! {self(), Data}
- end,
- receive_unchunked_attachment(Req, Length - size(Data)).
-
-middleman(Req, chunked) ->
- % spawn a process to actually receive the uploaded data
- RcvFun = fun(ChunkRecord, ok) ->
- receive {From, go} -> From ! {self(), ChunkRecord} end, ok
- end,
- Receiver = spawn(fun() -> couch_httpd:recv_chunked(Req,4096,RcvFun,ok) end),
-
- % take requests from the DB writers and get data from the receiver
- N = erlang:list_to_integer(config:get("cluster","n")),
- Timeout = fabric_util:attachments_timeout(),
- middleman_loop(Receiver, N, [], [], Timeout);
-
-middleman(Req, Length) ->
- Receiver = spawn(fun() -> receive_unchunked_attachment(Req, Length) end),
- N = erlang:list_to_integer(config:get("cluster","n")),
- Timeout = fabric_util:attachments_timeout(),
- middleman_loop(Receiver, N, [], [], Timeout).
-
-middleman_loop(Receiver, N, Counters0, ChunkList0, Timeout) ->
- receive {From, gimme_data} ->
- % Figure out how far along this writer (From) is in the list
- ListIndex = case fabric_dict:lookup_element(From, Counters0) of
- undefined -> 0;
- I -> I
- end,
-
- % Talk to the receiver to get another chunk if necessary
- ChunkList1 = if ListIndex == length(ChunkList0) ->
- Receiver ! {self(), go},
- receive
- {Receiver, ChunkRecord} ->
- ChunkList0 ++ [ChunkRecord]
- end;
- true -> ChunkList0 end,
-
- % reply to the writer
- Reply = lists:nthtail(ListIndex, ChunkList1),
- From ! {self(), Reply},
-
- % Update the counter for this writer
- Counters1 = fabric_dict:update_counter(From, length(Reply), Counters0),
-
- % Drop any chunks that have been sent to all writers
- Size = fabric_dict:size(Counters1),
- NumToDrop = lists:min([I || {_, I} <- Counters1]),
-
- {ChunkList3, Counters3} =
- if Size == N andalso NumToDrop > 0 ->
- ChunkList2 = lists:nthtail(NumToDrop, ChunkList1),
- Counters2 = [{F, I-NumToDrop} || {F, I} <- Counters1],
- {ChunkList2, Counters2};
- true ->
- {ChunkList1, Counters1}
- end,
-
- middleman_loop(Receiver, N, Counters3, ChunkList3, Timeout)
- after Timeout ->
- exit(Receiver, kill),
- ok
- end.
diff --git a/src/fabric/src/fabric_doc_atts.erl b/src/fabric/src/fabric_doc_atts.erl
deleted file mode 100644
index a3aae80ec..000000000
--- a/src/fabric/src/fabric_doc_atts.erl
+++ /dev/null
@@ -1,170 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_doc_atts).
-
--compile(tuple_calls).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--export([
- receiver/2,
- receiver_callback/2
-]).
-
-
-receiver(_Req, undefined) ->
- <<"">>;
-receiver(_Req, {unknown_transfer_encoding, Unknown}) ->
- exit({unknown_transfer_encoding, Unknown});
-receiver(Req, chunked) ->
- MiddleMan = spawn(fun() -> middleman(Req, chunked) end),
- {fabric_attachment_receiver, MiddleMan, chunked};
-receiver(_Req, 0) ->
- <<"">>;
-receiver(Req, Length) when is_integer(Length) ->
- maybe_send_continue(Req),
- Middleman = spawn(fun() -> middleman(Req, Length) end),
- {fabric_attachment_receiver, Middleman, Length};
-receiver(_Req, Length) ->
- exit({length_not_integer, Length}).
-
-
-receiver_callback(Middleman, chunked) ->
- fun(4096, ChunkFun, State) ->
- write_chunks(Middleman, ChunkFun, State)
- end;
-receiver_callback(Middleman, Length) when is_integer(Length) ->
- fun() ->
- Middleman ! {self(), gimme_data},
- Timeout = fabric_util:attachments_timeout(),
- receive
- {Middleman, Data} ->
- rexi:reply(attachment_chunk_received),
- Data
- after Timeout ->
- exit(timeout)
- end
- end.
-
-
-%%
-%% internal
-%%
-
-maybe_send_continue(#httpd{mochi_req = MochiReq} = Req) ->
- case couch_httpd:header_value(Req, "expect") of
- undefined ->
- ok;
- Expect ->
- case string:to_lower(Expect) of
- "100-continue" ->
- MochiReq:start_raw_response({100, gb_trees:empty()});
- _ ->
- ok
- end
- end.
-
-write_chunks(MiddleMan, ChunkFun, State) ->
- MiddleMan ! {self(), gimme_data},
- Timeout = fabric_util:attachments_timeout(),
- receive
- {MiddleMan, ChunkRecordList} ->
- rexi:reply(attachment_chunk_received),
- case flush_chunks(ChunkRecordList, ChunkFun, State) of
- {continue, NewState} ->
- write_chunks(MiddleMan, ChunkFun, NewState);
- {done, NewState} ->
- NewState
- end
- after Timeout ->
- exit(timeout)
- end.
-
-flush_chunks([], _ChunkFun, State) ->
- {continue, State};
-flush_chunks([{0, _}], _ChunkFun, State) ->
- {done, State};
-flush_chunks([Chunk | Rest], ChunkFun, State) ->
- NewState = ChunkFun(Chunk, State),
- flush_chunks(Rest, ChunkFun, NewState).
-
-receive_unchunked_attachment(_Req, 0) ->
- ok;
-receive_unchunked_attachment(Req, Length) ->
- receive {MiddleMan, go} ->
- Data = couch_httpd:recv(Req, 0),
- MiddleMan ! {self(), Data}
- end,
- receive_unchunked_attachment(Req, Length - size(Data)).
-
-middleman(Req, chunked) ->
- % spawn a process to actually receive the uploaded data
- RcvFun = fun(ChunkRecord, ok) ->
- receive {From, go} -> From ! {self(), ChunkRecord} end, ok
- end,
- Receiver = spawn(fun() -> couch_httpd:recv_chunked(Req,4096,RcvFun,ok) end),
-
- % take requests from the DB writers and get data from the receiver
- N = erlang:list_to_integer(config:get("cluster","n")),
- Timeout = fabric_util:attachments_timeout(),
- middleman_loop(Receiver, N, [], [], Timeout);
-
-middleman(Req, Length) ->
- Receiver = spawn(fun() -> receive_unchunked_attachment(Req, Length) end),
- N = erlang:list_to_integer(config:get("cluster","n")),
- Timeout = fabric_util:attachments_timeout(),
- middleman_loop(Receiver, N, [], [], Timeout).
-
-middleman_loop(Receiver, N, Counters0, ChunkList0, Timeout) ->
- receive {From, gimme_data} ->
- % Figure out how far along this writer (From) is in the list
- ListIndex = case fabric_dict:lookup_element(From, Counters0) of
- undefined -> 0;
- I -> I
- end,
-
- % Talk to the receiver to get another chunk if necessary
- ChunkList1 = if ListIndex == length(ChunkList0) ->
- Receiver ! {self(), go},
- receive
- {Receiver, ChunkRecord} ->
- ChunkList0 ++ [ChunkRecord]
- end;
- true -> ChunkList0 end,
-
- % reply to the writer
- Reply = lists:nthtail(ListIndex, ChunkList1),
- From ! {self(), Reply},
-
- % Update the counter for this writer
- Counters1 = fabric_dict:update_counter(From, length(Reply), Counters0),
-
- % Drop any chunks that have been sent to all writers
- Size = fabric_dict:size(Counters1),
- NumToDrop = lists:min([I || {_, I} <- Counters1]),
-
- {ChunkList3, Counters3} =
- if Size == N andalso NumToDrop > 0 ->
- ChunkList2 = lists:nthtail(NumToDrop, ChunkList1),
- Counters2 = [{F, I-NumToDrop} || {F, I} <- Counters1],
- {ChunkList2, Counters2};
- true ->
- {ChunkList1, Counters1}
- end,
-
- middleman_loop(Receiver, N, Counters3, ChunkList3, Timeout)
- after Timeout ->
- exit(Receiver, kill),
- ok
- end.
diff --git a/src/fabric/src/fabric_doc_missing_revs.erl b/src/fabric/src/fabric_doc_missing_revs.erl
deleted file mode 100644
index 993c21dc2..000000000
--- a/src/fabric/src/fabric_doc_missing_revs.erl
+++ /dev/null
@@ -1,97 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_doc_missing_revs).
-
--export([go/2, go/3]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-go(DbName, AllIdsRevs) ->
- go(DbName, AllIdsRevs, []).
-
-go(_, [], _) ->
- {ok, []};
-go(DbName, AllIdsRevs, Options) ->
- Workers = lists:map(fun({#shard{name=Name, node=Node} = Shard, IdsRevs}) ->
- Ref = rexi:cast(Node, {fabric_rpc, get_missing_revs, [Name, IdsRevs,
- Options]}),
- Shard#shard{ref=Ref}
- end, group_idrevs_by_shard(DbName, AllIdsRevs)),
- ResultDict = dict:from_list([{Id, {{nil,Revs},[]}} || {Id, Revs} <- AllIdsRevs]),
- RexiMon = fabric_util:create_monitors(Workers),
- Acc0 = {length(Workers), ResultDict, Workers},
- try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0) of
- {timeout, {_, _, DefunctWorkers}} ->
- fabric_util:log_timeout(
- DefunctWorkers,
- "get_missing_revs"
- ),
- {error, timeout};
- Else ->
- Else
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Shard, {_WorkerLen, ResultDict, Workers}) ->
- NewWorkers = [W || #shard{node=Node} = W <- Workers, Node =/= NodeRef],
- skip_message({fabric_dict:size(NewWorkers), ResultDict, NewWorkers});
-handle_message({rexi_EXIT, _}, Worker, {W, D, Workers}) ->
- skip_message({W-1,D,lists:delete(Worker, Workers)});
-handle_message({ok, Results}, _Worker, {1, D0, _}) ->
- D = update_dict(D0, Results),
- {stop, dict:fold(fun force_reply/3, [], D)};
-handle_message({ok, Results}, Worker, {WaitingCount, D0, Workers}) ->
- D = update_dict(D0, Results),
- case dict:fold(fun maybe_reply/3, {stop, []}, D) of
- continue ->
- % still haven't heard about some Ids
- {ok, {WaitingCount - 1, D, lists:delete(Worker,Workers)}};
- {stop, FinalReply} ->
- % finished, stop the rest of the jobs
- fabric_util:cleanup(lists:delete(Worker,Workers)),
- {stop, FinalReply}
- end.
-
-force_reply(Id, {{nil,Revs}, Anc}, Acc) ->
- % never heard about this ID, assume it's missing
- [{Id, Revs, Anc} | Acc];
-force_reply(_, {[], _}, Acc) ->
- Acc;
-force_reply(Id, {Revs, Anc}, Acc) ->
- [{Id, Revs, Anc} | Acc].
-
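-% Fold over the result dict: we can stop once every Id has been resolved;
-% any Id still marked {nil, Revs} (no response yet) flips the fold to
-% continue, meaning we keep waiting for more workers.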
-maybe_reply(_, _, continue) ->
- continue;
-maybe_reply(_, {{nil, _}, _}, _) ->
- continue;
-maybe_reply(_, {[], _}, {stop, Acc}) ->
- {stop, Acc};
-maybe_reply(Id, {Revs, Anc}, {stop, Acc}) ->
- {stop, [{Id, Revs, Anc} | Acc]}.
-
-group_idrevs_by_shard(DbName, IdsRevs) ->
- dict:to_list(lists:foldl(fun({Id, Revs}, D0) ->
- lists:foldl(fun(Shard, D1) ->
- dict:append(Shard, {Id, Revs}, D1)
- end, D0, mem3:shards(DbName,Id))
- end, dict:new(), IdsRevs)).
-
-update_dict(D0, KVs) ->
- lists:foldl(fun({K,V,A}, D1) -> dict:store(K, {V,A}, D1) end, D0, KVs).
-
-skip_message({0, Dict, _Workers}) ->
- {stop, dict:fold(fun force_reply/3, [], Dict)};
-skip_message(Acc) ->
- {ok, Acc}.
diff --git a/src/fabric/src/fabric_doc_open.erl b/src/fabric/src/fabric_doc_open.erl
deleted file mode 100644
index fe3a79a1f..000000000
--- a/src/fabric/src/fabric_doc_open.erl
+++ /dev/null
@@ -1,610 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_doc_open).
-
--export([go/3]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
--record(acc, {
- dbname,
- workers,
- r,
- state,
- replies,
- node_revs = [],
- q_reply
-}).
-
-
-go(DbName, Id, Options) ->
- Handler = case proplists:get_value(doc_info, Options) of
- true -> get_doc_info;
- full -> get_full_doc_info;
- undefined -> open_doc
- end,
- Workers = fabric_util:submit_jobs(mem3:shards(DbName,Id), Handler,
- [Id, [deleted|Options]]),
- SuppressDeletedDoc = not lists:member(deleted, Options),
- N = mem3:n(DbName),
- R = couch_util:get_value(r, Options, integer_to_list(mem3:quorum(DbName))),
- Acc0 = #acc{
- dbname = DbName,
- workers = Workers,
- r = erlang:min(N, list_to_integer(R)),
- state = r_not_met,
- replies = []
- },
- RexiMon = fabric_util:create_monitors(Workers),
- try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0) of
- {ok, #acc{}=Acc} when Handler =:= open_doc ->
- Reply = handle_response(Acc),
- format_reply(Reply, SuppressDeletedDoc);
- {ok, #acc{state = r_not_met}} ->
- {error, quorum_not_met};
- {ok, #acc{q_reply = QuorumReply}} ->
- format_reply(QuorumReply, SuppressDeletedDoc);
- {timeout, #acc{workers=DefunctWorkers}} ->
- fabric_util:log_timeout(DefunctWorkers, atom_to_list(Handler)),
- {error, timeout};
- Error ->
- Error
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_message({rexi_DOWN, _, {_, Node}, _}, _Worker, Acc) ->
- NewWorkers = [W || #shard{node=N}=W <- Acc#acc.workers, N /= Node],
- case NewWorkers of
- [] ->
- {stop, Acc#acc{workers=[]}};
- _ ->
- {ok, Acc#acc{workers=NewWorkers}}
- end;
-handle_message({rexi_EXIT, _Reason}, Worker, Acc) ->
- NewWorkers = lists:delete(Worker, Acc#acc.workers),
- case NewWorkers of
- [] ->
- {stop, Acc#acc{workers=[]}};
- _ ->
- {ok, Acc#acc{workers=NewWorkers}}
- end;
-handle_message(Reply, Worker, Acc) ->
- NewReplies = fabric_util:update_counter(Reply, 1, Acc#acc.replies),
- NewNodeRevs = case Reply of
- {ok, #doc{revs = {Pos, [Rev | _]}}} ->
- [{Worker#shard.node, [{Pos, Rev}]} | Acc#acc.node_revs];
- _ ->
- Acc#acc.node_revs
- end,
- NewAcc = Acc#acc{replies = NewReplies, node_revs = NewNodeRevs},
- case is_r_met(Acc#acc.workers, NewReplies, Acc#acc.r) of
- {true, QuorumReply} ->
- fabric_util:cleanup(lists:delete(Worker, Acc#acc.workers)),
- {stop, NewAcc#acc{workers=[], state=r_met, q_reply=QuorumReply}};
- wait_for_more ->
- NewWorkers = lists:delete(Worker, Acc#acc.workers),
- {ok, NewAcc#acc{workers=NewWorkers}};
- no_more_workers ->
- {stop, NewAcc#acc{workers=[]}}
- end.
-
-handle_response(#acc{state=r_met, replies=Replies, q_reply=QuorumReply}=Acc) ->
- case {Replies, fabric_util:remove_ancestors(Replies, [])} of
- {[_], [_]} ->
- % Complete agreement amongst all copies
- QuorumReply;
- {[_|_], [{_, {QuorumReply, _}}]} ->
- % Any divergent replies are ancestors of the QuorumReply,
- % repair the document asynchronously
- spawn(fun() -> read_repair(Acc) end),
- QuorumReply;
- _Else ->
- % real disagreement amongst the workers, block for the repair
- read_repair(Acc)
- end;
-handle_response(Acc) ->
- read_repair(Acc).
-
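-% Replies is a list of {Key, {Reply, Count}} pairs built by
-% fabric_util:update_counter/3; R is met once any reply has been seen R times.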
-is_r_met(Workers, Replies, R) ->
- case lists:dropwhile(fun({_,{_, Count}}) -> Count < R end, Replies) of
- [{_,{QuorumReply, _}} | _] ->
- {true, QuorumReply};
- [] when length(Workers) > 1 ->
- wait_for_more;
- [] ->
- no_more_workers
- end.
-
-read_repair(#acc{dbname=DbName, replies=Replies, node_revs=NodeRevs}) ->
- Docs = [Doc || {_, {{ok, #doc{}=Doc}, _}} <- Replies],
- case Docs of
- % omit local docs from read repair
- [#doc{id = <<?LOCAL_DOC_PREFIX, _/binary>>} | _] ->
- choose_reply(Docs);
- [#doc{id=Id} | _] ->
- Opts = [?ADMIN_CTX, replicated_changes, {read_repair, NodeRevs}],
- Res = fabric:update_docs(DbName, Docs, Opts),
- case Res of
- {ok, []} ->
- couch_stats:increment_counter([fabric, read_repairs, success]);
- _ ->
- couch_stats:increment_counter([fabric, read_repairs, failure]),
- couch_log:notice("read_repair ~s ~s ~p", [DbName, Id, Res])
- end,
- choose_reply(Docs);
- [] ->
- % Try hard to return some sort of information
- % to the client.
- Values = [V || {_, {V, _}} <- Replies],
- case lists:member({not_found, missing}, Values) of
- true ->
- {not_found, missing};
- false when length(Values) > 0 ->
- % Sort for stability in responses in
- % case we have some weird condition
- hd(lists:sort(Values));
- false ->
- {error, read_failure}
- end
- end.
-
-choose_reply(Docs) ->
- % Sort descending by {not deleted, rev}. This should match
- % the logic of couch_doc:to_doc_info/1.
- [Winner | _] = lists:sort(fun(DocA, DocB) ->
- InfoA = {not DocA#doc.deleted, DocA#doc.revs},
- InfoB = {not DocB#doc.deleted, DocB#doc.revs},
- InfoA > InfoB
- end, Docs),
- {ok, Winner}.
-
-format_reply({ok, #full_doc_info{deleted=true}}, true) ->
- {not_found, deleted};
-format_reply({ok, #doc{deleted=true}}, true) ->
- {not_found, deleted};
-format_reply(not_found, _) ->
- {not_found, missing};
-format_reply(Else, _) ->
- Else.
-
-
-%% -ifdef(TEST).
-%% -include_lib("eunit/include/eunit.hrl").
-%%
-%% -define(MECK_MODS, [
-%% couch_log,
-%% couch_stats,
-%% fabric,
-%% fabric_util,
-%% mem3,
-%% rexi,
-%% rexi_monitor
-%% ]).
-%%
-%%
-%% setup_all() ->
-%% meck:new(?MECK_MODS, [passthrough]).
-%%
-%%
-%% teardown_all(_) ->
-%% meck:unload().
-%%
-%%
-%% setup() ->
-%% meck:reset(?MECK_MODS).
-%%
-%%
-%% teardown(_) ->
-%% ok.
-%%
-%%
-%% open_doc_test_() ->
-%% {
-%% setup,
-%% fun setup_all/0,
-%% fun teardown_all/1,
-%% {
-%% foreach,
-%% fun setup/0,
-%% fun teardown/1,
-%% [
-%% t_is_r_met(),
-%% t_handle_message_down(),
-%% t_handle_message_exit(),
-%% t_handle_message_reply(),
-%% t_store_node_revs(),
-%% t_read_repair(),
-%% t_handle_response_quorum_met(),
-%% t_get_doc_info()
-%% ]
-%% }
-%% }.
-%%
-%%
-%% t_is_r_met() ->
-%% ?_test(begin
-%% Workers0 = [],
-%% Workers1 = [nil],
-%% Workers2 = [nil, nil],
-%%
-%% SuccessCases = [
-%% {{true, foo}, [fabric_util:kv(foo, 2)], 2},
-%% {{true, foo}, [fabric_util:kv(foo, 3)], 2},
-%% {{true, foo}, [fabric_util:kv(foo, 1)], 1},
-%% {{true, foo}, [fabric_util:kv(foo, 2), fabric_util:kv(bar, 1)], 2},
-%% {{true, bar}, [fabric_util:kv(bar, 1), fabric_util:kv(bar, 2)], 2},
-%% {{true, bar}, [fabric_util:kv(bar, 2), fabric_util:kv(foo, 1)], 2}
-%% ],
-%% lists:foreach(fun({Expect, Replies, Q}) ->
-%% ?assertEqual(Expect, is_r_met(Workers0, Replies, Q))
-%% end, SuccessCases),
-%%
-%% WaitForMoreCases = [
-%% {[fabric_util:kv(foo, 1)], 2},
-%% {[fabric_util:kv(foo, 2)], 3},
-%% {[fabric_util:kv(foo, 1), fabric_util:kv(bar, 1)], 2}
-%% ],
-%% lists:foreach(fun({Replies, Q}) ->
-%% ?assertEqual(wait_for_more, is_r_met(Workers2, Replies, Q))
-%% end, WaitForMoreCases),
-%%
-%% FailureCases = [
-%% {Workers0, [fabric_util:kv(foo, 1)], 2},
-%% {Workers1, [fabric_util:kv(foo, 1)], 2},
-%% {Workers1, [fabric_util:kv(foo, 1), fabric_util:kv(bar, 1)], 2},
-%% {Workers1, [fabric_util:kv(foo, 2)], 3}
-%% ],
-%% lists:foreach(fun({Workers, Replies, Q}) ->
-%% ?assertEqual(no_more_workers, is_r_met(Workers, Replies, Q))
-%% end, FailureCases)
-%% end).
-%%
-%%
-%% t_handle_message_down() ->
-%% Node0 = 'foo@localhost',
-%% Node1 = 'bar@localhost',
-%% Down0 = {rexi_DOWN, nil, {nil, Node0}, nil},
-%% Down1 = {rexi_DOWN, nil, {nil, Node1}, nil},
-%% Workers0 = [#shard{node=Node0} || _ <- [a, b]],
-%% Worker1 = #shard{node=Node1},
-%% Workers1 = Workers0 ++ [Worker1],
-%%
-%% ?_test(begin
-%% % Stop when no more workers are left
-%% ?assertEqual(
-%% {stop, #acc{workers=[]}},
-%% handle_message(Down0, nil, #acc{workers=Workers0})
-%% ),
-%%
-%% % Continue when we have more workers
-%% ?assertEqual(
-%% {ok, #acc{workers=[Worker1]}},
-%% handle_message(Down0, nil, #acc{workers=Workers1})
-%% ),
-%%
-%% % A second DOWN removes the remaining workers
-%% ?assertEqual(
-%% {stop, #acc{workers=[]}},
-%% handle_message(Down1, nil, #acc{workers=[Worker1]})
-%% )
-%% end).
-%%
-%%
-%% t_handle_message_exit() ->
-%% Exit = {rexi_EXIT, nil},
-%% Worker0 = #shard{ref=erlang:make_ref()},
-%% Worker1 = #shard{ref=erlang:make_ref()},
-%%
-%% ?_test(begin
-%% % Only removes the specified worker
-%% ?assertEqual(
-%% {ok, #acc{workers=[Worker1]}},
-%% handle_message(Exit, Worker0, #acc{workers=[Worker0, Worker1]})
-%% ),
-%%
-%% ?assertEqual(
-%% {ok, #acc{workers=[Worker0]}},
-%% handle_message(Exit, Worker1, #acc{workers=[Worker0, Worker1]})
-%% ),
-%%
-%% % We bail if it was the last worker
-%% ?assertEqual(
-%% {stop, #acc{workers=[]}},
-%% handle_message(Exit, Worker0, #acc{workers=[Worker0]})
-%% )
-%% end).
-%%
-%%
-%% t_handle_message_reply() ->
-%% Worker0 = #shard{ref=erlang:make_ref()},
-%% Worker1 = #shard{ref=erlang:make_ref()},
-%% Worker2 = #shard{ref=erlang:make_ref()},
-%% Workers = [Worker0, Worker1, Worker2],
-%% Acc0 = #acc{workers=Workers, r=2, replies=[]},
-%%
-%% ?_test(begin
-%% meck:expect(rexi, kill_all, fun(_) -> ok end),
-%%
-%% % Test that we continue when we haven't met R yet
-%% ?assertMatch(
-%% {ok, #acc{
-%% workers=[Worker0, Worker1],
-%% replies=[{foo, {foo, 1}}]
-%% }},
-%% handle_message(foo, Worker2, Acc0)
-%% ),
-%%
-%% ?assertMatch(
-%% {ok, #acc{
-%% workers=[Worker0, Worker1],
-%% replies=[{bar, {bar, 1}}, {foo, {foo, 1}}]
-%% }},
-%% handle_message(bar, Worker2, Acc0#acc{
-%% replies=[{foo, {foo, 1}}]
-%% })
-%% ),
-%%
-%% % Test that we don't reach a quorum when R isn't met: q_reply
-%% % isn't set, the state remains unchanged, and {stop, NewAcc} is
-%% % returned. The assertions here are a bit subtle.
-%%
-%% ?assertMatch(
-%% {stop, #acc{workers=[], replies=[{foo, {foo, 1}}]}},
-%% handle_message(foo, Worker0, Acc0#acc{workers=[Worker0]})
-%% ),
-%%
-%% ?assertMatch(
-%% {stop, #acc{
-%% workers=[],
-%% replies=[{bar, {bar, 1}}, {foo, {foo, 1}}]
-%% }},
-%% handle_message(bar, Worker0, Acc0#acc{
-%% workers=[Worker0],
-%% replies=[{foo, {foo, 1}}]
-%% })
-%% ),
-%%
-%% % Check that when R is met we stop with a new state and
-%% % a q_reply.
-%%
-%% ?assertMatch(
-%% {stop, #acc{
-%% workers=[],
-%% replies=[{foo, {foo, 2}}],
-%% state=r_met,
-%% q_reply=foo
-%% }},
-%% handle_message(foo, Worker1, Acc0#acc{
-%% workers=[Worker0, Worker1],
-%% replies=[{foo, {foo, 1}}]
-%% })
-%% ),
-%%
-%% ?assertEqual(
-%% {stop, #acc{
-%% workers=[],
-%% r=1,
-%% replies=[{foo, {foo, 1}}],
-%% state=r_met,
-%% q_reply=foo
-%% }},
-%% handle_message(foo, Worker0, Acc0#acc{r=1})
-%% ),
-%%
-%% ?assertMatch(
-%% {stop, #acc{
-%% workers=[],
-%% replies=[{bar, {bar, 1}}, {foo, {foo, 2}}],
-%% state=r_met,
-%% q_reply=foo
-%% }},
-%% handle_message(foo, Worker0, Acc0#acc{
-%% workers=[Worker0],
-%% replies=[{bar, {bar, 1}}, {foo, {foo, 1}}]
-%% })
-%% )
-%% end).
-%%
-%%
-%% t_store_node_revs() ->
-%% W1 = #shard{node = w1, ref = erlang:make_ref()},
-%% W2 = #shard{node = w2, ref = erlang:make_ref()},
-%% W3 = #shard{node = w3, ref = erlang:make_ref()},
-%% Foo1 = {ok, #doc{id = <<"bar">>, revs = {1, [<<"foo">>]}}},
-%% Foo2 = {ok, #doc{id = <<"bar">>, revs = {2, [<<"foo2">>, <<"foo">>]}}},
-%% NFM = {not_found, missing},
-%%
-%% InitAcc = #acc{workers = [W1, W2, W3], replies = [], r = 2},
-%%
-%% ?_test(begin
-%% meck:expect(rexi, kill_all, fun(_) -> ok end),
-%%
-%% % Simple case
-%% {ok, #acc{node_revs = NodeRevs1}} = handle_message(Foo1, W1, InitAcc),
-%% ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs1),
-%%
-%% % Make sure we only hold the head rev
-%% {ok, #acc{node_revs = NodeRevs2}} = handle_message(Foo2, W1, InitAcc),
-%% ?assertEqual([{w1, [{2, <<"foo2">>}]}], NodeRevs2),
-%%
-%% % Make sure we don't capture anything on error
-%% {ok, #acc{node_revs = NodeRevs3}} = handle_message(NFM, W1, InitAcc),
-%% ?assertEqual([], NodeRevs3),
-%%
-%% % Make sure we accumulate node revs
-%% Acc1 = InitAcc#acc{node_revs = [{w1, [{1, <<"foo">>}]}]},
-%% {ok, #acc{node_revs = NodeRevs4}} = handle_message(Foo2, W2, Acc1),
-%% ?assertEqual(
-%% [{w2, [{2, <<"foo2">>}]}, {w1, [{1, <<"foo">>}]}],
-%% NodeRevs4
-%% ),
-%%
-%% % Make sure rexi_DOWN doesn't modify node_revs
-%% Down = {rexi_DOWN, nil, {nil, w1}, nil},
-%% {ok, #acc{node_revs = NodeRevs5}} = handle_message(Down, W2, Acc1),
-%% ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs5),
-%%
-%% % Make sure rexi_EXIT doesn't modify node_revs
-%% Exit = {rexi_EXIT, reason},
-%% {ok, #acc{node_revs = NodeRevs6}} = handle_message(Exit, W2, Acc1),
-%% ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs6),
-%%
-%% % Make sure an error doesn't remove any node revs
-%% {ok, #acc{node_revs = NodeRevs7}} = handle_message(NFM, W2, Acc1),
-%% ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs7),
-%%
-%% % Make sure we have all of our node_revs when meeting
-%% % quorum
-%% {ok, Acc2} = handle_message(Foo1, W1, InitAcc),
-%% {ok, Acc3} = handle_message(Foo2, W2, Acc2),
-%% {stop, Acc4} = handle_message(NFM, W3, Acc3),
-%% ?assertEqual(
-%% [{w2, [{2, <<"foo2">>}]}, {w1, [{1, <<"foo">>}]}],
-%% Acc4#acc.node_revs
-%% )
-%% end).
-%%
-%%
-%% t_read_repair() ->
-%% Foo1 = {ok, #doc{revs = {1,[<<"foo">>]}}},
-%% Foo2 = {ok, #doc{revs = {2,[<<"foo2">>,<<"foo">>]}}},
-%% NFM = {not_found, missing},
-%%
-%% ?_test(begin
-%% meck:expect(couch_log, notice, fun(_, _) -> ok end),
-%% meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
-%%
-%% % Test when we have actual doc data to repair
-%% meck:expect(fabric, update_docs, fun(_, [_], _) -> {ok, []} end),
-%% Acc0 = #acc{
-%% dbname = <<"name">>,
-%% replies = [fabric_util:kv(Foo1,1)]
-%% },
-%% ?assertEqual(Foo1, read_repair(Acc0)),
-%%
-%% meck:expect(fabric, update_docs, fun(_, [_, _], _) -> {ok, []} end),
-%% Acc1 = #acc{
-%% dbname = <<"name">>,
-%% replies = [fabric_util:kv(Foo1,1), fabric_util:kv(Foo2,1)]
-%% },
-%% ?assertEqual(Foo2, read_repair(Acc1)),
-%%
-%% % Test when we have nothing but errors
-%% Acc2 = #acc{replies=[fabric_util:kv(NFM, 1)]},
-%% ?assertEqual(NFM, read_repair(Acc2)),
-%%
-%% Acc3 = #acc{replies=[fabric_util:kv(NFM,1), fabric_util:kv(foo,2)]},
-%% ?assertEqual(NFM, read_repair(Acc3)),
-%%
-%% Acc4 = #acc{replies=[fabric_util:kv(foo,1), fabric_util:kv(bar,1)]},
-%% ?assertEqual(bar, read_repair(Acc4))
-%% end).
-%%
-%%
-%% t_handle_response_quorum_met() ->
-%% Foo1 = {ok, #doc{revs = {1,[<<"foo">>]}}},
-%% Foo2 = {ok, #doc{revs = {2,[<<"foo2">>,<<"foo">>]}}},
-%% Bar1 = {ok, #doc{revs = {1,[<<"bar">>]}}},
-%%
-%% ?_test(begin
-%% meck:expect(couch_log, notice, fun(_, _) -> ok end),
-%% meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, []} end),
-%% meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
-%%
-%% BasicOkAcc = #acc{
-%% state=r_met,
-%% replies=[fabric_util:kv(Foo1,2)],
-%% q_reply=Foo1
-%% },
-%% ?assertEqual(Foo1, handle_response(BasicOkAcc)),
-%%
-%% WithAncestorsAcc = #acc{
-%% state=r_met,
-%% replies=[fabric_util:kv(Foo1,1), fabric_util:kv(Foo2,2)],
-%% q_reply=Foo2
-%% },
-%% ?assertEqual(Foo2, handle_response(WithAncestorsAcc)),
-%%
-%% % This also checks when the quorum isn't the most recent
-%% % revision.
-%% DeeperWinsAcc = #acc{
-%% state=r_met,
-%% replies=[fabric_util:kv(Foo1,2), fabric_util:kv(Foo2,1)],
-%% q_reply=Foo1
-%% },
-%% ?assertEqual(Foo2, handle_response(DeeperWinsAcc)),
-%%
-%% % Check that we return the proper doc based on rev
-%% % (ie, pos is equal)
-%% BiggerRevWinsAcc = #acc{
-%% state=r_met,
-%% replies=[fabric_util:kv(Foo1,1), fabric_util:kv(Bar1,2)],
-%% q_reply=Bar1
-%% },
-%% ?assertEqual(Foo1, handle_response(BiggerRevWinsAcc))
-%%
-%% % r_not_met is a proxy to read_repair so we rely on
-%% % read_repair_test for those conditions.
-%% end).
-%%
-%%
-%% t_get_doc_info() ->
-%% ?_test(begin
-%% meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, []} end),
-%% meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
-%% meck:expect(fabric_util, submit_jobs, fun(_, _, _) -> ok end),
-%% meck:expect(fabric_util, create_monitors, fun(_) -> ok end),
-%% meck:expect(rexi_monitor, stop, fun(_) -> ok end),
-%% meck:expect(mem3, shards, fun(_, _) -> ok end),
-%% meck:expect(mem3, n, fun(_) -> 3 end),
-%% meck:expect(mem3, quorum, fun(_) -> 2 end),
-%%
-%% meck:expect(fabric_util, recv, fun(_, _, _, _) ->
-%% {ok, #acc{state = r_not_met}}
-%% end),
-%% Rsp1 = fabric_doc_open:go("test", "one", [doc_info]),
-%% ?assertEqual({error, quorum_not_met}, Rsp1),
-%%
-%% Rsp2 = fabric_doc_open:go("test", "one", [{doc_info, full}]),
-%% ?assertEqual({error, quorum_not_met}, Rsp2),
-%%
-%% meck:expect(fabric_util, recv, fun(_, _, _, _) ->
-%% {ok, #acc{state = r_met, q_reply = not_found}}
-%% end),
-%% MissingRsp1 = fabric_doc_open:go("test", "one", [doc_info]),
-%% ?assertEqual({not_found, missing}, MissingRsp1),
-%% MissingRsp2 = fabric_doc_open:go("test", "one", [{doc_info, full}]),
-%% ?assertEqual({not_found, missing}, MissingRsp2),
-%%
-%% meck:expect(fabric_util, recv, fun(_, _, _, _) ->
-%% A = #doc_info{},
-%% {ok, #acc{state = r_met, q_reply = {ok, A}}}
-%% end),
-%% {ok, Rec1} = fabric_doc_open:go("test", "one", [doc_info]),
-%% ?assert(is_record(Rec1, doc_info)),
-%%
-%% meck:expect(fabric_util, recv, fun(_, _, _, _) ->
-%% A = #full_doc_info{deleted = true},
-%% {ok, #acc{state = r_met, q_reply = {ok, A}}}
-%% end),
-%% Rsp3 = fabric_doc_open:go("test", "one", [{doc_info, full}]),
-%% ?assertEqual({not_found, deleted}, Rsp3),
-%% {ok, Rec2} = fabric_doc_open:go("test", "one", [{doc_info, full},deleted]),
-%% ?assert(is_record(Rec2, full_doc_info))
-%% end).
-%%
-%% -endif.
diff --git a/src/fabric/src/fabric_doc_open_revs.erl b/src/fabric/src/fabric_doc_open_revs.erl
deleted file mode 100644
index aa7f53e9b..000000000
--- a/src/fabric/src/fabric_doc_open_revs.erl
+++ /dev/null
@@ -1,799 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_doc_open_revs).
-
--export([go/4]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
-
--record(state, {
- dbname,
- worker_count,
- workers,
- reply_count = 0,
- reply_error_count = 0,
- r,
- revs,
- latest,
- replies = [],
- node_revs = [],
- repair = false
-}).
-
-go(DbName, Id, Revs, Options) ->
- Workers = fabric_util:submit_jobs(mem3:shards(DbName,Id), open_revs,
- [Id, Revs, Options]),
- R = couch_util:get_value(r, Options, integer_to_list(mem3:quorum(DbName))),
- State = #state{
- dbname = DbName,
- worker_count = length(Workers),
- workers = Workers,
- r = list_to_integer(R),
- revs = Revs,
- latest = lists:member(latest, Options),
- replies = []
- },
- RexiMon = fabric_util:create_monitors(Workers),
- try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, State) of
- {ok, all_workers_died} ->
- {error, all_workers_died};
- {ok, Replies} ->
- {ok, Replies};
- {timeout, #state{workers=DefunctWorkers}} ->
- fabric_util:log_timeout(DefunctWorkers, "open_revs"),
- {error, timeout};
- Else ->
- Else
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-
-handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Worker, #state{workers=Workers}=State) ->
- NewState = State#state{
- workers = lists:keydelete(NodeRef, #shard.node, Workers),
- reply_error_count = State#state.reply_error_count + 1
- },
- handle_message({ok, []}, nil, NewState);
-
-handle_message({rexi_EXIT, _}, Worker, #state{workers=Workers}=State) ->
- NewState = State#state{
- workers = lists:delete(Worker, Workers),
- reply_error_count = State#state.reply_error_count + 1
- },
- handle_message({ok, []}, nil, NewState);
-
-handle_message({ok, RawReplies}, Worker, State) ->
- #state{
- dbname = DbName,
- reply_count = ReplyCount,
- worker_count = WorkerCount,
- workers = Workers,
- replies = PrevReplies,
- node_revs = PrevNodeRevs,
- r = R,
- revs = Revs,
- latest = Latest,
- repair = InRepair,
- reply_error_count = ReplyErrorCount
- } = State,
-
- IsTree = Revs == all orelse Latest,
-
- % Do not count error replies when checking quorum
- RealReplyCount = ReplyCount + 1 - ReplyErrorCount,
- QuorumReplies = RealReplyCount >= R,
- {NewReplies, QuorumMet, Repair} = case IsTree of
- true ->
- {NewReplies0, AllInternal, Repair0} =
- tree_replies(PrevReplies, tree_sort(RawReplies)),
- NumLeafs = couch_key_tree:count_leafs(PrevReplies),
- SameNumRevs = length(RawReplies) == NumLeafs,
- QMet = AllInternal andalso SameNumRevs andalso QuorumReplies,
- % Don't set repair=true on the first reply
- {NewReplies0, QMet, (ReplyCount > 0) and Repair0};
- false ->
- {NewReplies0, MinCount} = dict_replies(PrevReplies, RawReplies),
- {NewReplies0, MinCount >= R, false}
- end,
- NewNodeRevs = if Worker == nil -> PrevNodeRevs; true ->
- IdRevs = lists:foldl(fun
- ({ok, #doc{revs = {Pos, [Rev | _]}}}, Acc) ->
- [{Pos, Rev} | Acc];
- (_, Acc) ->
- Acc
- end, [], RawReplies),
- if IdRevs == [] -> PrevNodeRevs; true ->
- [{Worker#shard.node, IdRevs} | PrevNodeRevs]
- end
- end,
-
- Complete = (ReplyCount =:= (WorkerCount - 1)),
-
- case QuorumMet orelse Complete of
- true ->
- fabric_util:cleanup(lists:delete(Worker, Workers)),
- maybe_read_repair(
- DbName,
- IsTree,
- NewReplies,
- NewNodeRevs,
- ReplyCount + 1,
- InRepair orelse Repair
- ),
- {stop, format_reply(IsTree, NewReplies, RealReplyCount)};
- false ->
- {ok, State#state{
- replies = NewReplies,
- node_revs = NewNodeRevs,
- reply_count = ReplyCount + 1,
- workers = lists:delete(Worker, Workers),
- repair = InRepair orelse Repair
- }}
- end.
-
-
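-% Merge each reply into the accumulated revision tree. The second element of
-% the result is true when every reply was already an internal node (no new
-% information), and the third is true when a read repair is needed.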
-tree_replies(RevTree, []) ->
- {RevTree, true, false};
-
-tree_replies(RevTree0, [{ok, Doc} | Rest]) ->
- {RevTree1, Done, Repair} = tree_replies(RevTree0, Rest),
- Path = couch_doc:to_path(Doc),
- case couch_key_tree:merge(RevTree1, Path) of
- {RevTree2, internal_node} ->
- {RevTree2, Done, Repair};
- {RevTree2, new_leaf} ->
- {RevTree2, Done, true};
- {RevTree2, _} ->
- {RevTree2, false, true}
- end;
-
-tree_replies(RevTree0, [{{not_found, missing}, {Pos, Rev}} | Rest]) ->
- {RevTree1, Done, Repair} = tree_replies(RevTree0, Rest),
- Node = {Rev, ?REV_MISSING, []},
- Path = {Pos, Node},
- case couch_key_tree:merge(RevTree1, Path) of
- {RevTree2, internal_node} ->
- {RevTree2, Done, true};
- {RevTree2, _} ->
- {RevTree2, false, Repair}
- end.
-
-
-tree_sort(Replies) ->
- SortFun = fun(A, B) -> sort_key(A) =< sort_key(B) end,
- lists:sort(SortFun, Replies).
-
-
-sort_key({ok, #doc{revs = {Pos, [Rev | _]}}}) ->
- {Pos, Rev};
-sort_key({{not_found, _}, {Pos, Rev}}) ->
- {Pos, Rev}.
-
-
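-% Dict is the {Key, {Reply, Count}} list from fabric_util:update_counter/3;
-% the minimum count across entries is what gets compared against R.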
-dict_replies(Dict, []) ->
- case [Count || {_Key, {_Reply, Count}} <- Dict] of
- [] -> {Dict, 0};
- Counts -> {Dict, lists:min(Counts)}
- end;
-
-dict_replies(Dict, [Reply | Rest]) ->
- NewDict = fabric_util:update_counter(Reply, 1, Dict),
- dict_replies(NewDict, Rest).
-
-
-maybe_read_repair(Db, IsTree, Replies, NodeRevs, ReplyCount, DoRepair) ->
- Docs = case IsTree of
- true -> tree_repair_docs(Replies, DoRepair);
- false -> dict_repair_docs(Replies, ReplyCount)
- end,
- case Docs of
- [] ->
- ok;
- _ ->
- erlang:spawn(fun() -> read_repair(Db, Docs, NodeRevs) end)
- end.
-
-
-tree_repair_docs(_Replies, false) ->
- [];
-
-tree_repair_docs(Replies, true) ->
- Leafs = couch_key_tree:get_all_leafs(Replies),
- [Doc || {Doc, {_Pos, _}} <- Leafs, is_record(Doc, doc)].
-
-
-dict_repair_docs(Replies, ReplyCount) ->
- NeedsRepair = lists:any(fun({_, {_, C}}) -> C < ReplyCount end, Replies),
- if not NeedsRepair -> []; true ->
- [Doc || {_, {{ok, Doc}, _}} <- Replies]
- end.
-
-
-read_repair(Db, Docs, NodeRevs) ->
- Opts = [?ADMIN_CTX, replicated_changes, {read_repair, NodeRevs}],
- Res = fabric:update_docs(Db, Docs, Opts),
- case Res of
- {ok, []} ->
- couch_stats:increment_counter([fabric, read_repairs, success]);
- _ ->
- couch_stats:increment_counter([fabric, read_repairs, failure]),
- [#doc{id = Id} | _] = Docs,
- couch_log:notice("read_repair ~s ~s ~p", [Db, Id, Res])
- end.
-
-
-format_reply(_, _, RealReplyCount) when RealReplyCount =< 0 ->
- all_workers_died;
-
-format_reply(true, Replies, _) ->
- tree_format_replies(Replies);
-
-format_reply(false, Replies, _) ->
- dict_format_replies(Replies).
-
-
-tree_format_replies(RevTree) ->
- Leafs = couch_key_tree:get_all_leafs(RevTree),
- lists:sort(lists:map(fun(Reply) ->
- case Reply of
- {?REV_MISSING, {Pos, [Rev]}} ->
- {{not_found, missing}, {Pos, Rev}};
- {Doc, _} when is_record(Doc, doc) ->
- {ok, Doc}
- end
- end, Leafs)).
-
-
-dict_format_replies(Dict) ->
- Replies0 = [Reply || {_, {Reply, _}} <- Dict],
-
- AllFoundRevs = lists:foldl(fun(Reply, Acc) ->
- case Reply of
- {ok, #doc{revs = {Pos, [RevId | _]}}} ->
- [{Pos, RevId} | Acc];
- _ ->
- Acc
- end
- end, [], Replies0),
-
- %% Drop any not_found replies for which we
- %% found the revision on a different node.
- Replies1 = lists:filter(fun(Reply) ->
- case Reply of
- {{not_found, missing}, Rev} ->
- not lists:member(Rev, AllFoundRevs);
- _ ->
- true
- end
- end, Replies0),
-
- % Remove replies with shorter revision
- % paths for a given revision.
- collapse_duplicate_revs(Replies1).
-
-
-collapse_duplicate_revs(Replies) ->
- % The collapse logic requires that replies are
- % sorted so that shorter rev paths are in
- % the list just before longer lists.
- %
- % This somewhat implicitly relies on Erlang's
- % sorting of [A, B] < [A, B, C] for all values
- % of C.
- collapse_duplicate_revs_int(lists:sort(Replies)).
-
-
-collapse_duplicate_revs_int([]) ->
- [];
-
-collapse_duplicate_revs_int([{ok, Doc1}, {ok, Doc2} | Rest]) ->
- {D1, R1} = Doc1#doc.revs,
- {D2, R2} = Doc2#doc.revs,
- Head = case D1 == D2 andalso lists:prefix(R1, R2) of
- true -> [];
- false -> [{ok, Doc1}]
- end,
- Head ++ collapse_duplicate_revs([{ok, Doc2} | Rest]);
-
-collapse_duplicate_revs_int([Reply | Rest]) ->
- [Reply | collapse_duplicate_revs(Rest)].
-
-
-%% -ifdef(TEST).
-%% -include_lib("eunit/include/eunit.hrl").
-%%
-%%
-%% setup_all() ->
-%% config:start_link([]),
-%% meck:new([fabric, couch_stats, couch_log]),
-%% meck:new(fabric_util, [passthrough]),
-%% meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, nil} end),
-%% meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
-%% meck:expect(couch_log, notice, fun(_, _) -> ok end),
-%% meck:expect(fabric_util, cleanup, fun(_) -> ok end).
-%%
-%%
-%%
-%% teardown_all(_) ->
-%% meck:unload(),
-%% config:stop().
-%%
-%%
-%% setup() ->
-%% meck:reset([
-%% couch_log,
-%% couch_stats,
-%% fabric,
-%% fabric_util
-%% ]).
-%%
-%%
-%% teardown(_) ->
-%% ok.
-%%
-%%
-%% state0(Revs, Latest) ->
-%% #state{
-%% worker_count = 3,
-%% workers =
-%% [#shard{node='node1'}, #shard{node='node2'}, #shard{node='node3'}],
-%% r = 2,
-%% revs = Revs,
-%% latest = Latest
-%% }.
-%%
-%%
-%% revs() -> [{1,<<"foo">>}, {1,<<"bar">>}, {1,<<"baz">>}].
-%%
-%%
-%% foo1() -> {ok, #doc{revs = {1, [<<"foo">>]}}}.
-%% foo2() -> {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}}.
-%% foo2stemmed() -> {ok, #doc{revs = {2, [<<"foo2">>]}}}.
-%% fooNF() -> {{not_found, missing}, {1,<<"foo">>}}.
-%% foo2NF() -> {{not_found, missing}, {2, <<"foo2">>}}.
-%% bar1() -> {ok, #doc{revs = {1, [<<"bar">>]}}}.
-%% barNF() -> {{not_found, missing}, {1,<<"bar">>}}.
-%% bazNF() -> {{not_found, missing}, {1,<<"baz">>}}.
-%% baz1() -> {ok, #doc{revs = {1, [<<"baz">>]}}}.
-%%
-%%
-%%
-%% open_doc_revs_test_() ->
-%% {
-%% setup,
-%% fun setup_all/0,
-%% fun teardown_all/1,
-%% {
-%% foreach,
-%% fun setup/0,
-%% fun teardown/1,
-%% [
-%% check_empty_response_not_quorum(),
-%% check_basic_response(),
-%% check_finish_quorum(),
-%% check_finish_quorum_newer(),
-%% check_no_quorum_on_second(),
-%% check_done_on_third(),
-%% check_specific_revs_first_msg(),
-%% check_revs_done_on_agreement(),
-%% check_latest_true(),
-%% check_ancestor_counted_in_quorum(),
-%% check_not_found_counts_for_descendant(),
-%% check_worker_error_skipped(),
-%% check_quorum_only_counts_valid_responses(),
-%% check_empty_list_when_no_workers_reply(),
-%% check_node_rev_stored(),
-%% check_node_rev_store_head_only(),
-%% check_node_rev_store_multiple(),
-%% check_node_rev_dont_store_errors(),
-%% check_node_rev_store_non_errors(),
-%% check_node_rev_store_concatenate(),
-%% check_node_rev_store_concantenate_multiple(),
-%% check_node_rev_unmodified_on_down_or_exit(),
-%% check_not_found_replies_are_removed_when_doc_found(),
-%% check_not_found_returned_when_one_of_docs_not_found(),
-%% check_not_found_returned_when_doc_not_found(),
-%% check_longer_rev_list_returned(),
-%% check_longer_rev_list_not_combined(),
-%% check_not_found_removed_and_longer_rev_list()
-%% ]
-%% }
-%% }.
-%%
-%%
-%% % Tests for revs=all
-%%
-%%
-%% check_empty_response_not_quorum() ->
-%% % Simple smoke test that we don't consider ourselves
-%% % done after a first empty response
-%% W1 = #shard{node='node1'},
-%% W2 = #shard{node='node2'},
-%% W3 = #shard{node='node3'},
-%% ?_assertMatch(
-%% {ok, #state{workers = [W2, W3]}},
-%% handle_message({ok, []}, W1, state0(all, false))
-%% ).
-%%
-%%
-%% check_basic_response() ->
-%% % Check that we've handled a response
-%% W1 = #shard{node='node1'},
-%% W2 = #shard{node='node2'},
-%% W3 = #shard{node='node3'},
-%% ?_assertMatch(
-%% {ok, #state{reply_count = 1, workers = [W2, W3]}},
-%% handle_message({ok, [foo1(), bar1()]}, W1, state0(all, false))
-%% ).
-%%
-%%
-%% check_finish_quorum() ->
-%% % Two messages with the same revisions means we're done
-%% ?_test(begin
-%% W1 = #shard{node='node1'},
-%% W2 = #shard{node='node2'},
-%% S0 = state0(all, false),
-%% {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
-%% Expect = {stop, [bar1(), foo1()]},
-%% ?assertEqual(Expect, handle_message({ok, [foo1(), bar1()]}, W2, S1))
-%% end).
-%%
-%%
-%% check_finish_quorum_newer() ->
-%% % A descendant of a revision counts toward its quorum, so foo1
-%% % should count for foo2, which means we're finished.
-%% % We also validate that read_repair was triggered.
-%% ?_test(begin
-%% W1 = #shard{node='node1'},
-%% W2 = #shard{node='node2'},
-%% S0 = state0(all, false),
-%% {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
-%% Expect = {stop, [bar1(), foo2()]},
-%% ok = meck:reset(fabric),
-%% ?assertEqual(Expect, handle_message({ok, [foo2(), bar1()]}, W2, S1)),
-%% ok = meck:wait(fabric, update_docs, '_', 5000),
-%% ?assertMatch(
-%% [{_, {fabric, update_docs, [_, _, _]}, _}],
-%% meck:history(fabric)
-%% )
-%% end).
-%%
-%%
-%% check_no_quorum_on_second() ->
-%% % Quorum not yet met for the foo revision so we
-%% % would wait for w3
-%% ?_test(begin
-%% W1 = #shard{node='node1'},
-%% W2 = #shard{node='node2'},
-%% W3 = #shard{node='node3'},
-%% S0 = state0(all, false),
-%% {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
-%% ?assertMatch(
-%% {ok, #state{workers = [W3]}},
-%% handle_message({ok, [bar1()]}, W2, S1)
-%% )
-%% end).
-%%
-%%
-%% check_done_on_third() ->
-%% % The third message of three means we're done no matter
-%% % what. Every revision seen in this pattern should be
-%% % included.
-%% ?_test(begin
-%% W1 = #shard{node='node1'},
-%% W2 = #shard{node='node2'},
-%% W3 = #shard{node='node3'},
-%% S0 = state0(all, false),
-%% {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
-%% {ok, S2} = handle_message({ok, [bar1()]}, W2, S1),
-%% Expect = {stop, [bar1(), foo1()]},
-%% ?assertEqual(Expect, handle_message({ok, [bar1()]}, W3, S2))
-%% end).
-%%
-%%
-%% % Tests for a specific list of revs
-%%
-%%
-%% check_specific_revs_first_msg() ->
-%% ?_test(begin
-%% W1 = #shard{node='node1'},
-%% W2 = #shard{node='node2'},
-%% W3 = #shard{node='node3'},
-%% S0 = state0(revs(), false),
-%% ?assertMatch(
-%% {ok, #state{reply_count = 1, workers = [W2, W3]}},
-%% handle_message({ok, [foo1(), bar1(), bazNF()]}, W1, S0)
-%% )
-%% end).
-%%
-%%
-%% check_revs_done_on_agreement() ->
-%% ?_test(begin
-%% W1 = #shard{node='node1'},
-%% W2 = #shard{node='node2'},
-%% S0 = state0(revs(), false),
-%% Msg = {ok, [foo1(), bar1(), bazNF()]},
-%% {ok, S1} = handle_message(Msg, W1, S0),
-%% Expect = {stop, [bar1(), foo1(), bazNF()]},
-%% ?assertEqual(Expect, handle_message(Msg, W2, S1))
-%% end).
-%%
-%%
-%% check_latest_true() ->
-%% ?_test(begin
-%% W1 = #shard{node='node1'},
-%% W2 = #shard{node='node2'},
-%% S0 = state0(revs(), true),
-%% Msg1 = {ok, [foo2(), bar1(), bazNF()]},
-%% Msg2 = {ok, [foo2(), bar1(), bazNF()]},
-%% {ok, S1} = handle_message(Msg1, W1, S0),
-%% Expect = {stop, [bar1(), foo2(), bazNF()]},
-%% ?assertEqual(Expect, handle_message(Msg2, W2, S1))
-%% end).
-%%
-%%
-%% check_ancestor_counted_in_quorum() ->
-%% ?_test(begin
-%% W1 = #shard{node='node1'},
-%% W2 = #shard{node='node2'},
-%% S0 = state0(revs(), true),
-%% Msg1 = {ok, [foo1(), bar1(), bazNF()]},
-%% Msg2 = {ok, [foo2(), bar1(), bazNF()]},
-%% Expect = {stop, [bar1(), foo2(), bazNF()]},
-%%
-%% % Older first
-%% {ok, S1} = handle_message(Msg1, W1, S0),
-%% ?assertEqual(Expect, handle_message(Msg2, W2, S1)),
-%%
-%% % Newer first
-%% {ok, S2} = handle_message(Msg2, W2, S0),
-%% ?assertEqual(Expect, handle_message(Msg1, W1, S2))
-%% end).
-%%
-%%
-%% check_not_found_counts_for_descendant() ->
-%% ?_test(begin
-%% W1 = #shard{node='node1'},
-%% W2 = #shard{node='node2'},
-%% S0 = state0(revs(), true),
-%% Msg1 = {ok, [foo1(), bar1(), bazNF()]},
-%% Msg2 = {ok, [foo1(), bar1(), baz1()]},
-%% Expect = {stop, [bar1(), baz1(), foo1()]},
-%%
-%% % not_found first
-%% {ok, S1} = handle_message(Msg1, W1, S0),
-%% ?assertEqual(Expect, handle_message(Msg2, W2, S1)),
-%%
-%% % not_found second
-%% {ok, S2} = handle_message(Msg2, W2, S0),
-%% ?assertEqual(Expect, handle_message(Msg1, W1, S2))
-%% end).
-%%
-%%
-%% check_worker_error_skipped() ->
-%% ?_test(begin
-%% W1 = #shard{node='node1'},
-%% W2 = #shard{node='node2'},
-%% W3 = #shard{node='node3'},
-%% S0 = state0(revs(), true),
-%% Msg1 = {ok, [foo1(), bar1(), baz1()]},
-%% Msg2 = {rexi_EXIT, reason},
-%% Msg3 = {ok, [foo1(), bar1(), baz1()]},
-%% Expect = {stop, [bar1(), baz1(), foo1()]},
-%%
-%% {ok, S1} = handle_message(Msg1, W1, S0),
-%% {ok, S2} = handle_message(Msg2, W2, S1),
-%% ?assertEqual(Expect, handle_message(Msg3, W3, S2))
-%% end).
-%%
-%%
-%% check_quorum_only_counts_valid_responses() ->
-%% ?_test(begin
-%% W1 = #shard{node='node1'},
-%% W2 = #shard{node='node2'},
-%% W3 = #shard{node='node3'},
-%% S0 = state0(revs(), true),
-%% Msg1 = {rexi_EXIT, reason},
-%% Msg2 = {rexi_EXIT, reason},
-%% Msg3 = {ok, [foo1(), bar1(), baz1()]},
-%% Expect = {stop, [bar1(), baz1(), foo1()]},
-%%
-%% {ok, S1} = handle_message(Msg1, W1, S0),
-%% {ok, S2} = handle_message(Msg2, W2, S1),
-%% ?assertEqual(Expect, handle_message(Msg3, W3, S2))
-%% end).
-%%
-%%
-%% check_empty_list_when_no_workers_reply() ->
-%% ?_test(begin
-%% W1 = #shard{node='node1'},
-%% W2 = #shard{node='node2'},
-%% W3 = #shard{node='node3'},
-%% S0 = state0(revs(), true),
-%% Msg1 = {rexi_EXIT, reason},
-%% Msg2 = {rexi_EXIT, reason},
-%% Msg3 = {rexi_DOWN, nodedown, {nil, node()}, nil},
-%% Expect = {stop, all_workers_died},
-%%
-%% {ok, S1} = handle_message(Msg1, W1, S0),
-%% {ok, S2} = handle_message(Msg2, W2, S1),
-%% ?assertEqual(Expect, handle_message(Msg3, W3, S2))
-%% end).
-%%
-%%
-%% check_node_rev_stored() ->
-%% ?_test(begin
-%% W1 = #shard{node = node1},
-%% S0 = state0([], true),
-%%
-%% {ok, S1} = handle_message({ok, [foo1()]}, W1, S0),
-%% ?assertEqual([{node1, [{1, <<"foo">>}]}], S1#state.node_revs)
-%% end).
-%%
-%%
-%% check_node_rev_store_head_only() ->
-%% ?_test(begin
-%% W1 = #shard{node = node1},
-%% S0 = state0([], true),
-%%
-%% {ok, S1} = handle_message({ok, [foo2()]}, W1, S0),
-%% ?assertEqual([{node1, [{2, <<"foo2">>}]}], S1#state.node_revs)
-%% end).
-%%
-%%
-%% check_node_rev_store_multiple() ->
-%% ?_test(begin
-%% W1 = #shard{node = node1},
-%% S0 = state0([], true),
-%%
-%% {ok, S1} = handle_message({ok, [foo1(), foo2()]}, W1, S0),
-%% ?assertEqual(
-%% [{node1, [{2, <<"foo2">>}, {1, <<"foo">>}]}],
-%% S1#state.node_revs
-%% )
-%% end).
-%%
-%%
-%% check_node_rev_dont_store_errors() ->
-%% ?_test(begin
-%% W1 = #shard{node = node1},
-%% S0 = state0([], true),
-%%
-%% {ok, S1} = handle_message({ok, [barNF()]}, W1, S0),
-%% ?assertEqual([], S1#state.node_revs)
-%% end).
-%%
-%%
-%% check_node_rev_store_non_errors() ->
-%% ?_test(begin
-%% W1 = #shard{node = node1},
-%% S0 = state0([], true),
-%%
-%% {ok, S1} = handle_message({ok, [foo1(), barNF()]}, W1, S0),
-%% ?assertEqual([{node1, [{1, <<"foo">>}]}], S1#state.node_revs)
-%% end).
-%%
-%%
-%% check_node_rev_store_concatenate() ->
-%% ?_test(begin
-%% W2 = #shard{node = node2},
-%% S0 = state0([], true),
-%% S1 = S0#state{node_revs = [{node1, [{1, <<"foo">>}]}]},
-%%
-%% {ok, S2} = handle_message({ok, [foo2()]}, W2, S1),
-%% ?assertEqual(
-%% [{node2, [{2, <<"foo2">>}]}, {node1, [{1, <<"foo">>}]}],
-%% S2#state.node_revs
-%% )
-%% end).
-%%
-%%
-%% check_node_rev_store_concantenate_multiple() ->
-%% ?_test(begin
-%% W2 = #shard{node = node2},
-%% S0 = state0([], true),
-%% S1 = S0#state{node_revs = [{node1, [{1, <<"foo">>}]}]},
-%%
-%% {ok, S2} = handle_message({ok, [foo2(), bar1()]}, W2, S1),
-%% ?assertEqual(
-%% [
-%% {node2, [{1, <<"bar">>}, {2, <<"foo2">>}]},
-%% {node1, [{1, <<"foo">>}]}
-%% ],
-%% S2#state.node_revs
-%% )
-%% end).
-%%
-%%
-%% check_node_rev_unmodified_on_down_or_exit() ->
-%% ?_test(begin
-%% W2 = #shard{node = node2},
-%% S0 = state0([], true),
-%% S1 = S0#state{node_revs = [{node1, [{1, <<"foo">>}]}]},
-%%
-%% Down = {rexi_DOWN, nodedown, {nil, node()}, nil},
-%% {ok, S2} = handle_message(Down, W2, S1),
-%% ?assertEqual(
-%% [{node1, [{1, <<"foo">>}]}],
-%% S2#state.node_revs
-%% ),
-%%
-%% Exit = {rexi_EXIT, reason},
-%% {ok, S3} = handle_message(Exit, W2, S1),
-%% ?assertEqual(
-%% [{node1, [{1, <<"foo">>}]}],
-%% S3#state.node_revs
-%% )
-%% end).
-%%
-%%
-%% check_not_found_replies_are_removed_when_doc_found() ->
-%% ?_test(begin
-%% Replies = replies_to_dict([foo1(), bar1(), fooNF()]),
-%% Expect = [bar1(), foo1()],
-%% ?assertEqual(Expect, dict_format_replies(Replies))
-%% end).
-%%
-%% check_not_found_returned_when_one_of_docs_not_found() ->
-%% ?_test(begin
-%% Replies = replies_to_dict([foo1(), foo2(), barNF()]),
-%% Expect = [foo1(), foo2(), barNF()],
-%% ?assertEqual(Expect, dict_format_replies(Replies))
-%% end).
-%%
-%% check_not_found_returned_when_doc_not_found() ->
-%% ?_test(begin
-%% Replies = replies_to_dict([fooNF(), barNF(), bazNF()]),
-%% Expect = [barNF(), bazNF(), fooNF()],
-%% ?assertEqual(Expect, dict_format_replies(Replies))
-%% end).
-%%
-%% check_longer_rev_list_returned() ->
-%% ?_test(begin
-%% Replies = replies_to_dict([foo2(), foo2stemmed()]),
-%% Expect = [foo2()],
-%% ?assertEqual(2, length(Replies)),
-%% ?assertEqual(Expect, dict_format_replies(Replies))
-%% end).
-%%
-%% check_longer_rev_list_not_combined() ->
-%% ?_test(begin
-%% Replies = replies_to_dict([foo2(), foo2stemmed(), bar1()]),
-%% Expect = [bar1(), foo2()],
-%% ?assertEqual(3, length(Replies)),
-%% ?assertEqual(Expect, dict_format_replies(Replies))
-%% end).
-%%
-%% check_not_found_removed_and_longer_rev_list() ->
-%% ?_test(begin
-%% Replies = replies_to_dict([foo2(), foo2stemmed(), foo2NF()]),
-%% Expect = [foo2()],
-%% ?assertEqual(3, length(Replies)),
-%% ?assertEqual(Expect, dict_format_replies(Replies))
-%% end).
-%%
-%%
-%% replies_to_dict(Replies) ->
-%% [reply_to_element(R) || R <- Replies].
-%%
-%% reply_to_element({ok, #doc{revs = Revs}} = Reply) ->
-%% {_, [Rev | _]} = Revs,
-%% {{Rev, Revs}, {Reply, 1}};
-%% reply_to_element(Reply) ->
-%% {Reply, {Reply, 1}}.
-%%
-%% -endif.
diff --git a/src/fabric/src/fabric_doc_purge.erl b/src/fabric/src/fabric_doc_purge.erl
deleted file mode 100644
index bda9039ba..000000000
--- a/src/fabric/src/fabric_doc_purge.erl
+++ /dev/null
@@ -1,571 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_doc_purge).
-
-
--export([
- go/3
-]).
-
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-
--record(acc, {
- worker_uuids,
- resps,
- uuid_counts,
- w
-}).
-
-
-go(_, [], _) ->
- {ok, []};
-go(DbName, IdsRevs, Options) ->
- % Generate our purge requests of {UUID, DocId, Revs}
- {UUIDs, Reqs} = create_reqs(IdsRevs, [], []),
-
- % Fire off rexi workers for each shard.
- {Workers, WorkerUUIDs} = dict:fold(fun(Shard, ShardReqs, {Ws, WUUIDs}) ->
- #shard{name = ShardDbName, node = Node} = Shard,
- Args = [ShardDbName, ShardReqs, Options],
- Ref = rexi:cast(Node, {fabric_rpc, purge_docs, Args}),
- Worker = Shard#shard{ref=Ref},
- ShardUUIDs = [UUID || {UUID, _Id, _Revs} <- ShardReqs],
- {[Worker | Ws], [{Worker, ShardUUIDs} | WUUIDs]}
- end, {[], []}, group_reqs_by_shard(DbName, Reqs)),
-
- UUIDCounts = lists:foldl(fun({_Worker, WUUIDs}, CountAcc) ->
- lists:foldl(fun(UUID, InnerCountAcc) ->
- dict:update_counter(UUID, 1, InnerCountAcc)
- end, CountAcc, WUUIDs)
- end, dict:new(), WorkerUUIDs),
-
- RexiMon = fabric_util:create_monitors(Workers),
- Timeout = fabric_util:request_timeout(),
- Acc0 = #acc{
- worker_uuids = WorkerUUIDs,
- resps = dict:from_list([{UUID, []} || UUID <- UUIDs]),
- uuid_counts = UUIDCounts,
- w = w(DbName, Options)
- },
- Acc2 = try rexi_utils:recv(Workers, #shard.ref,
- fun handle_message/3, Acc0, infinity, Timeout) of
- {ok, Acc1} ->
- Acc1;
- {timeout, Acc1} ->
- #acc{
- worker_uuids = WorkerUUIDs,
- resps = Resps
- } = Acc1,
- DefunctWorkers = [Worker || {Worker, _} <- WorkerUUIDs],
- fabric_util:log_timeout(DefunctWorkers, "purge_docs"),
- NewResps = append_errors(timeout, WorkerUUIDs, Resps),
- Acc1#acc{worker_uuids = [], resps = NewResps};
- Else ->
- Else
- after
- rexi_monitor:stop(RexiMon)
- end,
-
- FinalResps = format_resps(UUIDs, Acc2),
- {resp_health(FinalResps), FinalResps}.
-
-
-handle_message({rexi_DOWN, _, {_, Node}, _}, _Worker, Acc) ->
- #acc{
- worker_uuids = WorkerUUIDs,
- resps = Resps
- } = Acc,
- Pred = fun({#shard{node = N}, _}) -> N == Node end,
- {Failed, Rest} = lists:partition(Pred, WorkerUUIDs),
- NewResps = append_errors(internal_server_error, Failed, Resps),
- maybe_stop(Acc#acc{worker_uuids = Rest, resps = NewResps});
-
-handle_message({rexi_EXIT, _}, Worker, Acc) ->
- #acc{
- worker_uuids = WorkerUUIDs,
- resps = Resps
- } = Acc,
- {value, WorkerPair, Rest} = lists:keytake(Worker, 1, WorkerUUIDs),
- NewResps = append_errors(internal_server_error, [WorkerPair], Resps),
- maybe_stop(Acc#acc{worker_uuids = Rest, resps = NewResps});
-
-handle_message({ok, Replies}, Worker, Acc) ->
- #acc{
- worker_uuids = WorkerUUIDs,
- resps = Resps
- } = Acc,
- {value, {_W, UUIDs}, Rest} = lists:keytake(Worker, 1, WorkerUUIDs),
- NewResps = append_resps(UUIDs, Replies, Resps),
- maybe_stop(Acc#acc{worker_uuids = Rest, resps = NewResps});
-
-handle_message({bad_request, Msg}, _, _) ->
- throw({bad_request, Msg}).
-
-
-create_reqs([], UUIDs, Reqs) ->
- {lists:reverse(UUIDs), lists:reverse(Reqs)};
-
-create_reqs([{Id, Revs} | RestIdsRevs], UUIDs, Reqs) ->
- UUID = couch_uuids:new(),
- NewUUIDs = [UUID | UUIDs],
- NewReqs = [{UUID, Id, Revs} | Reqs],
- create_reqs(RestIdsRevs, NewUUIDs, NewReqs).
-
-
-group_reqs_by_shard(DbName, Reqs) ->
- lists:foldl(fun({_UUID, Id, _Revs} = Req, D0) ->
- lists:foldl(fun(Shard, D1) ->
- dict:append(Shard, Req, D1)
- end, D0, mem3:shards(DbName, Id))
- end, dict:new(), Reqs).
-
-
-w(DbName, Options) ->
- try
- list_to_integer(couch_util:get_value(w, Options))
- catch _:_ ->
- mem3:quorum(DbName)
- end.
-
-
-append_errors(Type, WorkerUUIDs, Resps) ->
- lists:foldl(fun({_Worker, UUIDs}, RespAcc) ->
- Errors = [{error, Type} || _UUID <- UUIDs],
- append_resps(UUIDs, Errors, RespAcc)
- end, Resps, WorkerUUIDs).
-
-
-append_resps([], [], Resps) ->
- Resps;
-append_resps([UUID | RestUUIDs], [Reply | RestReplies], Resps) ->
- NewResps = dict:append(UUID, Reply, Resps),
- append_resps(RestUUIDs, RestReplies, NewResps).
-
-
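-% Stop once every UUID has reached quorum; throw is used to short-circuit
-% the dict:fold as soon as one UUID still needs more responses.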
-maybe_stop(#acc{worker_uuids = []} = Acc) ->
- {stop, Acc};
-maybe_stop(#acc{resps = Resps, uuid_counts = Counts, w = W} = Acc) ->
- try
- dict:fold(fun(UUID, UUIDResps, _) ->
- UUIDCount = dict:fetch(UUID, Counts),
- case has_quorum(UUIDResps, UUIDCount, W) of
- true -> ok;
- false -> throw(keep_going)
- end
- end, nil, Resps),
- {stop, Acc}
- catch throw:keep_going ->
- {ok, Acc}
- end.
-
-
-format_resps(UUIDs, #acc{} = Acc) ->
- #acc{
- resps = Resps,
- w = W
- } = Acc,
- FoldFun = fun(UUID, Replies, ReplyAcc) ->
- OkReplies = [Reply || {ok, Reply} <- Replies],
- case OkReplies of
- [] ->
- [Error | _] = lists:usort(Replies),
- [{UUID, Error} | ReplyAcc];
- _ ->
- AllRevs = lists:usort(lists:flatten(OkReplies)),
- IsOk = length(OkReplies) >= W
- andalso length(lists:usort(OkReplies)) == 1,
- Health = if IsOk -> ok; true -> accepted end,
- [{UUID, {Health, AllRevs}} | ReplyAcc]
- end
- end,
- FinalReplies = dict:fold(FoldFun, [], Resps),
- couch_util:reorder_results(UUIDs, FinalReplies);
-
-format_resps(_UUIDs, Else) ->
- Else.
-
-
-resp_health(Resps) ->
- Healths = lists:usort([H || {H, _} <- Resps]),
- HasError = lists:member(error, Healths),
- HasAccepted = lists:member(accepted, Healths),
- AllOk = Healths == [ok],
- if
- HasError -> error;
- HasAccepted -> accepted;
- AllOk -> ok;
- true -> error
- end.
-
-
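-% A single UUID has quorum when W identical ok responses have arrived, or
-% when every copy (Count of them) has already replied.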
-has_quorum(Resps, Count, W) ->
- OkResps = [R || {ok, _} = R <- Resps],
- OkCounts = lists:foldl(fun(R, Acc) ->
- orddict:update_counter(R, 1, Acc)
- end, orddict:new(), OkResps),
- MaxOk = lists:max([0 | element(2, lists:unzip(OkCounts))]),
- if
- MaxOk >= W -> true;
- length(Resps) >= Count -> true;
- true -> false
- end.
-
-
-%% -ifdef(TEST).
-%% -include_lib("eunit/include/eunit.hrl").
-%%
-%% purge_test_() ->
-%% {
-%% setup,
-%% fun setup/0,
-%% fun teardown/1,
-%% [
-%% t_w2_ok(),
-%% t_w3_ok(),
-%%
-%% t_w2_mixed_accepted(),
-%% t_w3_mixed_accepted(),
-%%
-%% t_w2_exit1_ok(),
-%% t_w2_exit2_accepted(),
-%% t_w2_exit3_error(),
-%%
-%% t_w4_accepted(),
-%%
-%% t_mixed_ok_accepted(),
-%% t_mixed_errors()
-%% ]
-%% }.
-%%
-%%
-%% setup() ->
-%% meck:new(couch_log),
-%% meck:expect(couch_log, warning, fun(_, _) -> ok end),
-%% meck:expect(couch_log, notice, fun(_, _) -> ok end).
-%%
-%%
-%% teardown(_) ->
-%% meck:unload().
-%%
-%%
-%% t_w2_ok() ->
-%% ?_test(begin
-%% Acc0 = create_init_acc(2),
-%% Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
-%%
-%% {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
-%% ?assertEqual(2, length(Acc1#acc.worker_uuids)),
-%% check_quorum(Acc1, false),
-%%
-%% {stop, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
-%% ?assertEqual(1, length(Acc2#acc.worker_uuids)),
-%% check_quorum(Acc2, true),
-%%
-%% Expect = [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}],
-%% Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc2),
-%% ?assertEqual(Expect, Resps),
-%% ?assertEqual(ok, resp_health(Resps))
-%% end).
-%%
-%%
-%% t_w3_ok() ->
-%% ?_test(begin
-%% Acc0 = create_init_acc(3),
-%% Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
-%%
-%% {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
-%% check_quorum(Acc1, false),
-%%
-%% {ok, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
-%% ?assertEqual(1, length(Acc2#acc.worker_uuids)),
-%% check_quorum(Acc2, false),
-%%
-%% {stop, Acc3} = handle_message(Msg, worker(3, Acc0), Acc2),
-%% ?assertEqual(0, length(Acc3#acc.worker_uuids)),
-%% check_quorum(Acc3, true),
-%%
-%% Expect = [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}],
-%% Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
-%% ?assertEqual(Expect, Resps),
-%% ?assertEqual(ok, resp_health(Resps))
-%% end).
-%%
-%%
-%% t_w2_mixed_accepted() ->
-%% ?_test(begin
-%% Acc0 = create_init_acc(2),
-%% Msg1 = {ok, [{ok, [{1, <<"foo1">>}]}, {ok, [{2, <<"bar1">>}]}]},
-%% Msg2 = {ok, [{ok, [{1, <<"foo2">>}]}, {ok, [{2, <<"bar2">>}]}]},
-%%
-%% {ok, Acc1} = handle_message(Msg1, worker(1, Acc0), Acc0),
-%% ?assertEqual(2, length(Acc1#acc.worker_uuids)),
-%% check_quorum(Acc1, false),
-%%
-%% {ok, Acc2} = handle_message(Msg2, worker(2, Acc0), Acc1),
-%% ?assertEqual(1, length(Acc2#acc.worker_uuids)),
-%% check_quorum(Acc2, false),
-%%
-%% {stop, Acc3} = handle_message(Msg1, worker(3, Acc0), Acc2),
-%% ?assertEqual(0, length(Acc3#acc.worker_uuids)),
-%% check_quorum(Acc3, true),
-%%
-%% Expect = [
-%% {accepted, [{1, <<"foo1">>}, {1, <<"foo2">>}]},
-%% {accepted, [{2, <<"bar1">>}, {2, <<"bar2">>}]}
-%% ],
-%% Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc2),
-%% ?assertEqual(Expect, Resps),
-%% ?assertEqual(accepted, resp_health(Resps))
-%% end).
-%%
-%%
-%% t_w3_mixed_accepted() ->
-%% ?_test(begin
-%% Acc0 = create_init_acc(3),
-%% Msg1 = {ok, [{ok, [{1, <<"foo1">>}]}, {ok, [{2, <<"bar1">>}]}]},
-%% Msg2 = {ok, [{ok, [{1, <<"foo2">>}]}, {ok, [{2, <<"bar2">>}]}]},
-%%
-%% {ok, Acc1} = handle_message(Msg1, worker(1, Acc0), Acc0),
-%% ?assertEqual(2, length(Acc1#acc.worker_uuids)),
-%% check_quorum(Acc1, false),
-%%
-%% {ok, Acc2} = handle_message(Msg2, worker(2, Acc0), Acc1),
-%% ?assertEqual(1, length(Acc2#acc.worker_uuids)),
-%% check_quorum(Acc2, false),
-%%
-%% {stop, Acc3} = handle_message(Msg2, worker(3, Acc0), Acc2),
-%% ?assertEqual(0, length(Acc3#acc.worker_uuids)),
-%% check_quorum(Acc3, true),
-%%
-%% Expect = [
-%% {accepted, [{1, <<"foo1">>}, {1, <<"foo2">>}]},
-%% {accepted, [{2, <<"bar1">>}, {2, <<"bar2">>}]}
-%% ],
-%% Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc2),
-%% ?assertEqual(Expect, Resps),
-%% ?assertEqual(accepted, resp_health(Resps))
-%% end).
-%%
-%%
-%% t_w2_exit1_ok() ->
-%% ?_test(begin
-%% Acc0 = create_init_acc(2),
-%% Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
-%% ExitMsg = {rexi_EXIT, blargh},
-%%
-%% {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
-%% ?assertEqual(2, length(Acc1#acc.worker_uuids)),
-%% check_quorum(Acc1, false),
-%%
-%% {ok, Acc2} = handle_message(ExitMsg, worker(2, Acc0), Acc1),
-%% ?assertEqual(1, length(Acc2#acc.worker_uuids)),
-%% check_quorum(Acc2, false),
-%%
-%% {stop, Acc3} = handle_message(Msg, worker(3, Acc0), Acc2),
-%% ?assertEqual(0, length(Acc3#acc.worker_uuids)),
-%% check_quorum(Acc3, true),
-%%
-%% Expect = [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}],
-%% Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
-%% ?assertEqual(Expect, Resps),
-%% ?assertEqual(ok, resp_health(Resps))
-%% end).
-%%
-%%
-%% t_w2_exit2_accepted() ->
-%% ?_test(begin
-%% Acc0 = create_init_acc(2),
-%% Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
-%% ExitMsg = {rexi_EXIT, blargh},
-%%
-%% {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
-%% ?assertEqual(2, length(Acc1#acc.worker_uuids)),
-%% check_quorum(Acc1, false),
-%%
-%% {ok, Acc2} = handle_message(ExitMsg, worker(2, Acc0), Acc1),
-%% ?assertEqual(1, length(Acc2#acc.worker_uuids)),
-%% check_quorum(Acc2, false),
-%%
-%% {stop, Acc3} = handle_message(ExitMsg, worker(3, Acc0), Acc2),
-%% ?assertEqual(0, length(Acc3#acc.worker_uuids)),
-%% check_quorum(Acc3, true),
-%%
-%% Expect = [{accepted, [{1, <<"foo">>}]}, {accepted, [{2, <<"bar">>}]}],
-%% Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
-%% ?assertEqual(Expect, Resps),
-%% ?assertEqual(accepted, resp_health(Resps))
-%% end).
-%%
-%%
-%% t_w2_exit3_error() ->
-%% ?_test(begin
-%% Acc0 = create_init_acc(2),
-%% ExitMsg = {rexi_EXIT, blargh},
-%%
-%% {ok, Acc1} = handle_message(ExitMsg, worker(1, Acc0), Acc0),
-%% ?assertEqual(2, length(Acc1#acc.worker_uuids)),
-%% check_quorum(Acc1, false),
-%%
-%% {ok, Acc2} = handle_message(ExitMsg, worker(2, Acc0), Acc1),
-%% ?assertEqual(1, length(Acc2#acc.worker_uuids)),
-%% check_quorum(Acc2, false),
-%%
-%% {stop, Acc3} = handle_message(ExitMsg, worker(3, Acc0), Acc2),
-%% ?assertEqual(0, length(Acc3#acc.worker_uuids)),
-%% check_quorum(Acc3, true),
-%%
-%% Expect = [
-%% {error, internal_server_error},
-%% {error, internal_server_error}
-%% ],
-%% Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
-%% ?assertEqual(Expect, Resps),
-%% ?assertEqual(error, resp_health(Resps))
-%% end).
-%%
-%%
-%% t_w4_accepted() ->
-%% % Make sure we return when all workers have responded
-%% % rather than wait around for a timeout if a user asks
-%% % for a quorum with more than the available number of
-%% % shards.
-%% ?_test(begin
-%% Acc0 = create_init_acc(4),
-%% Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
-%%
-%% {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
-%% ?assertEqual(2, length(Acc1#acc.worker_uuids)),
-%% check_quorum(Acc1, false),
-%%
-%% {ok, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
-%% ?assertEqual(1, length(Acc2#acc.worker_uuids)),
-%% check_quorum(Acc2, false),
-%%
-%% {stop, Acc3} = handle_message(Msg, worker(3, Acc0), Acc2),
-%% ?assertEqual(0, length(Acc3#acc.worker_uuids)),
-%% check_quorum(Acc3, true),
-%%
-%% Expect = [{accepted, [{1, <<"foo">>}]}, {accepted, [{2, <<"bar">>}]}],
-%% Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
-%% ?assertEqual(Expect, Resps),
-%% ?assertEqual(accepted, resp_health(Resps))
-%% end).
-%%
-%%
-%% t_mixed_ok_accepted() ->
-%% ?_test(begin
-%% WorkerUUIDs = [
-%% {#shard{node = a, range = [1, 2]}, [<<"uuid1">>]},
-%% {#shard{node = b, range = [1, 2]}, [<<"uuid1">>]},
-%% {#shard{node = c, range = [1, 2]}, [<<"uuid1">>]},
-%%
-%% {#shard{node = a, range = [3, 4]}, [<<"uuid2">>]},
-%% {#shard{node = b, range = [3, 4]}, [<<"uuid2">>]},
-%% {#shard{node = c, range = [3, 4]}, [<<"uuid2">>]}
-%% ],
-%%
-%% Acc0 = #acc{
-%% worker_uuids = WorkerUUIDs,
-%% resps = dict:from_list([{<<"uuid1">>, []}, {<<"uuid2">>, []}]),
-%% uuid_counts = dict:from_list([{<<"uuid1">>, 3}, {<<"uuid2">>, 3}]),
-%% w = 2
-%% },
-%%
-%% Msg1 = {ok, [{ok, [{1, <<"foo">>}]}]},
-%% Msg2 = {ok, [{ok, [{2, <<"bar">>}]}]},
-%% ExitMsg = {rexi_EXIT, blargh},
-%%
-%% {ok, Acc1} = handle_message(Msg1, worker(1, Acc0), Acc0),
-%% {ok, Acc2} = handle_message(Msg1, worker(2, Acc0), Acc1),
-%% {ok, Acc3} = handle_message(ExitMsg, worker(4, Acc0), Acc2),
-%% {ok, Acc4} = handle_message(ExitMsg, worker(5, Acc0), Acc3),
-%% {stop, Acc5} = handle_message(Msg2, worker(6, Acc0), Acc4),
-%%
-%% Expect = [{ok, [{1, <<"foo">>}]}, {accepted, [{2, <<"bar">>}]}],
-%% Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc5),
-%% ?assertEqual(Expect, Resps),
-%% ?assertEqual(accepted, resp_health(Resps))
-%% end).
-%%
-%%
-%% t_mixed_errors() ->
-%% ?_test(begin
-%% WorkerUUIDs = [
-%% {#shard{node = a, range = [1, 2]}, [<<"uuid1">>]},
-%% {#shard{node = b, range = [1, 2]}, [<<"uuid1">>]},
-%% {#shard{node = c, range = [1, 2]}, [<<"uuid1">>]},
-%%
-%% {#shard{node = a, range = [3, 4]}, [<<"uuid2">>]},
-%% {#shard{node = b, range = [3, 4]}, [<<"uuid2">>]},
-%% {#shard{node = c, range = [3, 4]}, [<<"uuid2">>]}
-%% ],
-%%
-%% Acc0 = #acc{
-%% worker_uuids = WorkerUUIDs,
-%% resps = dict:from_list([{<<"uuid1">>, []}, {<<"uuid2">>, []}]),
-%% uuid_counts = dict:from_list([{<<"uuid1">>, 3}, {<<"uuid2">>, 3}]),
-%% w = 2
-%% },
-%%
-%% Msg = {ok, [{ok, [{1, <<"foo">>}]}]},
-%% ExitMsg = {rexi_EXIT, blargh},
-%%
-%% {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
-%% {ok, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
-%% {ok, Acc3} = handle_message(ExitMsg, worker(4, Acc0), Acc2),
-%% {ok, Acc4} = handle_message(ExitMsg, worker(5, Acc0), Acc3),
-%% {stop, Acc5} = handle_message(ExitMsg, worker(6, Acc0), Acc4),
-%%
-%% Expect = [{ok, [{1, <<"foo">>}]}, {error, internal_server_error}],
-%% Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc5),
-%% ?assertEqual(Expect, Resps),
-%% ?assertEqual(error, resp_health(Resps))
-%% end).
-%%
-%%
-%% create_init_acc(W) ->
-%% UUID1 = <<"uuid1">>,
-%% UUID2 = <<"uuid2">>,
-%%
-%% Nodes = [node1, node2, node3],
-%% Shards = mem3_util:create_partition_map(<<"foo">>, 3, 1, Nodes),
-%%
-%% % Create our worker_uuids. We're relying on the fact that
-%% % we're using a fake Q=1 db so we don't have to worry
-%% % about any hashing here.
-%% WorkerUUIDs = lists:map(fun(Shard) ->
-%% {Shard#shard{ref = erlang:make_ref()}, [UUID1, UUID2]}
-%% end, Shards),
-%%
-%% #acc{
-%% worker_uuids = WorkerUUIDs,
-%% resps = dict:from_list([{UUID1, []}, {UUID2, []}]),
-%% uuid_counts = dict:from_list([{UUID1, 3}, {UUID2, 3}]),
-%% w = W
-%% }.
-%%
-%%
-%% worker(N, #acc{worker_uuids = WorkerUUIDs}) ->
-%% {Worker, _} = lists:nth(N, WorkerUUIDs),
-%% Worker.
-%%
-%%
-%% check_quorum(Acc, Expect) ->
-%% dict:fold(fun(_Shard, Resps, _) ->
-%% ?assertEqual(Expect, has_quorum(Resps, 3, Acc#acc.w))
-%% end, nil, Acc#acc.resps).
-%%
-%% -endif.
diff --git a/src/fabric/src/fabric_doc_update.erl b/src/fabric/src/fabric_doc_update.erl
deleted file mode 100644
index d670e3ccf..000000000
--- a/src/fabric/src/fabric_doc_update.erl
+++ /dev/null
@@ -1,377 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_doc_update).
-
--export([go/3]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-go(_, [], _) ->
- {ok, []};
-go(DbName, AllDocs0, Opts) ->
- AllDocs1 = before_doc_update(DbName, AllDocs0, Opts),
- AllDocs = tag_docs(AllDocs1),
- validate_atomic_update(DbName, AllDocs, lists:member(all_or_nothing, Opts)),
- Options = lists:delete(all_or_nothing, Opts),
- GroupedDocs = lists:map(fun({#shard{name=Name, node=Node} = Shard, Docs}) ->
- Docs1 = untag_docs(Docs),
- Ref = rexi:cast(Node, {fabric_rpc, update_docs, [Name,Docs1,Options]}),
- {Shard#shard{ref=Ref}, Docs}
- end, group_docs_by_shard(DbName, AllDocs)),
- {Workers, _} = lists:unzip(GroupedDocs),
- RexiMon = fabric_util:create_monitors(Workers),
- W = couch_util:get_value(w, Options, integer_to_list(mem3:quorum(DbName))),
- Acc0 = {length(Workers), length(AllDocs), list_to_integer(W), GroupedDocs,
- dict:new()},
- Timeout = fabric_util:request_timeout(),
- try rexi_utils:recv(Workers, #shard.ref, fun handle_message/3, Acc0, infinity, Timeout) of
- {ok, {Health, Results}}
- when Health =:= ok; Health =:= accepted; Health =:= error ->
- {Health, [R || R <- couch_util:reorder_results(AllDocs, Results), R =/= noreply]};
- {timeout, Acc} ->
- {_, _, W1, GroupedDocs1, DocReplDict} = Acc,
- {DefunctWorkers, _} = lists:unzip(GroupedDocs1),
- fabric_util:log_timeout(DefunctWorkers, "update_docs"),
- {Health, _, Resp} = dict:fold(fun force_reply/3, {ok, W1, []},
- DocReplDict),
- {Health, [R || R <- couch_util:reorder_results(AllDocs, Resp), R =/= noreply]};
- Else ->
- Else
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Worker, Acc0) ->
- {_, LenDocs, W, GroupedDocs, DocReplyDict} = Acc0,
- NewGrpDocs = [X || {#shard{node=N}, _} = X <- GroupedDocs, N =/= NodeRef],
- skip_message({length(NewGrpDocs), LenDocs, W, NewGrpDocs, DocReplyDict});
-
-handle_message({rexi_EXIT, _}, Worker, Acc0) ->
- {WC,LenDocs,W,GrpDocs,DocReplyDict} = Acc0,
- NewGrpDocs = lists:keydelete(Worker,1,GrpDocs),
- skip_message({WC-1,LenDocs,W,NewGrpDocs,DocReplyDict});
-handle_message(internal_server_error, Worker, Acc0) ->
- % happens when we fail to load validation functions in an RPC worker
- {WC,LenDocs,W,GrpDocs,DocReplyDict} = Acc0,
- NewGrpDocs = lists:keydelete(Worker,1,GrpDocs),
- skip_message({WC-1,LenDocs,W,NewGrpDocs,DocReplyDict});
-handle_message(attachment_chunk_received, _Worker, Acc0) ->
- {ok, Acc0};
-handle_message({ok, Replies}, Worker, Acc0) ->
- {WaitingCount, DocCount, W, GroupedDocs, DocReplyDict0} = Acc0,
- {value, {_, Docs}, NewGrpDocs} = lists:keytake(Worker, 1, GroupedDocs),
- DocReplyDict = append_update_replies(Docs, Replies, DocReplyDict0),
- case {WaitingCount, dict:size(DocReplyDict)} of
- {1, _} ->
- % last message has arrived, we need to conclude things
- {Health, W, Reply} = dict:fold(fun force_reply/3, {ok, W, []},
- DocReplyDict),
- {stop, {Health, Reply}};
- {_, DocCount} ->
- % we've got at least one reply for each document, let's take a look
- case dict:fold(fun maybe_reply/3, {stop,W,[]}, DocReplyDict) of
- continue ->
- {ok, {WaitingCount - 1, DocCount, W, NewGrpDocs, DocReplyDict}};
- {stop, W, FinalReplies} ->
- {stop, {ok, FinalReplies}}
- end;
- _ ->
- {ok, {WaitingCount - 1, DocCount, W, NewGrpDocs, DocReplyDict}}
- end;
-handle_message({missing_stub, Stub}, _, _) ->
- throw({missing_stub, Stub});
-handle_message({not_found, no_db_file} = X, Worker, Acc0) ->
- {_, _, _, GroupedDocs, _} = Acc0,
- Docs = couch_util:get_value(Worker, GroupedDocs),
- handle_message({ok, [X || _D <- Docs]}, Worker, Acc0);
-handle_message({bad_request, Msg}, _, _) ->
- throw({bad_request, Msg});
-handle_message({request_entity_too_large, Entity}, _, _) ->
- throw({request_entity_too_large, Entity}).
-
-before_doc_update(DbName, Docs, Opts) ->
- case {fabric_util:is_replicator_db(DbName), fabric_util:is_users_db(DbName)} of
- {true, _} ->
- %% cluster db is expensive to create so we only do it if we have to
- Db = fabric_util:open_cluster_db(DbName, Opts),
- [couch_replicator_docs:before_doc_update(Doc, Db, replicated_changes)
- || Doc <- Docs];
- {_, true} ->
- %% cluster db is expensive to create so we only do it if we have to
- Db = fabric_util:open_cluster_db(DbName, Opts),
- [couch_users_db:before_doc_update(Doc, Db, interactive_edit)
- || Doc <- Docs];
- _ ->
- Docs
- end.
-
-tag_docs([]) ->
- [];
-tag_docs([#doc{meta=Meta}=Doc | Rest]) ->
- [Doc#doc{meta=[{ref, make_ref()} | Meta]} | tag_docs(Rest)].
-
-untag_docs([]) ->
- [];
-untag_docs([#doc{meta=Meta}=Doc | Rest]) ->
- [Doc#doc{meta=lists:keydelete(ref, 1, Meta)} | untag_docs(Rest)].
-
-force_reply(Doc, [], {_, W, Acc}) ->
- {error, W, [{Doc, {error, internal_server_error}} | Acc]};
-force_reply(Doc, [FirstReply|_] = Replies, {Health, W, Acc}) ->
- case update_quorum_met(W, Replies) of
- {true, Reply} ->
- {Health, W, [{Doc,Reply} | Acc]};
- false ->
- case [Reply || {ok, Reply} <- Replies] of
- [] ->
- % check if all errors are identical, if so inherit health
- case lists:all(fun(E) -> E =:= FirstReply end, Replies) of
- true ->
- CounterKey = [fabric, doc_update, errors],
- couch_stats:increment_counter(CounterKey),
- {Health, W, [{Doc, FirstReply} | Acc]};
- false ->
- CounterKey = [fabric, doc_update, mismatched_errors],
- couch_stats:increment_counter(CounterKey),
- {error, W, [{Doc, FirstReply} | Acc]}
- end;
- [AcceptedRev | _] ->
- CounterKey = [fabric, doc_update, write_quorum_errors],
- couch_stats:increment_counter(CounterKey),
- NewHealth = case Health of ok -> accepted; _ -> Health end,
- {NewHealth, W, [{Doc, {accepted,AcceptedRev}} | Acc]}
- end
- end.
-
-maybe_reply(_, _, continue) ->
- % we didn't meet quorum for all docs, so we're fast-forwarding the fold
- continue;
-maybe_reply(Doc, Replies, {stop, W, Acc}) ->
- case update_quorum_met(W, Replies) of
- {true, Reply} ->
- {stop, W, [{Doc, Reply} | Acc]};
- false ->
- continue
- end.
-
-update_quorum_met(W, Replies) ->
- Counters = lists:foldl(fun(R,D) -> orddict:update_counter(R,1,D) end,
- orddict:new(), Replies),
- GoodReplies = lists:filter(fun good_reply/1, Counters),
- case lists:dropwhile(fun({_, Count}) -> Count < W end, GoodReplies) of
- [] ->
- false;
- [{FinalReply, _} | _] ->
- {true, FinalReply}
- end.
-
-good_reply({{ok, _}, _}) ->
- true;
-good_reply({noreply, _}) ->
- true;
-good_reply(_) ->
- false.
-
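-% A quick illustration of the quorum check above, with W = 2 and a
-% hypothetical revision term Rev (a sketch, not an exhaustive spec):
-%
-%   update_quorum_met(2, [{ok, Rev}, {ok, Rev}, {rexi_EXIT, x}])
-%     %=> {true, {ok, Rev}}  two identical good replies meet W = 2
-%   update_quorum_met(2, [{ok, Rev}, {rexi_EXIT, x}])
-%     %=> false              only one good reply, so no quorum
-%
-% Only {ok, _} and noreply replies count towards the write quorum; errors
-% and rexi exits never do.
-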
--spec group_docs_by_shard(binary(), [#doc{}]) -> [{#shard{}, [#doc{}]}].
-group_docs_by_shard(DbName, Docs) ->
- dict:to_list(lists:foldl(fun(#doc{id=Id} = Doc, D0) ->
- lists:foldl(fun(Shard, D1) ->
- dict:append(Shard, Doc, D1)
- end, D0, mem3:shards(DbName,Id))
- end, dict:new(), Docs)).
-
-append_update_replies([], [], DocReplyDict) ->
- DocReplyDict;
-append_update_replies([Doc|Rest], [], Dict0) ->
-    % icky: with replicated_changes, only errors show up in the result
- append_update_replies(Rest, [], dict:append(Doc, noreply, Dict0));
-append_update_replies([Doc|Rest1], [Reply|Rest2], Dict0) ->
- append_update_replies(Rest1, Rest2, dict:append(Doc, Reply, Dict0)).
-
-skip_message({0, _, W, _, DocReplyDict}) ->
- {Health, W, Reply} = dict:fold(fun force_reply/3, {ok, W, []}, DocReplyDict),
- {stop, {Health, Reply}};
-skip_message(Acc0) ->
- {ok, Acc0}.
-
-validate_atomic_update(_, _, false) ->
- ok;
-validate_atomic_update(_DbName, AllDocs, true) ->
-    % TODO actually perform the validation. This requires some hackery: we need
-    % to basically extract the prep_and_validate_updates function from couch_db
-    % and run only that, without actually writing anything on success.
- Error = {not_implemented, <<"all_or_nothing is not supported">>},
- PreCommitFailures = lists:map(fun(#doc{id=Id, revs = {Pos,Revs}}) ->
- case Revs of [] -> RevId = <<>>; [RevId|_] -> ok end,
- {{Id, {Pos, RevId}}, Error}
- end, AllDocs),
- throw({aborted, PreCommitFailures}).
-
-
-%% -ifdef(TEST).
-%% -include_lib("eunit/include/eunit.hrl").
-%%
-%%
-%% setup_all() ->
-%% meck:new([couch_log, couch_stats]),
-%% meck:expect(couch_log, warning, fun(_,_) -> ok end),
-%% meck:expect(couch_stats, increment_counter, fun(_) -> ok end).
-%%
-%%
-%% teardown_all(_) ->
-%% meck:unload().
-%%
-%%
-%% doc_update_test_() ->
-%% {
-%% setup,
-%% fun setup_all/0,
-%% fun teardown_all/1,
-%% [
-%% fun doc_update1/0,
-%% fun doc_update2/0,
-%% fun doc_update3/0
-%% ]
-%% }.
-%%
-%%
-%% % eunits
-%% doc_update1() ->
-%% Doc1 = #doc{revs = {1,[<<"foo">>]}},
-%% Doc2 = #doc{revs = {1,[<<"bar">>]}},
-%% Docs = [Doc1],
-%% Docs2 = [Doc2, Doc1],
-%% Dict = dict:from_list([{Doc,[]} || Doc <- Docs]),
-%% Dict2 = dict:from_list([{Doc,[]} || Doc <- Docs2]),
-%%
-%% Shards =
-%% mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
-%% GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
-%%
-%%
-%% % test for W = 2
-%% AccW2 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
-%% Dict},
-%%
-%% {ok,{WaitingCountW2_1,_,_,_,_}=AccW2_1} =
-%% handle_message({ok, [{ok, Doc1}]},hd(Shards),AccW2),
-%% ?assertEqual(WaitingCountW2_1,2),
-%% {stop, FinalReplyW2 } =
-%% handle_message({ok, [{ok, Doc1}]},lists:nth(2,Shards),AccW2_1),
-%% ?assertEqual({ok, [{Doc1, {ok,Doc1}}]},FinalReplyW2),
-%%
-%% % test for W = 3
-%% AccW3 = {length(Shards), length(Docs), list_to_integer("3"), GroupedDocs,
-%% Dict},
-%%
-%% {ok,{WaitingCountW3_1,_,_,_,_}=AccW3_1} =
-%% handle_message({ok, [{ok, Doc1}]},hd(Shards),AccW3),
-%% ?assertEqual(WaitingCountW3_1,2),
-%%
-%% {ok,{WaitingCountW3_2,_,_,_,_}=AccW3_2} =
-%% handle_message({ok, [{ok, Doc1}]},lists:nth(2,Shards),AccW3_1),
-%% ?assertEqual(WaitingCountW3_2,1),
-%%
-%% {stop, FinalReplyW3 } =
-%% handle_message({ok, [{ok, Doc1}]},lists:nth(3,Shards),AccW3_2),
-%% ?assertEqual({ok, [{Doc1, {ok,Doc1}}]},FinalReplyW3),
-%%
-%% % test w quorum > # shards, which should fail immediately
-%%
-%% Shards2 = mem3_util:create_partition_map("foo",1,1,["node1"]),
-%% GroupedDocs2 = group_docs_by_shard_hack(<<"foo">>,Shards2,Docs),
-%%
-%% AccW4 =
-%% {length(Shards2), length(Docs), list_to_integer("2"), GroupedDocs2, Dict},
-%% Bool =
-%% case handle_message({ok, [{ok, Doc1}]},hd(Shards2),AccW4) of
-%% {stop, _Reply} ->
-%% true;
-%% _ -> false
-%% end,
-%% ?assertEqual(Bool,true),
-%%
-%% % Docs with no replies should end up as {error, internal_server_error}
-%% SA1 = #shard{node=a, range=1},
-%% SB1 = #shard{node=b, range=1},
-%% SA2 = #shard{node=a, range=2},
-%% SB2 = #shard{node=b, range=2},
-%% GroupedDocs3 = [{SA1,[Doc1]}, {SB1,[Doc1]}, {SA2,[Doc2]}, {SB2,[Doc2]}],
-%% StW5_0 = {length(GroupedDocs3), length(Docs2), 2, GroupedDocs3, Dict2},
-%% {ok, StW5_1} = handle_message({ok, [{ok, "A"}]}, SA1, StW5_0),
-%% {ok, StW5_2} = handle_message({rexi_EXIT, nil}, SB1, StW5_1),
-%% {ok, StW5_3} = handle_message({rexi_EXIT, nil}, SA2, StW5_2),
-%% {stop, ReplyW5} = handle_message({rexi_EXIT, nil}, SB2, StW5_3),
-%% ?assertEqual(
-%% {error, [{Doc1,{accepted,"A"}},{Doc2,{error,internal_server_error}}]},
-%% ReplyW5
-%% ).
-%%
-%% doc_update2() ->
-%% Doc1 = #doc{revs = {1,[<<"foo">>]}},
-%% Doc2 = #doc{revs = {1,[<<"bar">>]}},
-%% Docs = [Doc2, Doc1],
-%% Shards =
-%% mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
-%% GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
-%% Acc0 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
-%% dict:from_list([{Doc,[]} || Doc <- Docs])},
-%%
-%% {ok,{WaitingCount1,_,_,_,_}=Acc1} =
-%% handle_message({ok, [{ok, Doc1},{ok, Doc2}]},hd(Shards),Acc0),
-%% ?assertEqual(WaitingCount1,2),
-%%
-%% {ok,{WaitingCount2,_,_,_,_}=Acc2} =
-%% handle_message({rexi_EXIT, 1},lists:nth(2,Shards),Acc1),
-%% ?assertEqual(WaitingCount2,1),
-%%
-%% {stop, Reply} =
-%% handle_message({rexi_EXIT, 1},lists:nth(3,Shards),Acc2),
-%%
-%% ?assertEqual({accepted, [{Doc1,{accepted,Doc2}}, {Doc2,{accepted,Doc1}}]},
-%% Reply).
-%%
-%% doc_update3() ->
-%% Doc1 = #doc{revs = {1,[<<"foo">>]}},
-%% Doc2 = #doc{revs = {1,[<<"bar">>]}},
-%% Docs = [Doc2, Doc1],
-%% Shards =
-%% mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
-%% GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
-%% Acc0 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
-%% dict:from_list([{Doc,[]} || Doc <- Docs])},
-%%
-%% {ok,{WaitingCount1,_,_,_,_}=Acc1} =
-%% handle_message({ok, [{ok, Doc1},{ok, Doc2}]},hd(Shards),Acc0),
-%% ?assertEqual(WaitingCount1,2),
-%%
-%% {ok,{WaitingCount2,_,_,_,_}=Acc2} =
-%% handle_message({rexi_EXIT, 1},lists:nth(2,Shards),Acc1),
-%% ?assertEqual(WaitingCount2,1),
-%%
-%% {stop, Reply} =
-%% handle_message({ok, [{ok, Doc1},{ok, Doc2}]},lists:nth(3,Shards),Acc2),
-%%
-%% ?assertEqual({ok, [{Doc1, {ok, Doc2}},{Doc2, {ok,Doc1}}]},Reply).
-%%
-%% % needed for testing to avoid having to start the mem3 application
-%% group_docs_by_shard_hack(_DbName, Shards, Docs) ->
-%% dict:to_list(lists:foldl(fun(#doc{id=_Id} = Doc, D0) ->
-%% lists:foldl(fun(Shard, D1) ->
-%% dict:append(Shard, Doc, D1)
-%% end, D0, Shards)
-%% end, dict:new(), Docs)).
-%%
-%% -endif.
diff --git a/src/fabric/src/fabric_group_info.erl b/src/fabric/src/fabric_group_info.erl
deleted file mode 100644
index 91bdf01b0..000000000
--- a/src/fabric/src/fabric_group_info.erl
+++ /dev/null
@@ -1,139 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_group_info).
-
--export([go/2]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-go(DbName, GroupId) when is_binary(GroupId) ->
- {ok, DDoc} = fabric:open_doc(DbName, GroupId, [?ADMIN_CTX]),
- go(DbName, DDoc);
-
-go(DbName, #doc{id=DDocId}) ->
- Shards = mem3:shards(DbName),
- Ushards = mem3:ushards(DbName),
- Workers = fabric_util:submit_jobs(Shards, group_info, [DDocId]),
- RexiMon = fabric_util:create_monitors(Shards),
- USet = sets:from_list([{Id, N} || #shard{name = Id, node = N} <- Ushards]),
- Acc = {fabric_dict:init(Workers, nil), [], USet},
- try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc) of
- {timeout, {WorkersDict, _, _}} ->
- DefunctWorkers = fabric_util:remove_done_workers(WorkersDict, nil),
- fabric_util:log_timeout(DefunctWorkers, "group_info"),
- {error, timeout};
- Else ->
- Else
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-handle_message({rexi_DOWN, _, {_,NodeRef},_}, _, {Counters, Resps, USet}) ->
- case fabric_ring:node_down(NodeRef, Counters, Resps) of
- {ok, Counters1} -> {ok, {Counters1, Resps, USet}};
- error -> {error, {nodedown, <<"progress not possible">>}}
- end;
-
-handle_message({rexi_EXIT, Reason}, Shard, {Counters, Resps, USet}) ->
- case fabric_ring:handle_error(Shard, Counters, Resps) of
- {ok, Counters1} -> {ok, {Counters1, Resps, USet}};
- error -> {error, Reason}
- end;
-
-handle_message({ok, Info}, Shard, {Counters, Resps, USet}) ->
- case fabric_ring:handle_response(Shard, Info, Counters, Resps) of
- {ok, {Counters1, Resps1}} ->
- {ok, {Counters1, Resps1, USet}};
- {stop, Resps1} ->
- {stop, build_final_response(USet, Resps1)}
- end;
-
-handle_message(Reason, Shard, {Counters, Resps, USet}) ->
- case fabric_ring:handle_error(Shard, Counters, Resps) of
- {ok, Counters1} -> {ok, {Counters1, Resps, USet}};
- error -> {error, Reason}
- end.
-
-
-build_final_response(USet, Responses) ->
- AccF = fabric_dict:fold(fun(#shard{name = Id, node = Node}, Info, Acc) ->
- IsPreferred = sets:is_element({Id, Node}, USet),
- dict:append(Id, {Node, IsPreferred, Info}, Acc)
- end, dict:new(), Responses),
- Pending = aggregate_pending(AccF),
- Infos = get_infos(AccF),
- [{updates_pending, {Pending}} | merge_results(Infos)].
-
-
-get_infos(Acc) ->
- Values = [V || {_, V} <- dict:to_list(Acc)],
- lists:flatten([Info || {_Node, _Pref, Info} <- lists:flatten(Values)]).
-
-aggregate_pending(Dict) ->
- {Preferred, Total, Minimum} =
- dict:fold(fun(_Name, Results, {P, T, M}) ->
- {Preferred, Total, Minimum} = calculate_pending(Results),
- {P + Preferred, T + Total, M + Minimum}
- end, {0, 0, 0}, Dict),
- [
- {minimum, Minimum},
- {preferred, Preferred},
- {total, Total}
- ].
-
-calculate_pending(Results) ->
- lists:foldl(fun
- ({_Node, true, Info}, {P, T, V}) ->
- Pending = couch_util:get_value(pending_updates, Info),
- {P + Pending, T + Pending, min(Pending, V)};
- ({_Node, false, Info}, {P, T, V}) ->
- Pending = couch_util:get_value(pending_updates, Info),
- {P, T + Pending, min(Pending, V)}
- end, {0, 0, infinity}, Results).
-
-merge_results(Info) ->
- Dict = lists:foldl(fun({K,V},D0) -> orddict:append(K,V,D0) end,
- orddict:new(), Info),
- orddict:fold(fun
- (signature, [X | _], Acc) ->
- [{signature, X} | Acc];
- (language, [X | _], Acc) ->
- [{language, X} | Acc];
- (sizes, X, Acc) ->
- [{sizes, {merge_object(X)}} | Acc];
- (compact_running, X, Acc) ->
- [{compact_running, lists:member(true, X)} | Acc];
- (updater_running, X, Acc) ->
- [{updater_running, lists:member(true, X)} | Acc];
- (waiting_commit, X, Acc) ->
- [{waiting_commit, lists:member(true, X)} | Acc];
- (waiting_clients, X, Acc) ->
- [{waiting_clients, lists:sum(X)} | Acc];
- (update_seq, X, Acc) ->
- [{update_seq, lists:sum(X)} | Acc];
- (purge_seq, X, Acc) ->
- [{purge_seq, lists:sum(X)} | Acc];
- (_, _, Acc) ->
- Acc
- end, [], Dict).
-
-merge_object(Objects) ->
- Dict = lists:foldl(fun({Props}, D) ->
- lists:foldl(fun({K,V},D0) -> orddict:append(K,V,D0) end, D, Props)
- end, orddict:new(), Objects),
- orddict:fold(fun
- (Key, X, Acc) ->
- [{Key, lists:sum(X)} | Acc]
- end, [], Dict).
diff --git a/src/fabric/src/fabric_ring.erl b/src/fabric/src/fabric_ring.erl
deleted file mode 100644
index 110edb9ab..000000000
--- a/src/fabric/src/fabric_ring.erl
+++ /dev/null
@@ -1,519 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_ring).
-
-
--export([
- is_progress_possible/1,
- is_progress_possible/2,
- get_shard_replacements/2,
- node_down/3,
- node_down/4,
- handle_error/3,
- handle_error/4,
- handle_response/4,
- handle_response/5
-]).
-
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-
--type fabric_dict() :: [{#shard{}, any()}].
--type ring_opts() :: [atom() | tuple()].
-
-
-%% @doc looks for a fully covered keyrange in the list of counters
--spec is_progress_possible(fabric_dict()) -> boolean().
-is_progress_possible(Counters) ->
- is_progress_possible(Counters, []).
-
-
-%% @doc looks for a fully covered keyrange in the list of counters
-%% This version takes a ring option to configure how progress will
-%% be checked. The default, [], checks that the full ring is covered.
--spec is_progress_possible(fabric_dict(), ring_opts()) -> boolean().
-is_progress_possible(Counters, RingOpts) ->
- is_progress_possible(Counters, [], 0, ?RING_END, RingOpts).
-
-
--spec get_shard_replacements(binary(), [#shard{}]) -> [#shard{}].
-get_shard_replacements(DbName, UsedShards0) ->
- % We only want to generate a replacements list from shards
- % that aren't already used.
- AllLiveShards = mem3:live_shards(DbName, [node() | nodes()]),
- UsedShards = [S#shard{ref=undefined} || S <- UsedShards0],
- get_shard_replacements_int(AllLiveShards -- UsedShards, UsedShards).
-
-
--spec node_down(node(), fabric_dict(), fabric_dict()) ->
- {ok, fabric_dict()} | error.
-node_down(Node, Workers, Responses) ->
- node_down(Node, Workers, Responses, []).
-
-
--spec node_down(node(), fabric_dict(), fabric_dict(), ring_opts()) ->
- {ok, fabric_dict()} | error.
-node_down(Node, Workers, Responses, RingOpts) ->
- {B, E} = range_bounds(Workers, Responses),
- Workers1 = fabric_dict:filter(fun(#shard{node = N}, _) ->
- N =/= Node
- end, Workers),
- case is_progress_possible(Workers1, Responses, B, E, RingOpts) of
- true -> {ok, Workers1};
- false -> error
- end.
-
-
--spec handle_error(#shard{}, fabric_dict(), fabric_dict()) ->
- {ok, fabric_dict()} | error.
-handle_error(Shard, Workers, Responses) ->
- handle_error(Shard, Workers, Responses, []).
-
-
--spec handle_error(#shard{}, fabric_dict(), fabric_dict(), ring_opts()) ->
- {ok, fabric_dict()} | error.
-handle_error(Shard, Workers, Responses, RingOpts) ->
- {B, E} = range_bounds(Workers, Responses),
- Workers1 = fabric_dict:erase(Shard, Workers),
- case is_progress_possible(Workers1, Responses, B, E, RingOpts) of
- true -> {ok, Workers1};
- false -> error
- end.
-
-
--spec handle_response(#shard{}, any(), fabric_dict(), fabric_dict()) ->
- {ok, {fabric_dict(), fabric_dict()}} | {stop, fabric_dict()}.
-handle_response(Shard, Response, Workers, Responses) ->
- handle_response(Shard, Response, Workers, Responses, []).
-
-
--spec handle_response(#shard{}, any(), fabric_dict(), fabric_dict(),
- ring_opts()) ->
- {ok, {fabric_dict(), fabric_dict()}} | {stop, fabric_dict()}.
-handle_response(Shard, Response, Workers, Responses, RingOpts) ->
- handle_response(Shard, Response, Workers, Responses, RingOpts,
- fun stop_workers/1).
-
-
-% Worker response handler. Gets responses from shards and collects them in a
-% list until they complete a full ring. Then kills the unused responses and
-% any remaining workers.
-%
-% How a ring "completes" is driven by RingOpts:
-%
-% * When RingOpts is [] (the default case) responses must form a "clean"
-% ring, where all copies at the start of the range and end of the range must
-% have the same boundary values.
-%
-% * When RingOpts is [{any, [#shard{}]}] responses are accepted from any of
-% the provided list of shards. This type of ring might be used when querying
-% a partitioned database. As soon as a result from any of the shards
-% arrives, result collection stops.
-%
-handle_response(Shard, Response, Workers, Responses, RingOpts, CleanupCb) ->
- Workers1 = fabric_dict:erase(Shard, Workers),
- case RingOpts of
- [] ->
- #shard{range = [B, E]} = Shard,
- Responses1 = [{{B, E}, Shard, Response} | Responses],
- handle_response_ring(Workers1, Responses1, CleanupCb);
- [{any, Any}] ->
- handle_response_any(Shard, Response, Workers1, Any, CleanupCb)
- end.
-
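-% A sketch of the default (RingOpts = []) flow, assuming two hypothetical
-% shard copies covering [0, 1] and [2, ?RING_END] held in a fabric_dict
-% Workers0 (names and values below are illustrative only):
-%
-%   {ok, {Workers1, Resps1}} =
-%       handle_response(Shard01, Resp1, Workers0, []),
-%   % ring not yet closed, keep collecting responses
-%   {stop, Used} =
-%       handle_response(Shard2End, Resp2, Workers1, Resps1),
-%   % ring closed: Used holds one {Shard, Response} per range; remaining
-%   % workers and redundant responses are stopped via the cleanup callback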
-
-handle_response_ring(Workers, Responses, CleanupCb) ->
- {MinB, MaxE} = range_bounds(Workers, Responses),
- Ranges = lists:map(fun({R, _, _}) -> R end, Responses),
- case mem3_util:get_ring(Ranges, MinB, MaxE) of
- [] ->
- {ok, {Workers, Responses}};
- Ring ->
- % Return one response per range in the ring. The
- % response list is reversed before sorting so that the
- % first shard copy to reply is first. We use keysort
- % because it is documented as being stable so that
- % we keep the relative order of duplicate shards
- SortedResponses = lists:keysort(1, lists:reverse(Responses)),
- UsedResponses = get_responses(Ring, SortedResponses),
-            % Kill all the remaining workers as well as the redundant responses
- stop_unused_workers(Workers, Responses, UsedResponses, CleanupCb),
- {stop, fabric_dict:from_list(UsedResponses)}
- end.
-
-
-handle_response_any(Shard, Response, Workers, Any, CleanupCb) ->
- case lists:member(Shard#shard{ref = undefined}, Any) of
- true ->
- stop_unused_workers(Workers, [], [], CleanupCb),
- {stop, fabric_dict:from_list([{Shard, Response}])};
- false ->
- {ok, {Workers, []}}
- end.
-
-
-% Check whether the workers still waiting and the responses already received
-% could still form a continuous range. The range won't always be the full
-% ring; its bounds are computed from the minimum interval beginning and the
-% maximum interval end.
-%
-% There is also a special case: even if the ring cannot be formed, progress
-% is still considered possible when all the shards overlap. This essentially
-% allows for split partitioned shards, where one shard copy on a node was
-% split and the set of ranges might look like: 00-ff, 00-ff, 07-ff. Even if
-% both 00-ff workers exit, progress can still be made with the remaining
-% 07-ff copy.
-%
--spec is_progress_possible(fabric_dict(), [{any(), #shard{}, any()}],
- non_neg_integer(), non_neg_integer(), ring_opts()) -> boolean().
-is_progress_possible([], [], _, _, _) ->
- false;
-
-is_progress_possible(Counters, Responses, MinB, MaxE, []) ->
- ResponseRanges = lists:map(fun({{B, E}, _, _}) -> {B, E} end, Responses),
- Ranges = fabric_util:worker_ranges(Counters) ++ ResponseRanges,
- mem3_util:get_ring(Ranges, MinB, MaxE) =/= [];
-
-is_progress_possible(Counters, Responses, _, _, [{any, AnyShards}]) ->
- InAny = fun(S) -> lists:member(S#shard{ref = undefined}, AnyShards) end,
- case fabric_dict:filter(fun(S, _) -> InAny(S) end, Counters) of
- [] ->
- case lists:filter(fun({_, S, _}) -> InAny(S) end, Responses) of
- [] -> false;
- [_ | _] -> true
- end;
- [_ | _] ->
- true
- end.
-
-
-get_shard_replacements_int(UnusedShards, UsedShards) ->
- % If we have more than one copy of a range then we don't
- % want to try and add a replacement to any copy.
- RangeCounts = lists:foldl(fun(#shard{range=R}, Acc) ->
- dict:update_counter(R, 1, Acc)
- end, dict:new(), UsedShards),
-
- % For each seq shard range with a count of 1, find any
- % possible replacements from the unused shards. The
- % replacement list is keyed by range.
- lists:foldl(fun(#shard{range = [B, E] = Range}, Acc) ->
- case dict:find(Range, RangeCounts) of
- {ok, 1} ->
- Repls = mem3_util:non_overlapping_shards(UnusedShards, B, E),
- % Only keep non-empty lists of replacements
- if Repls == [] -> Acc; true ->
- [{Range, Repls} | Acc]
- end;
- _ ->
- Acc
- end
- end, [], UsedShards).
-
-
-range_bounds(Workers, Responses) ->
- RespRanges = lists:map(fun({R, _, _}) -> R end, Responses),
- Ranges = fabric_util:worker_ranges(Workers) ++ RespRanges,
- {Bs, Es} = lists:unzip(Ranges),
- {lists:min(Bs), lists:max(Es)}.
-
-
-get_responses([], _) ->
- [];
-
-get_responses([Range | Ranges], [{Range, Shard, Value} | Resps]) ->
- [{Shard, Value} | get_responses(Ranges, Resps)];
-
-get_responses(Ranges, [_DupeRangeResp | Resps]) ->
- get_responses(Ranges, Resps).
-
-
-stop_unused_workers(_, _, _, undefined) ->
- ok;
-
-stop_unused_workers(Workers, AllResponses, UsedResponses, CleanupCb) ->
- WorkerShards = [S || {S, _} <- Workers],
- Used = [S || {S, _} <- UsedResponses],
- Unused = [S || {_, S, _} <- AllResponses, not lists:member(S, Used)],
- CleanupCb(WorkerShards ++ Unused).
-
-
-stop_workers(Shards) when is_list(Shards) ->
- rexi:kill_all([{Node, Ref} || #shard{node = Node, ref = Ref} <- Shards]).
-
-
-% Unit tests
-
-is_progress_possible_full_range_test() ->
- % a base case
- ?assertEqual(false, is_progress_possible([], [], 0, 0, [])),
- T1 = [[0, ?RING_END]],
- ?assertEqual(true, is_progress_possible(mk_cnts(T1))),
- T2 = [[0, 10], [11, 20], [21, ?RING_END]],
- ?assertEqual(true, is_progress_possible(mk_cnts(T2))),
- % gap
- T3 = [[0, 10], [12, ?RING_END]],
- ?assertEqual(false, is_progress_possible(mk_cnts(T3))),
- % outside range
- T4 = [[1, 10], [11, 20], [21, ?RING_END]],
- ?assertEqual(false, is_progress_possible(mk_cnts(T4))),
- % outside range
- T5 = [[0, 10], [11, 20], [21, ?RING_END + 1]],
- ?assertEqual(false, is_progress_possible(mk_cnts(T5))),
- % possible progress but with backtracking
- T6 = [[0, 10], [11, 20], [0, 5], [6, 21], [21, ?RING_END]],
- ?assertEqual(true, is_progress_possible(mk_cnts(T6))),
- % not possible, overlap is not exact
- T7 = [[0, 10], [13, 20], [21, ?RING_END], [9, 12]],
- ?assertEqual(false, is_progress_possible(mk_cnts(T7))).
-
-
-is_progress_possible_with_responses_test() ->
- C1 = mk_cnts([[0, ?RING_END]]),
- ?assertEqual(true, is_progress_possible(C1, [], 0, ?RING_END, [])),
- % check for gaps
- C2 = mk_cnts([[5, 6], [7, 8]]),
- ?assertEqual(true, is_progress_possible(C2, [], 5, 8, [])),
- ?assertEqual(false, is_progress_possible(C2, [], 4, 8, [])),
- ?assertEqual(false, is_progress_possible(C2, [], 5, 7, [])),
- ?assertEqual(false, is_progress_possible(C2, [], 4, 9, [])),
- % check for uneven shard range copies
- C3 = mk_cnts([[2, 5], [2, 10]]),
- ?assertEqual(true, is_progress_possible(C3, [], 2, 10, [])),
- ?assertEqual(false, is_progress_possible(C3, [], 2, 11, [])),
- ?assertEqual(false, is_progress_possible(C3, [], 3, 10, [])),
- % they overlap but still not a proper ring
- C4 = mk_cnts([[2, 4], [3, 7], [6, 10]]),
- ?assertEqual(false, is_progress_possible(C4, [], 2, 10, [])),
- % some of the ranges are in responses
- RS1 = mk_resps([{"n1", 7, 8, 42}]),
- C5 = mk_cnts([[5, 6]]),
- ?assertEqual(true, is_progress_possible(C5, RS1, 5, 8, [])),
- ?assertEqual(false, is_progress_possible([], RS1, 5, 8, [])),
- ?assertEqual(true, is_progress_possible([], RS1, 7, 8, [])).
-
-
-is_progress_possible_with_ring_opts_test() ->
- Opts = [{any, [mk_shard("n1", [0, 5]), mk_shard("n2", [3, 10])]}],
- C1 = [{mk_shard("n1", [0, ?RING_END]), nil}],
- RS1 = mk_resps([{"n1", 3, 10, 42}]),
- ?assertEqual(false, is_progress_possible(C1, [], 0, ?RING_END, Opts)),
- ?assertEqual(false, is_progress_possible([], [], 0, ?RING_END, Opts)),
- ?assertEqual(false, is_progress_possible([], RS1, 0, ?RING_END, Opts)),
- % explicitly accept only the shard specified in the ring options
- ?assertEqual(false, is_progress_possible([], RS1, 3, 10, [{any, []}])),
- % need to match the node exactly
- ?assertEqual(false, is_progress_possible([], RS1, 3, 10, Opts)),
- RS2 = mk_resps([{"n2", 3, 10, 42}]),
- ?assertEqual(true, is_progress_possible([], RS2, 3, 10, Opts)),
- % assert that counters can fill the ring not just the response
- C2 = [{mk_shard("n1", [0, 5]), nil}],
- ?assertEqual(true, is_progress_possible(C2, [], 0, ?RING_END, Opts)).
-
-
-get_shard_replacements_test() ->
- Unused = [mk_shard(N, [B, E]) || {N, B, E} <- [
- {"n1", 11, 20}, {"n1", 21, ?RING_END},
- {"n2", 0, 4}, {"n2", 5, 10}, {"n2", 11, 20},
- {"n3", 0, 21, ?RING_END}
- ]],
- Used = [mk_shard(N, [B, E]) || {N, B, E} <- [
- {"n2", 21, ?RING_END},
- {"n3", 0, 10}, {"n3", 11, 20}
- ]],
- Res = lists:sort(get_shard_replacements_int(Unused, Used)),
-    % Notice that the [0, 10] range can be replaced by spawning the
-    % [0, 4] and [5, 10] workers on n2
- Expect = [
- {[0, 10], [mk_shard("n2", [0, 4]), mk_shard("n2", [5, 10])]},
- {[11, 20], [mk_shard("n1", [11, 20]), mk_shard("n2", [11, 20])]},
- {[21, ?RING_END], [mk_shard("n1", [21, ?RING_END])]}
- ],
- ?assertEqual(Expect, Res).
-
-
-handle_response_basic_test() ->
- Shard1 = mk_shard("n1", [0, 1]),
- Shard2 = mk_shard("n1", [2, ?RING_END]),
-
- Workers1 = fabric_dict:init([Shard1, Shard2], nil),
-
- Result1 = handle_response(Shard1, 42, Workers1, [], [], undefined),
- ?assertMatch({ok, {_, _}}, Result1),
- {ok, {Workers2, Responses1}} = Result1,
- ?assertEqual(fabric_dict:erase(Shard1, Workers1), Workers2),
- ?assertEqual([{{0, 1}, Shard1, 42}], Responses1),
-
- Result2 = handle_response(Shard2, 43, Workers2, Responses1, [], undefined),
- ?assertEqual({stop, [{Shard1, 42}, {Shard2, 43}]}, Result2).
-
-
-handle_response_incomplete_ring_test() ->
- Shard1 = mk_shard("n1", [0, 1]),
- Shard2 = mk_shard("n1", [2, 10]),
-
- Workers1 = fabric_dict:init([Shard1, Shard2], nil),
-
- Result1 = handle_response(Shard1, 42, Workers1, [], [], undefined),
- ?assertMatch({ok, {_, _}}, Result1),
- {ok, {Workers2, Responses1}} = Result1,
- ?assertEqual(fabric_dict:erase(Shard1, Workers1), Workers2),
- ?assertEqual([{{0, 1}, Shard1, 42}], Responses1),
-
- Result2 = handle_response(Shard2, 43, Workers2, Responses1, [], undefined),
- ?assertEqual({stop, [{Shard1, 42}, {Shard2, 43}]}, Result2).
-
-
-handle_response_multiple_copies_test() ->
- Shard1 = mk_shard("n1", [0, 1]),
- Shard2 = mk_shard("n2", [0, 1]),
- Shard3 = mk_shard("n1", [2, ?RING_END]),
-
- Workers1 = fabric_dict:init([Shard1, Shard2, Shard3], nil),
-
- Result1 = handle_response(Shard1, 42, Workers1, [], [], undefined),
- ?assertMatch({ok, {_, _}}, Result1),
- {ok, {Workers2, Responses1}} = Result1,
-
- Result2 = handle_response(Shard2, 43, Workers2, Responses1, [], undefined),
- ?assertMatch({ok, {_, _}}, Result2),
- {ok, {Workers3, Responses2}} = Result2,
-
- Result3 = handle_response(Shard3, 44, Workers3, Responses2, [], undefined),
-    % Use the value (42) to distinguish between the [0, 1] copies. In reality
-    % they should have the same value, but here we need to assert that the
-    % copy that responded first is included in the ring.
- ?assertEqual({stop, [{Shard1, 42}, {Shard3, 44}]}, Result3).
-
-
-handle_response_backtracking_test() ->
- Shard1 = mk_shard("n1", [0, 5]),
- Shard2 = mk_shard("n1", [10, ?RING_END]),
- Shard3 = mk_shard("n2", [2, ?RING_END]),
- Shard4 = mk_shard("n3", [0, 1]),
-
- Workers1 = fabric_dict:init([Shard1, Shard2, Shard3, Shard4], nil),
-
- Result1 = handle_response(Shard1, 42, Workers1, [], [], undefined),
- ?assertMatch({ok, {_, _}}, Result1),
- {ok, {Workers2, Responses1}} = Result1,
-
- Result2 = handle_response(Shard2, 43, Workers2, Responses1, [], undefined),
- ?assertMatch({ok, {_, _}}, Result2),
- {ok, {Workers3, Responses2}} = Result2,
-
- Result3 = handle_response(Shard3, 44, Workers3, Responses2, [], undefined),
- ?assertMatch({ok, {_, _}}, Result3),
- {ok, {Workers4, Responses3}} = Result3,
-
- Result4 = handle_response(Shard4, 45, Workers4, Responses3, [], undefined),
- ?assertEqual({stop, [{Shard3, 44}, {Shard4, 45}]}, Result4).
-
-
-handle_response_ring_opts_test() ->
- Shard1 = mk_shard("n1", [0, 5]),
- Shard2 = mk_shard("n2", [0, 1]),
- Shard3 = mk_shard("n3", [0, 1]),
-
- Opts = [{any, [mk_shard("n3", [0, 1])]}],
-
- ShardList = [Shard1, Shard2, Shard3],
- WithRefs = [S#shard{ref = make_ref()} || S <- ShardList],
- Workers1 = fabric_dict:init(WithRefs, nil),
-
- Result1 = handle_response(Shard1, 42, Workers1, [], Opts, undefined),
- ?assertMatch({ok, {_, _}}, Result1),
- {ok, {Workers2, []}} = Result1,
-
- % Still waiting because the node doesn't match
- Result2 = handle_response(Shard2, 43, Workers2, [], Opts, undefined),
- ?assertMatch({ok, {_, _}}, Result2),
- {ok, {Workers3, []}} = Result2,
-
- Result3 = handle_response(Shard3, 44, Workers3, [], Opts, undefined),
- ?assertEqual({stop, [{Shard3, 44}]}, Result3).
-
-
-handle_error_test() ->
- Shard1 = mk_shard("n1", [0, 5]),
- Shard2 = mk_shard("n1", [10, ?RING_END]),
- Shard3 = mk_shard("n2", [2, ?RING_END]),
- Shard4 = mk_shard("n3", [0, 1]),
-
- Workers1 = fabric_dict:init([Shard1, Shard2, Shard3, Shard4], nil),
-
- Result1 = handle_response(Shard1, 42, Workers1, [], [], undefined),
- ?assertMatch({ok, {_, _}}, Result1),
- {ok, {Workers2, Responses1}} = Result1,
-
- Result2 = handle_error(Shard2, Workers2, Responses1),
- ?assertMatch({ok, _}, Result2),
- {ok, Workers3} = Result2,
- ?assertEqual(fabric_dict:erase(Shard2, Workers2), Workers3),
-
- Result3 = handle_response(Shard3, 44, Workers3, Responses1, [], undefined),
- ?assertMatch({ok, {_, _}}, Result3),
- {ok, {Workers4, Responses3}} = Result3,
- ?assertEqual(error, handle_error(Shard4, Workers4, Responses3)).
-
-
-node_down_test() ->
- Shard1 = mk_shard("n1", [0, 5]),
- Shard2 = mk_shard("n1", [10, ?RING_END]),
- Shard3 = mk_shard("n2", [2, ?RING_END]),
- Shard4 = mk_shard("n3", [0, 1]),
-
- Workers1 = fabric_dict:init([Shard1, Shard2, Shard3, Shard4], nil),
-
- Result1 = handle_response(Shard1, 42, Workers1, [], [], undefined),
- ?assertMatch({ok, {_, _}}, Result1),
- {ok, {Workers2, Responses1}} = Result1,
-
- Result2 = handle_response(Shard2, 43, Workers2, Responses1, [], undefined),
- ?assertMatch({ok, {_, _}}, Result2),
- {ok, {Workers3, Responses2}} = Result2,
-
- Result3 = node_down(n1, Workers3, Responses2),
- ?assertMatch({ok, _}, Result3),
- {ok, Workers4} = Result3,
- ?assertEqual([{Shard3, nil}, {Shard4, nil}], Workers4),
-
- Result4 = handle_response(Shard3, 44, Workers4, Responses2, [], undefined),
- ?assertMatch({ok, {_, _}}, Result4),
- {ok, {Workers5, Responses3}} = Result4,
-
- % Note: Shard3 was already processed, it's ok if n2 went down after
- ?assertEqual({ok, [{Shard4, nil}]}, node_down(n2, Workers5, Responses3)),
-
- ?assertEqual(error, node_down(n3, Workers5, Responses3)).
-
-
-mk_cnts(Ranges) ->
- Shards = lists:map(fun mk_shard/1, Ranges),
- fabric_dict:init([S#shard{ref = make_ref()} || S <- Shards], nil).
-
-
-mk_resps(RangeNameVals) ->
- [{{B, E}, mk_shard(Name, [B, E]), V} || {Name, B, E, V} <- RangeNameVals].
-
-
-mk_shard([B, E]) when is_integer(B), is_integer(E) ->
- #shard{range = [B, E]}.
-
-
-mk_shard(Name, Range) ->
- Node = list_to_atom(Name),
- BName = list_to_binary(Name),
- #shard{name = BName, node = Node, range = Range}.
diff --git a/src/fabric/src/fabric_rpc.erl b/src/fabric/src/fabric_rpc.erl
deleted file mode 100644
index 6fdc76595..000000000
--- a/src/fabric/src/fabric_rpc.erl
+++ /dev/null
@@ -1,664 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_rpc).
-
--export([get_db_info/1, get_doc_count/1, get_design_doc_count/1,
- get_update_seq/1]).
--export([open_doc/3, open_revs/4, get_doc_info/3, get_full_doc_info/3,
- get_missing_revs/2, get_missing_revs/3, update_docs/3]).
--export([all_docs/3, changes/3, map_view/4, reduce_view/4, group_info/2]).
--export([create_db/1, create_db/2, delete_db/1, reset_validation_funs/1,
- set_security/3, set_revs_limit/3, create_shard_db_doc/2,
- delete_shard_db_doc/2, get_partition_info/2]).
--export([get_all_security/2, open_shard/2]).
--export([compact/1, compact/2]).
--export([get_purge_seq/2, purge_docs/3, set_purge_infos_limit/3]).
-
--export([get_db_info/2, get_doc_count/2, get_design_doc_count/2,
- get_update_seq/2, changes/4, map_view/5, reduce_view/5,
- group_info/3, update_mrview/4]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-%% rpc endpoints
-%% call to with_db will supply your M:F with a Db instance
-%% and then remaining args
-
-%% @equiv changes(DbName, Args, StartSeq, [])
-changes(DbName, Args, StartSeq) ->
- changes(DbName, Args, StartSeq, []).
-
-changes(DbName, #changes_args{} = Args, StartSeq, DbOptions) ->
- changes(DbName, [Args], StartSeq, DbOptions);
-changes(DbName, Options, StartVector, DbOptions) ->
- set_io_priority(DbName, DbOptions),
- Args0 = lists:keyfind(changes_args, 1, Options),
- #changes_args{dir=Dir, filter_fun=Filter} = Args0,
- Args = case Filter of
- {fetch, custom, Style, Req, {DDocId, Rev}, FName} ->
- {ok, DDoc} = ddoc_cache:open_doc(mem3:dbname(DbName), DDocId, Rev),
- Args0#changes_args{
- filter_fun={custom, Style, Req, DDoc, FName}
- };
- {fetch, view, Style, {DDocId, Rev}, VName} ->
- {ok, DDoc} = ddoc_cache:open_doc(mem3:dbname(DbName), DDocId, Rev),
- Args0#changes_args{filter_fun={view, Style, DDoc, VName}};
- _ ->
- Args0
- end,
-
- DbOpenOptions = Args#changes_args.db_open_options ++ DbOptions,
- case get_or_create_db(DbName, DbOpenOptions) of
- {ok, Db} ->
- StartSeq = calculate_start_seq(Db, node(), StartVector),
- Enum = fun changes_enumerator/2,
- Opts = [{dir,Dir}],
- Acc0 = #fabric_changes_acc{
- db = Db,
- seq = StartSeq,
- args = Args,
- options = Options,
- pending = couch_db:count_changes_since(Db, StartSeq),
- epochs = couch_db:get_epochs(Db)
- },
- try
- {ok, #fabric_changes_acc{seq=LastSeq, pending=Pending, epochs=Epochs}} =
- do_changes(Db, StartSeq, Enum, Acc0, Opts),
- rexi:stream_last({complete, [
- {seq, {LastSeq, uuid(Db), couch_db:owner_of(Epochs, LastSeq)}},
- {pending, Pending}
- ]})
- after
- couch_db:close(Db)
- end;
- Error ->
- rexi:stream_last(Error)
- end.
-
-do_changes(Db, StartSeq, Enum, Acc0, Opts) ->
- #fabric_changes_acc {
- args = Args
- } = Acc0,
- #changes_args {
- filter = Filter
- } = Args,
- case Filter of
- "_doc_ids" ->
-            % optimised code path: we look up all doc_ids in the by-id tree
-            % instead of filtering the entire by-seq tree to find them one by one
- #changes_args {
- filter_fun = {doc_ids, Style, DocIds},
- dir = Dir
- } = Args,
- couch_changes:send_changes_doc_ids(Db, StartSeq, Dir, Enum, Acc0, {doc_ids, Style, DocIds});
- "_design_docs" ->
-            % optimised code path: we look up all design_docs in the by-id tree
-            % instead of filtering the entire by-seq tree to find them one by one
- #changes_args {
- filter_fun = {design_docs, Style},
- dir = Dir
- } = Args,
- couch_changes:send_changes_design_docs(Db, StartSeq, Dir, Enum, Acc0, {design_docs, Style});
- _ ->
- couch_db:fold_changes(Db, StartSeq, Enum, Acc0, Opts)
- end.
-
-all_docs(DbName, Options, Args0) ->
- case fabric_util:upgrade_mrargs(Args0) of
- #mrargs{keys=undefined} = Args ->
- set_io_priority(DbName, Options),
- {ok, Db} = get_or_create_db(DbName, Options),
- CB = get_view_cb(Args),
- couch_mrview:query_all_docs(Db, Args, CB, Args)
- end.
-
-update_mrview(DbName, {DDocId, Rev}, ViewName, Args0) ->
- {ok, DDoc} = ddoc_cache:open_doc(mem3:dbname(DbName), DDocId, Rev),
- couch_util:with_db(DbName, fun(Db) ->
- UpdateSeq = couch_db:get_update_seq(Db),
- {ok, Pid, _} = couch_mrview:get_view_index_pid(
- Db, DDoc, ViewName, fabric_util:upgrade_mrargs(Args0)),
- couch_index:get_state(Pid, UpdateSeq)
- end).
-
-%% @equiv map_view(DbName, DDoc, ViewName, Args0, [])
-map_view(DbName, DDocInfo, ViewName, Args0) ->
- map_view(DbName, DDocInfo, ViewName, Args0, []).
-
-map_view(DbName, {DDocId, Rev}, ViewName, Args0, DbOptions) ->
- {ok, DDoc} = ddoc_cache:open_doc(mem3:dbname(DbName), DDocId, Rev),
- map_view(DbName, DDoc, ViewName, Args0, DbOptions);
-map_view(DbName, DDoc, ViewName, Args0, DbOptions) ->
- set_io_priority(DbName, DbOptions),
- Args = fabric_util:upgrade_mrargs(Args0),
- {ok, Db} = get_or_create_db(DbName, DbOptions),
- CB = get_view_cb(Args),
- couch_mrview:query_view(Db, DDoc, ViewName, Args, CB, Args).
-
-%% @equiv reduce_view(DbName, DDoc, ViewName, Args0)
-reduce_view(DbName, DDocInfo, ViewName, Args0) ->
- reduce_view(DbName, DDocInfo, ViewName, Args0, []).
-
-reduce_view(DbName, {DDocId, Rev}, ViewName, Args0, DbOptions) ->
- {ok, DDoc} = ddoc_cache:open_doc(mem3:dbname(DbName), DDocId, Rev),
- reduce_view(DbName, DDoc, ViewName, Args0, DbOptions);
-reduce_view(DbName, DDoc, ViewName, Args0, DbOptions) ->
- set_io_priority(DbName, DbOptions),
- Args = fabric_util:upgrade_mrargs(Args0),
- {ok, Db} = get_or_create_db(DbName, DbOptions),
- VAcc0 = #vacc{db=Db},
- couch_mrview:query_view(Db, DDoc, ViewName, Args, fun reduce_cb/2, VAcc0).
-
-create_db(DbName) ->
- create_db(DbName, []).
-
-create_db(DbName, Options) ->
- rexi:reply(case couch_server:create(DbName, Options) of
- {ok, _} ->
- ok;
- Error ->
- Error
- end).
-
-create_shard_db_doc(_, Doc) ->
- rexi:reply(mem3_util:write_db_doc(Doc)).
-
-delete_db(DbName) ->
- couch_server:delete(DbName, []).
-
-delete_shard_db_doc(_, DocId) ->
- rexi:reply(mem3_util:delete_db_doc(DocId)).
-
-%% @equiv get_db_info(DbName, [])
-get_db_info(DbName) ->
- get_db_info(DbName, []).
-
-get_db_info(DbName, DbOptions) ->
- with_db(DbName, DbOptions, {couch_db, get_db_info, []}).
-
-get_partition_info(DbName, Partition) ->
- with_db(DbName, [], {couch_db, get_partition_info, [Partition]}).
-
-%% @equiv get_doc_count(DbName, [])
-get_doc_count(DbName) ->
- get_doc_count(DbName, []).
-
-get_doc_count(DbName, DbOptions) ->
- with_db(DbName, DbOptions, {couch_db, get_doc_count, []}).
-
-%% @equiv get_design_doc_count(DbName, [])
-get_design_doc_count(DbName) ->
- get_design_doc_count(DbName, []).
-
-get_design_doc_count(DbName, DbOptions) ->
- with_db(DbName, DbOptions, {couch_db, get_design_doc_count, []}).
-
-%% @equiv get_update_seq(DbName, [])
-get_update_seq(DbName) ->
- get_update_seq(DbName, []).
-
-get_update_seq(DbName, DbOptions) ->
- with_db(DbName, DbOptions, {couch_db, get_update_seq, []}).
-
-set_security(DbName, SecObj, Options0) ->
- Options = case lists:keyfind(io_priority, 1, Options0) of
- false ->
- [{io_priority, {db_meta, security}}|Options0];
- _ ->
- Options0
- end,
- with_db(DbName, Options, {couch_db, set_security, [SecObj]}).
-
-get_all_security(DbName, Options) ->
- with_db(DbName, Options, {couch_db, get_security, []}).
-
-set_revs_limit(DbName, Limit, Options) ->
- with_db(DbName, Options, {couch_db, set_revs_limit, [Limit]}).
-
-set_purge_infos_limit(DbName, Limit, Options) ->
- with_db(DbName, Options, {couch_db, set_purge_infos_limit, [Limit]}).
-
-open_doc(DbName, DocId, Options) ->
- with_db(DbName, Options, {couch_db, open_doc, [DocId, Options]}).
-
-open_revs(DbName, Id, Revs, Options) ->
- with_db(DbName, Options, {couch_db, open_doc_revs, [Id, Revs, Options]}).
-
-get_full_doc_info(DbName, DocId, Options) ->
- with_db(DbName, Options, {couch_db, get_full_doc_info, [DocId]}).
-
-get_doc_info(DbName, DocId, Options) ->
- with_db(DbName, Options, {couch_db, get_doc_info, [DocId]}).
-
-get_missing_revs(DbName, IdRevsList) ->
- get_missing_revs(DbName, IdRevsList, []).
-
-get_missing_revs(DbName, IdRevsList, Options) ->
- % reimplement here so we get [] for Ids with no missing revs in response
- set_io_priority(DbName, Options),
- rexi:reply(case get_or_create_db(DbName, Options) of
- {ok, Db} ->
- Ids = [Id1 || {Id1, _Revs} <- IdRevsList],
- {ok, lists:zipwith(fun({Id, Revs}, FullDocInfoResult) ->
- case FullDocInfoResult of
- #full_doc_info{rev_tree=RevisionTree} = FullInfo ->
- MissingRevs = couch_key_tree:find_missing(RevisionTree, Revs),
- {Id, MissingRevs, possible_ancestors(FullInfo, MissingRevs)};
- not_found ->
- {Id, Revs, []}
- end
- end, IdRevsList, couch_db:get_full_doc_infos(Db, Ids))};
- Error ->
- Error
- end).
-
-update_docs(DbName, Docs0, Options) ->
- {Docs1, Type} = case couch_util:get_value(read_repair, Options) of
- NodeRevs when is_list(NodeRevs) ->
- Filtered = read_repair_filter(DbName, Docs0, NodeRevs, Options),
- {Filtered, replicated_changes};
- undefined ->
- X = case proplists:get_value(replicated_changes, Options) of
- true -> replicated_changes;
- _ -> interactive_edit
- end,
- {Docs0, X}
- end,
- Docs2 = make_att_readers(Docs1),
- with_db(DbName, Options, {couch_db, update_docs, [Docs2, Options, Type]}).
-
-
-get_purge_seq(DbName, Options) ->
- with_db(DbName, Options, {couch_db, get_purge_seq, []}).
-
-purge_docs(DbName, UUIdsIdsRevs, Options) ->
- with_db(DbName, Options, {couch_db, purge_docs, [UUIdsIdsRevs, Options]}).
-
-%% @equiv group_info(DbName, DDocId, [])
-group_info(DbName, DDocId) ->
- group_info(DbName, DDocId, []).
-
-group_info(DbName, DDocId, DbOptions) ->
- with_db(DbName, DbOptions, {couch_mrview, get_info, [DDocId]}).
-
-reset_validation_funs(DbName) ->
- case get_or_create_db(DbName, []) of
- {ok, Db} ->
- couch_db:reload_validation_funs(Db);
- _ ->
- ok
- end.
-
-open_shard(Name, Opts) ->
- set_io_priority(Name, Opts),
- try
- rexi:reply(couch_db:open(Name, Opts))
- catch exit:{timeout, _} ->
- couch_stats:increment_counter([fabric, open_shard, timeouts])
- end.
-
-compact(DbName) ->
- with_db(DbName, [], {couch_db, start_compact, []}).
-
-compact(ShardName, DesignName) ->
- {ok, Pid} = couch_index_server:get_index(
- couch_mrview_index, ShardName, <<"_design/", DesignName/binary>>),
- Ref = erlang:make_ref(),
- Pid ! {'$gen_call', {self(), Ref}, compact}.
-
-%%
-%% internal
-%%
-
-with_db(DbName, Options, {M,F,A}) ->
- set_io_priority(DbName, Options),
- case get_or_create_db(DbName, Options) of
- {ok, Db} ->
- rexi:reply(try
- apply(M, F, [Db | A])
- catch Exception ->
- Exception;
- error:Reason ->
- couch_log:error("rpc ~p:~p/~p ~p ~p", [M, F, length(A)+1, Reason,
- clean_stack()]),
- {error, Reason}
- end);
- Error ->
- rexi:reply(Error)
- end.
-
-
-read_repair_filter(DbName, Docs, NodeRevs, Options) ->
- set_io_priority(DbName, Options),
- case get_or_create_db(DbName, Options) of
- {ok, Db} ->
- try
- read_repair_filter(Db, Docs, NodeRevs)
- after
- couch_db:close(Db)
- end;
- Error ->
- rexi:reply(Error)
- end.
-
-
-% A read repair operation may have been triggered by a node
-% that was out of sync with the local node. Thus, any time
-% we receive a read repair request we need to check if we
-% may have recently purged any of the given revisions and
-% ignore them if so.
-%
-% This is accomplished by looking at the purge infos that we
-% have locally that have not been replicated to the remote
-% node. The logic here is that we may have received the purge
-% request before the remote shard copy. So to check that we
-% need to look at the purge infos that we have locally but
-% have not yet sent to the remote copy.
-%
-% NodeRevs is a list of the {node(), [rev()]} tuples passed
-% as the read_repair option to update_docs.
-read_repair_filter(Db, Docs, NodeRevs) ->
- [#doc{id = DocId} | _] = Docs,
- NonLocalNodeRevs = [NR || {N, _} = NR <- NodeRevs, N /= node()],
- Nodes = lists:usort([Node || {Node, _} <- NonLocalNodeRevs]),
- NodeSeqs = get_node_seqs(Db, Nodes),
-
- DbPSeq = couch_db:get_purge_seq(Db),
- Lag = config:get_integer("couchdb", "read_repair_lag", 100),
-
- % Filter out read-repair updates from any node that is
- % so out of date that it would force us to scan a large
- % number of purge infos
- NodeFiltFun = fun({Node, _Revs}) ->
- {Node, NodeSeq} = lists:keyfind(Node, 1, NodeSeqs),
- NodeSeq >= DbPSeq - Lag
- end,
- RecentNodeRevs = lists:filter(NodeFiltFun, NonLocalNodeRevs),
-
- % For each node we scan the purge infos to filter out any
- % revisions that have been locally purged since we last
- % replicated to the remote node's shard copy.
- AllowableRevs = lists:foldl(fun({Node, Revs}, RevAcc) ->
- {Node, StartSeq} = lists:keyfind(Node, 1, NodeSeqs),
- FoldFun = fun({_PSeq, _UUID, PDocId, PRevs}, InnerAcc) ->
- if PDocId /= DocId -> {ok, InnerAcc}; true ->
- {ok, InnerAcc -- PRevs}
- end
- end,
- {ok, FiltRevs} = couch_db:fold_purge_infos(Db, StartSeq, FoldFun, Revs),
- lists:usort(FiltRevs ++ RevAcc)
- end, [], RecentNodeRevs),
-
- % Finally, filter the doc updates to only include revisions
- % that have not been purged locally.
- DocFiltFun = fun(#doc{revs = {Pos, [Rev | _]}}) ->
- lists:member({Pos, Rev}, AllowableRevs)
- end,
- lists:filter(DocFiltFun, Docs).
-
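-% A sketch of the read_repair option shape this filter consumes; the node
-% name and revision below are hypothetical examples:
-%
-%   Options = [{read_repair, [{'node2@127.0.0.1', [{1, <<"abc">>}]}]}],
-%   update_docs(DbName, Docs, Options)
-%
-% Revisions purged locally since the purge checkpoint recorded for that
-% node are dropped from Docs before the update is applied.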
-
-get_node_seqs(Db, Nodes) ->
- % Gather the list of {Node, PurgeSeq} pairs for all nodes
- % that are present in our read repair group
- FoldFun = fun(#doc{id = Id, body = {Props}}, Acc) ->
- case Id of
- <<?LOCAL_DOC_PREFIX, "purge-mem3-", _/binary>> ->
- TgtNode = couch_util:get_value(<<"target_node">>, Props),
- PurgeSeq = couch_util:get_value(<<"purge_seq">>, Props),
- case lists:keyfind(TgtNode, 1, Acc) of
- {_, OldSeq} ->
- NewSeq = erlang:max(OldSeq, PurgeSeq),
- NewEntry = {TgtNode, NewSeq},
- NewAcc = lists:keyreplace(TgtNode, 1, Acc, NewEntry),
- {ok, NewAcc};
- false ->
- {ok, Acc}
- end;
- _ ->
- % We've processed all _local mem3 purge docs
- {stop, Acc}
- end
- end,
- InitAcc = [{list_to_binary(atom_to_list(Node)), 0} || Node <- Nodes],
- Opts = [{start_key, <<?LOCAL_DOC_PREFIX, "purge-mem3-">>}],
- {ok, NodeBinSeqs} = couch_db:fold_local_docs(Db, FoldFun, InitAcc, Opts),
- [{list_to_existing_atom(binary_to_list(N)), S} || {N, S} <- NodeBinSeqs].
-
-
-
-get_or_create_db(DbName, Options) ->
- mem3_util:get_or_create_db(DbName, Options).
-
-
-get_view_cb(#mrargs{extra = Options}) ->
- case couch_util:get_value(callback, Options) of
- {Mod, Fun} when is_atom(Mod), is_atom(Fun) ->
- fun Mod:Fun/2;
- _ ->
- fun view_cb/2
- end;
-get_view_cb(_) ->
- fun view_cb/2.
-
-
-view_cb({meta, Meta}, Acc) ->
- % Map function starting
- ok = rexi:stream2({meta, Meta}),
- {ok, Acc};
-view_cb({row, Row}, Acc) ->
- % Adding another row
- ViewRow = #view_row{
- id = couch_util:get_value(id, Row),
- key = couch_util:get_value(key, Row),
- value = couch_util:get_value(value, Row),
- doc = couch_util:get_value(doc, Row)
- },
- ok = rexi:stream2(ViewRow),
- {ok, Acc};
-view_cb(complete, Acc) ->
- % Finish view output
- ok = rexi:stream_last(complete),
- {ok, Acc};
-view_cb(ok, ddoc_updated) ->
- rexi:reply({ok, ddoc_updated}).
-
-
-reduce_cb({meta, Meta}, Acc) ->
- % Reduce function starting
- ok = rexi:stream2({meta, Meta}),
- {ok, Acc};
-reduce_cb({row, Row}, Acc) ->
- % Adding another row
- ok = rexi:stream2(#view_row{
- key = couch_util:get_value(key, Row),
- value = couch_util:get_value(value, Row)
- }),
- {ok, Acc};
-reduce_cb(complete, Acc) ->
- % Finish view output
- ok = rexi:stream_last(complete),
- {ok, Acc};
-reduce_cb(ok, ddoc_updated) ->
- rexi:reply({ok, ddoc_updated}).
-
-
-changes_enumerator(#full_doc_info{} = FDI, Acc) ->
- changes_enumerator(couch_doc:to_doc_info(FDI), Acc);
-changes_enumerator(#doc_info{id= <<"_local/", _/binary>>, high_seq=Seq}, Acc) ->
- {ok, Acc#fabric_changes_acc{seq = Seq, pending = Acc#fabric_changes_acc.pending-1}};
-changes_enumerator(DocInfo, Acc) ->
- #fabric_changes_acc{
- db = Db,
- args = #changes_args{
- include_docs = IncludeDocs,
- conflicts = Conflicts,
- filter_fun = Filter,
- doc_options = DocOptions
- },
- pending = Pending,
- epochs = Epochs
- } = Acc,
- #doc_info{id=Id, high_seq=Seq, revs=[#rev_info{deleted=Del}|_]} = DocInfo,
- case [X || X <- couch_changes:filter(Db, DocInfo, Filter), X /= null] of
- [] ->
- ChangesRow = {no_pass, [
- {pending, Pending-1},
- {seq, {Seq, uuid(Db), couch_db:owner_of(Epochs, Seq)}}
- ]};
- Results ->
- Opts = if Conflicts -> [conflicts | DocOptions]; true -> DocOptions end,
- ChangesRow = {change, [
- {pending, Pending-1},
- {seq, {Seq, uuid(Db), couch_db:owner_of(Epochs, Seq)}},
- {id, Id},
- {changes, Results},
- {deleted, Del} |
- if IncludeDocs -> [doc_member(Db, DocInfo, Opts, Filter)]; true -> [] end
- ]}
- end,
- ok = rexi:stream2(ChangesRow),
- {ok, Acc#fabric_changes_acc{seq = Seq, pending = Pending-1}}.
-
-doc_member(Shard, DocInfo, Opts, Filter) ->
- case couch_db:open_doc(Shard, DocInfo, [deleted | Opts]) of
- {ok, Doc} ->
- {doc, maybe_filtered_json_doc(Doc, Opts, Filter)};
- Error ->
- Error
- end.
-
-maybe_filtered_json_doc(Doc, Opts, {selector, _Style, {_Selector, Fields}})
- when Fields =/= nil ->
- mango_fields:extract(couch_doc:to_json_obj(Doc, Opts), Fields);
-maybe_filtered_json_doc(Doc, Opts, _Filter) ->
- couch_doc:to_json_obj(Doc, Opts).
-
-
-possible_ancestors(_FullInfo, []) ->
- [];
-possible_ancestors(FullInfo, MissingRevs) ->
- #doc_info{revs=RevsInfo} = couch_doc:to_doc_info(FullInfo),
- LeafRevs = [Rev || #rev_info{rev=Rev} <- RevsInfo],
- % Find the revs that are possible parents of this rev
- lists:foldl(fun({LeafPos, LeafRevId}, Acc) ->
- % this leaf is a "possible ancestor" of the missing
- % revs if this LeafPos is less than any of the missing revs
- case lists:any(fun({MissingPos, _}) ->
- LeafPos < MissingPos end, MissingRevs) of
- true ->
- [{LeafPos, LeafRevId} | Acc];
- false ->
- Acc
- end
- end, [], LeafRevs).
-
-make_att_readers([]) ->
- [];
-make_att_readers([#doc{atts=Atts0} = Doc | Rest]) ->
- % Go through the attachments looking for 'follows' in the data,
- % and replace it with a function that reads the data from the MIME stream.
- Atts = [couch_att:transform(data, fun make_att_reader/1, Att) || Att <- Atts0],
- [Doc#doc{atts = Atts} | make_att_readers(Rest)].
-
-make_att_reader({follows, Parser, Ref}) ->
- fun() ->
- ParserRef = case get(mp_parser_ref) of
- undefined ->
- PRef = erlang:monitor(process, Parser),
- put(mp_parser_ref, PRef),
- PRef;
- Else ->
- Else
- end,
- Parser ! {get_bytes, Ref, self()},
- receive
- {bytes, Ref, Bytes} ->
- rexi:reply(attachment_chunk_received),
- Bytes;
- {'DOWN', ParserRef, _, _, Reason} ->
- throw({mp_parser_died, Reason})
- end
- end;
-make_att_reader({fabric_attachment_receiver, Middleman, Length}) ->
- fabric_doc_atts:receiver_callback(Middleman, Length);
-make_att_reader(Else) ->
- Else.
-
-clean_stack() ->
- lists:map(fun({M,F,A}) when is_list(A) -> {M,F,length(A)}; (X) -> X end,
- erlang:get_stacktrace()).
-
-set_io_priority(DbName, Options) ->
- case lists:keyfind(io_priority, 1, Options) of
- {io_priority, Pri} ->
- erlang:put(io_priority, Pri);
- false ->
- erlang:put(io_priority, {interactive, DbName})
- end,
- case erlang:get(io_priority) of
- {interactive, _} ->
- case config:get("couchdb", "maintenance_mode", "false") of
- "true" ->
- % Done to silence error logging by rexi_server
- rexi:reply({rexi_EXIT, {maintenance_mode, node()}}),
- exit(normal);
- _ ->
- ok
- end;
- _ ->
- ok
- end.
-
-
-calculate_start_seq(Db, Node, Seq) ->
- case couch_db:calculate_start_seq(Db, Node, Seq) of
- N when is_integer(N) ->
- N;
- {replace, OriginalNode, Uuid, OriginalSeq} ->
- %% Scan history looking for an entry with
- %% * target_node == TargetNode
- %% * target_uuid == TargetUUID
- %% * target_seq =< TargetSeq
- %% If such an entry is found, stream from associated source_seq
- mem3_rep:find_source_seq(Db, OriginalNode, Uuid, OriginalSeq)
- end.
-
-
-uuid(Db) ->
- Uuid = couch_db:get_uuid(Db),
- binary:part(Uuid, {0, uuid_prefix_len()}).
-
-uuid_prefix_len() ->
- list_to_integer(config:get("fabric", "uuid_prefix_len", "7")).
-
-%% -ifdef(TEST).
-%% -include_lib("eunit/include/eunit.hrl").
-%%
-%% maybe_filtered_json_doc_no_filter_test() ->
-%% Body = {[{<<"a">>, 1}]},
-%% Doc = #doc{id = <<"1">>, revs = {1, [<<"r1">>]}, body = Body},
-%% {JDocProps} = maybe_filtered_json_doc(Doc, [], x),
-%% ExpectedProps = [{<<"_id">>, <<"1">>}, {<<"_rev">>, <<"1-r1">>}, {<<"a">>, 1}],
-%% ?assertEqual(lists:keysort(1, JDocProps), ExpectedProps).
-%%
-%% maybe_filtered_json_doc_with_filter_test() ->
-%% Body = {[{<<"a">>, 1}]},
-%% Doc = #doc{id = <<"1">>, revs = {1, [<<"r1">>]}, body = Body},
-%% Fields = [<<"a">>, <<"nonexistent">>],
-%% Filter = {selector, main_only, {some_selector, Fields}},
-%% {JDocProps} = maybe_filtered_json_doc(Doc, [], Filter),
-%% ?assertEqual(JDocProps, [{<<"a">>, 1}]).
-%%
-%% -endif.
diff --git a/src/fabric/src/fabric_streams.erl b/src/fabric/src/fabric_streams.erl
deleted file mode 100644
index 98e285081..000000000
--- a/src/fabric/src/fabric_streams.erl
+++ /dev/null
@@ -1,274 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_streams).
-
--export([
- start/2,
- start/3,
- start/4,
- start/5,
- cleanup/1
-]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-
--define(WORKER_CLEANER, fabric_worker_cleaner).
-
-
-start(Workers, Keypos) ->
- start(Workers, Keypos, undefined, undefined).
-
-
-start(Workers, Keypos, RingOpts) ->
- start(Workers, Keypos, undefined, undefined, RingOpts).
-
-
-start(Workers, Keypos, StartFun, Replacements) ->
- start(Workers, Keypos, StartFun, Replacements, []).
-
-
-start(Workers0, Keypos, StartFun, Replacements, RingOpts) ->
- Fun = fun handle_stream_start/3,
- Acc = #stream_acc{
- workers = fabric_dict:init(Workers0, waiting),
- ready = [],
- start_fun = StartFun,
- replacements = Replacements,
- ring_opts = RingOpts
- },
- spawn_worker_cleaner(self(), Workers0),
- Timeout = fabric_util:request_timeout(),
- case rexi_utils:recv(Workers0, Keypos, Fun, Acc, Timeout, infinity) of
- {ok, #stream_acc{ready = Workers}} ->
- AckedWorkers = fabric_dict:fold(fun(Worker, From, WorkerAcc) ->
- rexi:stream_start(From),
- [Worker | WorkerAcc]
- end, [], Workers),
- {ok, AckedWorkers};
- Else ->
- Else
- end.
-
-
-cleanup(Workers) ->
- % Stop the auxiliary cleaner process as we have reached the point where cleanup
- % happens in the regular fashion, so we don't want to send twice the number of
- % kill messages
- case get(?WORKER_CLEANER) of
- CleanerPid when is_pid(CleanerPid) ->
- erase(?WORKER_CLEANER),
- exit(CleanerPid, kill);
- _ ->
- ok
- end,
- fabric_util:cleanup(Workers).
-
-
-handle_stream_start({rexi_DOWN, _, {_, NodeRef}, _}, _, St) ->
- #stream_acc{workers = Workers, ready = Ready, ring_opts = RingOpts} = St,
- case fabric_ring:node_down(NodeRef, Workers, Ready, RingOpts) of
- {ok, Workers1} ->
- {ok, St#stream_acc{workers = Workers1}};
- error ->
- {error, {nodedown, <<"progress not possible">>}}
- end;
-
-handle_stream_start({rexi_EXIT, Reason}, Worker, St) ->
- #stream_acc{
- workers = Workers,
- ready = Ready,
- replacements = Replacements,
- ring_opts = RingOpts
- } = St,
- case {fabric_ring:handle_error(Worker, Workers, Ready, RingOpts), Reason} of
- {{ok, Workers1}, _Reason} ->
- {ok, St#stream_acc{workers = Workers1}};
- {error, {maintenance_mode, _Node}} when Replacements /= undefined ->
- % Check if we have replacements for this range
- % and start the new workers if so.
- case lists:keytake(Worker#shard.range, 1, Replacements) of
- {value, {_Range, WorkerReplacements}, NewReplacements} ->
- FinalWorkers = lists:foldl(fun(Repl, NewWorkers) ->
- NewWorker = (St#stream_acc.start_fun)(Repl),
- add_worker_to_cleaner(self(), NewWorker),
- fabric_dict:store(NewWorker, waiting, NewWorkers)
- end, Workers, WorkerReplacements),
- % Assert that our replaced worker provides us
- % the opportunity to make progress. Need to make sure
- % to include already processed responses, since we are
- % checking the full range and some workers have already
- % responded and were removed from the workers list
- ReadyWorkers = [{W, R} || {_, W, R} <- Ready],
- AllWorkers = FinalWorkers ++ ReadyWorkers,
- true = fabric_ring:is_progress_possible(AllWorkers),
- NewRefs = fabric_dict:fetch_keys(FinalWorkers),
- {new_refs, NewRefs, St#stream_acc{
- workers=FinalWorkers,
- replacements=NewReplacements
- }};
- false ->
- % If progress isn't possible and we don't have any
- % replacements then we're dead in the water.
- {error, {nodedown, <<"progress not possible">>}}
- end;
- {error, _} ->
- {error, fabric_util:error_info(Reason)}
- end;
-
-handle_stream_start(rexi_STREAM_INIT, {Worker, From}, St) ->
- #stream_acc{workers = Workers, ready = Ready, ring_opts = RingOpts} = St,
- case fabric_dict:lookup_element(Worker, Workers) of
- undefined ->
- % This worker lost the race with other partition copies, terminate
- rexi:stream_cancel(From),
- {ok, St};
- waiting ->
- case fabric_ring:handle_response(Worker, From, Workers, Ready, RingOpts) of
- {ok, {Workers1, Ready1}} ->
- % Don't have a full ring yet. Keep getting responses
- {ok, St#stream_acc{workers = Workers1, ready = Ready1}};
- {stop, Ready1} ->
- % Have a full ring of workers. But don't ack the worker
- % yet so they don't start sending us rows until we're ready
- {stop, St#stream_acc{workers = [], ready = Ready1}}
- end
- end;
-
-handle_stream_start({ok, ddoc_updated}, _, St) ->
- WaitingWorkers = [W || {W, _} <- St#stream_acc.workers],
- ReadyWorkers = [W || {W, _} <- St#stream_acc.ready],
- cleanup(WaitingWorkers ++ ReadyWorkers),
- {stop, ddoc_updated};
-
-handle_stream_start(Else, _, _) ->
- exit({invalid_stream_start, Else}).
-
-
-% Spawn an auxiliary rexi worker cleaner. This will be used in cases
-% when the coordinator (request) process is forcibly killed and doesn't
-% get a chance to process its `after` fabric:clean/1 clause.
-spawn_worker_cleaner(Coordinator, Workers) ->
- case get(?WORKER_CLEANER) of
- undefined ->
- Pid = spawn(fun() ->
- erlang:monitor(process, Coordinator),
- cleaner_loop(Coordinator, Workers)
- end),
- put(?WORKER_CLEANER, Pid),
- Pid;
- ExistingCleaner ->
- ExistingCleaner
- end.
-
-
-cleaner_loop(Pid, Workers) ->
- receive
- {add_worker, Pid, Worker} ->
- cleaner_loop(Pid, [Worker | Workers]);
- {'DOWN', _, _, Pid, _} ->
- fabric_util:cleanup(Workers)
- end.
-
-
-add_worker_to_cleaner(CoordinatorPid, Worker) ->
- case get(?WORKER_CLEANER) of
- CleanerPid when is_pid(CleanerPid) ->
- CleanerPid ! {add_worker, CoordinatorPid, Worker};
- _ ->
- ok
- end.
-
-
-
-
-%% -ifdef(TEST).
-%%
-%% -include_lib("eunit/include/eunit.hrl").
-%%
-%% worker_cleaner_test_() ->
-%% {
-%% "Fabric spawn_worker_cleaner test", {
-%% setup, fun setup/0, fun teardown/1,
-%% fun(_) -> [
-%% should_clean_workers(),
-%% does_not_fire_if_cleanup_called(),
-%% should_clean_additional_worker_too()
-%% ] end
-%% }
-%% }.
-%%
-%%
-%% should_clean_workers() ->
-%% ?_test(begin
-%% meck:reset(rexi),
-%% erase(?WORKER_CLEANER),
-%% Workers = [
-%% #shard{node = 'n1', ref = make_ref()},
-%% #shard{node = 'n2', ref = make_ref()}
-%% ],
-%% {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
-%% Cleaner = spawn_worker_cleaner(Coord, Workers),
-%% Ref = erlang:monitor(process, Cleaner),
-%% Coord ! die,
-%% receive {'DOWN', Ref, _, Cleaner, _} -> ok end,
-%% ?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
-%% end).
-%%
-%%
-%% does_not_fire_if_cleanup_called() ->
-%% ?_test(begin
-%% meck:reset(rexi),
-%% erase(?WORKER_CLEANER),
-%% Workers = [
-%% #shard{node = 'n1', ref = make_ref()},
-%% #shard{node = 'n2', ref = make_ref()}
-%% ],
-%% {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
-%% Cleaner = spawn_worker_cleaner(Coord, Workers),
-%% Ref = erlang:monitor(process, Cleaner),
-%% cleanup(Workers),
-%% Coord ! die,
-%% receive {'DOWN', Ref, _, _, _} -> ok end,
-%% % 2 calls would be from cleanup/1 function. If cleanup process fired
-%% % too it would have been 4 calls total.
-%% ?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
-%% end).
-%%
-%%
-%% should_clean_additional_worker_too() ->
-%% ?_test(begin
-%% meck:reset(rexi),
-%% erase(?WORKER_CLEANER),
-%% Workers = [
-%% #shard{node = 'n1', ref = make_ref()}
-%% ],
-%% {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
-%% Cleaner = spawn_worker_cleaner(Coord, Workers),
-%% add_worker_to_cleaner(Coord, #shard{node = 'n2', ref = make_ref()}),
-%% Ref = erlang:monitor(process, Cleaner),
-%% Coord ! die,
-%% receive {'DOWN', Ref, _, Cleaner, _} -> ok end,
-%% ?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
-%% end).
-%%
-%%
-%% setup() ->
-%% ok = meck:expect(rexi, kill_all, fun(_) -> ok end).
-%%
-%%
-%% teardown(_) ->
-%% meck:unload().
-%%
-%% -endif.
diff --git a/src/fabric/src/fabric_util.erl b/src/fabric/src/fabric_util.erl
deleted file mode 100644
index 1c1ee80b7..000000000
--- a/src/fabric/src/fabric_util.erl
+++ /dev/null
@@ -1,347 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_util).
-
--export([submit_jobs/3, submit_jobs/4, cleanup/1, recv/4, get_db/1, get_db/2, error_info/1,
- update_counter/3, remove_ancestors/2, create_monitors/1, kv/2,
- remove_down_workers/2, remove_down_workers/3, doc_id_and_rev/1]).
--export([request_timeout/0, attachments_timeout/0, all_docs_timeout/0, view_timeout/1]).
--export([log_timeout/2, remove_done_workers/2]).
--export([is_users_db/1, is_replicator_db/1]).
--export([open_cluster_db/1, open_cluster_db/2]).
--export([is_partitioned/1]).
--export([validate_all_docs_args/2, validate_args/3]).
--export([upgrade_mrargs/1]).
--export([worker_ranges/1]).
-
--compile({inline, [{doc_id_and_rev,1}]}).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
--include_lib("eunit/include/eunit.hrl").
-
-remove_down_workers(Workers, BadNode) ->
- remove_down_workers(Workers, BadNode, []).
-
-remove_down_workers(Workers, BadNode, RingOpts) ->
- Filter = fun(#shard{node = Node}, _) -> Node =/= BadNode end,
- NewWorkers = fabric_dict:filter(Filter, Workers),
- case fabric_ring:is_progress_possible(NewWorkers, RingOpts) of
- true ->
- {ok, NewWorkers};
- false ->
- error
- end.
-
-submit_jobs(Shards, EndPoint, ExtraArgs) ->
- submit_jobs(Shards, fabric_rpc, EndPoint, ExtraArgs).
-
-submit_jobs(Shards, Module, EndPoint, ExtraArgs) ->
- lists:map(fun(#shard{node=Node, name=ShardName} = Shard) ->
- Ref = rexi:cast(Node, {Module, EndPoint, [ShardName | ExtraArgs]}),
- Shard#shard{ref = Ref}
- end, Shards).
-
-cleanup(Workers) ->
- rexi:kill_all([{Node, Ref} || #shard{node = Node, ref = Ref} <- Workers]).
-
-recv(Workers, Keypos, Fun, Acc0) ->
- rexi_utils:recv(Workers, Keypos, Fun, Acc0, request_timeout(), infinity).
-
-request_timeout() ->
- timeout("request", "60000").
-
-all_docs_timeout() ->
- timeout("all_docs", "10000").
-
-attachments_timeout() ->
- timeout("attachments", "600000").
-
-view_timeout(Args) ->
- PartitionQuery = couch_mrview_util:get_extra(Args, partition, false),
- case PartitionQuery of
- false -> timeout("view", "infinity");
- _ -> timeout("partition_view", "infinity")
- end.
-
-timeout(Type, Default) ->
- case config:get("fabric", Type ++ "_timeout", Default) of
- "infinity" -> infinity;
- N -> list_to_integer(N)
- end.
-
-log_timeout(Workers, EndPoint) ->
- CounterKey = [fabric, worker, timeouts],
- couch_stats:increment_counter(CounterKey),
- lists:map(fun(#shard{node=Dest, name=Name}) ->
- Fmt = "fabric_worker_timeout ~s,~p,~p",
- couch_log:error(Fmt, [EndPoint, Dest, Name])
- end, Workers).
-
-remove_done_workers(Workers, WaitingIndicator) ->
- [W || {W, WI} <- fabric_dict:to_list(Workers), WI == WaitingIndicator].
-
-get_db(DbName) ->
- get_db(DbName, []).
-
-get_db(DbName, Options) ->
- {Local, SameZone, DifferentZone} = mem3:group_by_proximity(mem3:shards(DbName)),
- % Prefer shards on the same node over other nodes, prefer shards in the same zone over
- % other zones, and sort each remote list by name so that we don't repeatedly try the same node.
- Shards = Local ++ lists:keysort(#shard.name, SameZone) ++ lists:keysort(#shard.name, DifferentZone),
- % suppress shards from down nodes
- Nodes = [node()|erlang:nodes()],
- Live = [S || #shard{node = N} = S <- Shards, lists:member(N, Nodes)],
- Factor = list_to_integer(config:get("fabric", "shard_timeout_factor", "2")),
- get_shard(Live, [{create_if_missing, true} | Options], 100, Factor).
-
-get_shard([], _Opts, _Timeout, _Factor) ->
- erlang:error({internal_server_error, "No DB shards could be opened."});
-get_shard([#shard{node = Node, name = Name} | Rest], Opts, Timeout, Factor) ->
- Mon = rexi_monitor:start([rexi_utils:server_pid(Node)]),
- MFA = {fabric_rpc, open_shard, [Name, [{timeout, Timeout} | Opts]]},
- Ref = rexi:cast(Node, self(), MFA, [sync]),
- try
- receive {Ref, {ok, Db}} ->
- {ok, Db};
- {Ref, {'rexi_EXIT', {{unauthorized, _} = Error, _}}} ->
- throw(Error);
- {Ref, {'rexi_EXIT', {{forbidden, _} = Error, _}}} ->
- throw(Error);
- {Ref, Reason} ->
- couch_log:debug("Failed to open shard ~p because: ~p", [Name, Reason]),
- get_shard(Rest, Opts, Timeout, Factor)
- after Timeout ->
- couch_log:debug("Failed to open shard ~p after: ~p", [Name, Timeout]),
- get_shard(Rest, Opts, Factor * Timeout, Factor)
- end
- after
- rexi_monitor:stop(Mon)
- end.
-
-error_info({{timeout, _} = Error, _Stack}) ->
- Error;
-error_info({{Error, Reason}, Stack}) ->
- {Error, Reason, Stack};
-error_info({Error, Stack}) ->
- {Error, nil, Stack}.
-
-update_counter(Item, Incr, D) ->
- UpdateFun = fun ({Old, Count}) -> {Old, Count + Incr} end,
- orddict:update(make_key(Item), UpdateFun, {Item, Incr}, D).
-
-make_key({ok, L}) when is_list(L) ->
- make_key(L);
-make_key([]) ->
- [];
-make_key([{ok, #doc{revs= {Pos,[RevId | _]}}} | Rest]) ->
- [{ok, {Pos, RevId}} | make_key(Rest)];
-make_key([{{not_found, missing}, Rev} | Rest]) ->
- [{not_found, Rev} | make_key(Rest)];
-make_key({ok, #doc{id=Id,revs=Revs}}) ->
- {Id, Revs};
-make_key(Else) ->
- Else.
-
-% this presumes the incoming list is sorted, i.e. shorter revlists come first
-remove_ancestors([], Acc) ->
- lists:reverse(Acc);
-remove_ancestors([{_, {{not_found, _}, Count}} = Head | Tail], Acc) ->
- % any document is a descendant
- case lists:filter(fun({_,{{ok, #doc{}}, _}}) -> true; (_) -> false end, Tail) of
- [{_,{{ok, #doc{}} = Descendant, _}} | _] ->
- remove_ancestors(update_counter(Descendant, Count, Tail), Acc);
- [] ->
- remove_ancestors(Tail, [Head | Acc])
- end;
-remove_ancestors([{_,{{ok, #doc{revs = {Pos, Revs}}}, Count}} = Head | Tail], Acc) ->
- Descendants = lists:dropwhile(fun
- ({_,{{ok, #doc{revs = {Pos2, Revs2}}}, _}}) ->
- case lists:nthtail(erlang:min(Pos2 - Pos, length(Revs2)), Revs2) of
- [] ->
- % impossible to tell if Revs2 is a descendant - assume no
- true;
- History ->
- % if Revs2 is a descendant, History is a prefix of Revs
- not lists:prefix(History, Revs)
- end
- end, Tail),
- case Descendants of [] ->
- remove_ancestors(Tail, [Head | Acc]);
- [{Descendant, _} | _] ->
- remove_ancestors(update_counter(Descendant, Count, Tail), Acc)
- end;
-remove_ancestors([Error | Tail], Acc) ->
- remove_ancestors(Tail, [Error | Acc]).
-
-create_monitors(Shards) ->
- MonRefs = lists:usort([
- rexi_utils:server_pid(N) || #shard{node=N} <- Shards
- ]),
- rexi_monitor:start(MonRefs).
-
-%% %% verify only id and rev are used in key.
-%% update_counter_test() ->
-%% Reply = {ok, #doc{id = <<"id">>, revs = <<"rev">>,
-%% body = <<"body">>, atts = <<"atts">>}},
-%% ?assertEqual([{{<<"id">>,<<"rev">>}, {Reply, 1}}],
-%% update_counter(Reply, 1, [])).
-%%
-%% remove_ancestors_test() ->
-%% Foo1 = {ok, #doc{revs = {1, [<<"foo">>]}}},
-%% Foo2 = {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}},
-%% Bar1 = {ok, #doc{revs = {1, [<<"bar">>]}}},
-%% Bar2 = {not_found, {1,<<"bar">>}},
-%% ?assertEqual(
-%% [kv(Bar1,1), kv(Foo1,1)],
-%% remove_ancestors([kv(Bar1,1), kv(Foo1,1)], [])
-%% ),
-%% ?assertEqual(
-%% [kv(Bar1,1), kv(Foo2,2)],
-%% remove_ancestors([kv(Bar1,1), kv(Foo1,1), kv(Foo2,1)], [])
-%% ),
-%% ?assertEqual(
-%% [kv(Bar1,2)],
-%% remove_ancestors([kv(Bar2,1), kv(Bar1,1)], [])
-%% ).
-
-is_replicator_db(DbName) ->
- path_ends_with(DbName, <<"_replicator">>).
-
-is_users_db(DbName) ->
- ConfigName = list_to_binary(config:get(
- "chttpd_auth", "authentication_db", "_users")),
- DbName == ConfigName orelse path_ends_with(DbName, <<"_users">>).
-
-path_ends_with(Path, Suffix) ->
- Suffix =:= couch_db:dbname_suffix(Path).
-
-open_cluster_db(#shard{dbname = DbName, opts = Options}) ->
- case couch_util:get_value(props, Options) of
- Props when is_list(Props) ->
- {ok, Db} = couch_db:clustered_db(DbName, [{props, Props}]),
- Db;
- _ ->
- {ok, Db} = couch_db:clustered_db(DbName, []),
- Db
- end.
-
-open_cluster_db(DbName, Opts) ->
- {SecProps} = fabric:get_security(DbName), % as admin
- UserCtx = couch_util:get_value(user_ctx, Opts, #user_ctx{}),
- {ok, Db} = couch_db:clustered_db(DbName, UserCtx, SecProps),
- Db.
-
-%% test function
-kv(Item, Count) ->
- {make_key(Item), {Item,Count}}.
-
-doc_id_and_rev(#doc{id=DocId, revs={RevNum, [RevHash|_]}}) ->
- {DocId, {RevNum, RevHash}}.
-
-
-is_partitioned(DbName0) when is_binary(DbName0) ->
- Shards = mem3:shards(fabric:dbname(DbName0)),
- is_partitioned(open_cluster_db(hd(Shards)));
-
-is_partitioned(Db) ->
- couch_db:is_partitioned(Db).
-
-
-validate_all_docs_args(DbName, Args) when is_binary(DbName) ->
- Shards = mem3:shards(fabric:dbname(DbName)),
- Db = open_cluster_db(hd(Shards)),
- validate_all_docs_args(Db, Args);
-
-validate_all_docs_args(Db, Args) ->
- true = couch_db:is_clustered(Db),
- couch_mrview_util:validate_all_docs_args(Db, Args).
-
-
-validate_args(DbName, DDoc, Args) when is_binary(DbName) ->
- Shards = mem3:shards(fabric:dbname(DbName)),
- Db = open_cluster_db(hd(Shards)),
- validate_args(Db, DDoc, Args);
-
-validate_args(Db, DDoc, Args) ->
- true = couch_db:is_clustered(Db),
- couch_mrview_util:validate_args(Db, DDoc, Args).
-
-
-upgrade_mrargs(#mrargs{} = Args) ->
- Args;
-
-upgrade_mrargs({mrargs,
- ViewType,
- Reduce,
- PreflightFun,
- StartKey,
- StartKeyDocId,
- EndKey,
- EndKeyDocId,
- Keys,
- Direction,
- Limit,
- Skip,
- GroupLevel,
- Group,
- Stale,
- MultiGet,
- InclusiveEnd,
- IncludeDocs,
- DocOptions,
- UpdateSeq,
- Conflicts,
- Callback,
- Sorted,
- Extra}) ->
- {Stable, Update} = case Stale of
- ok -> {true, false};
- update_after -> {true, lazy};
- _ -> {false, true}
- end,
- #mrargs{
- view_type = ViewType,
- reduce = Reduce,
- preflight_fun = PreflightFun,
- start_key = StartKey,
- start_key_docid = StartKeyDocId,
- end_key = EndKey,
- end_key_docid = EndKeyDocId,
- keys = Keys,
- direction = Direction,
- limit = Limit,
- skip = Skip,
- group_level = GroupLevel,
- group = Group,
- stable = Stable,
- update = Update,
- multi_get = MultiGet,
- inclusive_end = InclusiveEnd,
- include_docs = IncludeDocs,
- doc_options = DocOptions,
- update_seq = UpdateSeq,
- conflicts = Conflicts,
- callback = Callback,
- sorted = Sorted,
- extra = Extra
- }.
-
-
-worker_ranges(Workers) ->
- Ranges = fabric_dict:fold(fun(#shard{range=[X, Y]}, _, Acc) ->
- [{X, Y} | Acc]
- end, [], Workers),
- lists:usort(Ranges).
diff --git a/src/fabric/src/fabric_view.erl b/src/fabric/src/fabric_view.erl
deleted file mode 100644
index 6c33e1e32..000000000
--- a/src/fabric/src/fabric_view.erl
+++ /dev/null
@@ -1,478 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_view).
-
--export([remove_overlapping_shards/2, maybe_send_row/1,
- transform_row/1, keydict/1, extract_view/4, get_shards/2,
- check_down_shards/2, handle_worker_exit/3,
- get_shard_replacements/2, maybe_update_others/5]).
--export([fix_skip_and_limit/1]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-%% @doc Check if a downed node affects any of our workers
--spec check_down_shards(#collector{}, node()) ->
- {ok, #collector{}} | {error, any()}.
-check_down_shards(Collector, BadNode) ->
- #collector{callback=Callback, counters=Counters, user_acc=Acc} = Collector,
- Filter = fun(#shard{node = Node}, _) -> Node == BadNode end,
- BadCounters = fabric_dict:filter(Filter, Counters),
- case fabric_dict:size(BadCounters) > 0 of
- true ->
- Reason = {nodedown, <<"progress not possible">>},
- Callback({error, Reason}, Acc),
- {error, Reason};
- false ->
- {ok, Collector}
- end.
-
-%% @doc Handle a worker that dies during a stream
--spec handle_worker_exit(#collector{}, #shard{}, any()) -> {error, any()}.
-handle_worker_exit(Collector, _Worker, Reason) ->
- #collector{callback=Callback, user_acc=Acc} = Collector,
- {ok, Resp} = Callback({error, fabric_util:error_info(Reason)}, Acc),
- {error, Resp}.
-
-
--spec remove_overlapping_shards(#shard{}, [{#shard{}, any()}]) ->
- [{#shard{}, any()}].
-remove_overlapping_shards(#shard{} = Shard, Counters) ->
- remove_overlapping_shards(Shard, Counters, fun stop_worker/1).
-
-
--spec remove_overlapping_shards(#shard{}, [{#shard{}, any()}], fun()) ->
- [{#shard{}, any()}].
-remove_overlapping_shards(#shard{} = Shard, Counters, RemoveCb) ->
- Counters1 = filter_exact_copies(Shard, Counters, RemoveCb),
- filter_possible_overlaps(Shard, Counters1, RemoveCb).
-
-
-filter_possible_overlaps(Shard, Counters, RemoveCb) ->
- Ranges0 = fabric_util:worker_ranges(Counters),
- #shard{range = [BShard, EShard]} = Shard,
- Ranges = Ranges0 ++ [{BShard, EShard}],
- {Bs, Es} = lists:unzip(Ranges),
- {MinB, MaxE} = {lists:min(Bs), lists:max(Es)},
- % Use a custom sort function which prioritizes the given shard
- % range when the start endpoints match.
- SortFun = fun
- ({B, E}, {B, _}) when {B, E} =:= {BShard, EShard} ->
- % If start matches with the shard's start, shard always wins
- true;
- ({B, _}, {B, E}) when {B, E} =:= {BShard, EShard} ->
- % If start matches with the shard's start, shard always wins
- false;
- ({B, E1}, {B, E2}) ->
- % If start matches, pick the longest range first
- E2 >= E1;
- ({B1, _}, {B2, _}) ->
- % Then, by default, sort by start point
- B1 =< B2
- end,
- Ring = mem3_util:get_ring(Ranges, SortFun, MinB, MaxE),
- fabric_dict:filter(fun
- (S, _) when S =:= Shard ->
- % Keep the original shard
- true;
- (#shard{range = [B, E]} = S, _) ->
- case lists:member({B, E}, Ring) of
- true ->
- true; % Keep it
- false ->
- % Duplicate range, delete after calling callback function
- case is_function(RemoveCb) of
- true -> RemoveCb(S);
- false -> ok
- end,
- false
- end
- end, Counters).
-
-
-filter_exact_copies(#shard{range = Range0} = Shard0, Shards, Cb) ->
- fabric_dict:filter(fun
- (Shard, _) when Shard =:= Shard0 ->
- true; % Don't remove ourselves
- (#shard{range = Range} = Shard, _) when Range =:= Range0 ->
- case is_function(Cb) of
- true -> Cb(Shard);
- false -> ok
- end,
- false;
- (_, _) ->
- true
- end, Shards).
-
-
-stop_worker(#shard{ref = Ref, node = Node}) ->
- rexi:kill(Node, Ref).
-
-
-maybe_send_row(#collector{limit=0} = State) ->
- #collector{counters=Counters, user_acc=AccIn, callback=Callback} = State,
- case fabric_dict:any(0, Counters) of
- true ->
- % we still need to send the total/offset header
- {ok, State};
- false ->
- erase(meta_sent),
- {_, Acc} = Callback(complete, AccIn),
- {stop, State#collector{user_acc=Acc}}
- end;
-maybe_send_row(State) ->
- #collector{
- callback = Callback,
- counters = Counters,
- skip = Skip,
- limit = Limit,
- user_acc = AccIn
- } = State,
- case fabric_dict:any(0, Counters) of
- true ->
- {ok, State};
- false ->
- try get_next_row(State) of
- {_, NewState} when Skip > 0 ->
- maybe_send_row(NewState#collector{skip=Skip-1});
- {Row0, NewState} ->
- Row1 = possibly_embed_doc(NewState, Row0),
- Row2 = detach_partition(Row1),
- Row3 = transform_row(Row2),
- case Callback(Row3, AccIn) of
- {stop, Acc} ->
- {stop, NewState#collector{user_acc=Acc, limit=Limit-1}};
- {ok, Acc} ->
- maybe_send_row(NewState#collector{user_acc=Acc, limit=Limit-1})
- end
- catch complete ->
- erase(meta_sent),
- {_, Acc} = Callback(complete, AccIn),
- {stop, State#collector{user_acc=Acc}}
- end
- end.
-
-%% If include_docs=true is used with keys and
-%% the values contain "_id", then use the "_id"s
-%% to retrieve documents and embed them in the result
-possibly_embed_doc(_State,
- #view_row{id=reduced}=Row) ->
- Row;
-possibly_embed_doc(_State,
- #view_row{value=undefined}=Row) ->
- Row;
-possibly_embed_doc(#collector{db_name=DbName, query_args=Args},
- #view_row{key=_Key, id=_Id, value=Value, doc=_Doc}=Row) ->
- #mrargs{include_docs=IncludeDocs} = Args,
- case IncludeDocs andalso is_tuple(Value) of
- true ->
- {Props} = Value,
- Rev0 = couch_util:get_value(<<"_rev">>, Props),
- case couch_util:get_value(<<"_id">>,Props) of
- null -> Row#view_row{doc=null};
- undefined -> Row;
- IncId ->
- % use separate process to call fabric:open_doc
- % to not interfere with current call
- {Pid, Ref} = spawn_monitor(fun() ->
- exit(
- case Rev0 of
- undefined ->
- case fabric:open_doc(DbName, IncId, []) of
- {ok, NewDoc} ->
- Row#view_row{doc=couch_doc:to_json_obj(NewDoc,[])};
- {not_found, _} ->
- Row#view_row{doc=null};
- Else ->
- Row#view_row{doc={error, Else}}
- end;
- Rev0 ->
- Rev = couch_doc:parse_rev(Rev0),
- case fabric:open_revs(DbName, IncId, [Rev], []) of
- {ok, [{ok, NewDoc}]} ->
- Row#view_row{doc=couch_doc:to_json_obj(NewDoc,[])};
- {ok, [{{not_found, _}, Rev}]} ->
- Row#view_row{doc=null};
- Else ->
- Row#view_row{doc={error, Else}}
- end
- end) end),
- receive {'DOWN',Ref,process,Pid, Resp} ->
- Resp
- end
- end;
- _ -> Row
- end.
-
-detach_partition(#view_row{key={p, _Partition, Key}} = Row) ->
- Row#view_row{key = Key};
-detach_partition(#view_row{} = Row) ->
- Row.
-
-keydict(undefined) ->
- undefined;
-keydict(Keys) ->
- {Dict,_} = lists:foldl(fun(K, {D,I}) -> {dict:store(K,I,D), I+1} end,
- {dict:new(),0}, Keys),
- Dict.
-
-%% internal %%
-
-get_next_row(#collector{rows = []}) ->
- throw(complete);
-get_next_row(#collector{reducer = RedSrc} = St) when RedSrc =/= undefined ->
- #collector{
- query_args = #mrargs{direction = Dir},
- keys = Keys,
- rows = RowDict,
- lang = Lang,
- counters = Counters0,
- collation = Collation
- } = St,
- {Key, RestKeys} = find_next_key(Keys, Dir, Collation, RowDict),
- case dict:find(Key, RowDict) of
- {ok, Records} ->
- NewRowDict = dict:erase(Key, RowDict),
- Counters = lists:foldl(fun(#view_row{worker={Worker,From}}, CntrsAcc) ->
- case From of
- {Pid, _} when is_pid(Pid) ->
- gen_server:reply(From, ok);
- Pid when is_pid(Pid) ->
- rexi:stream_ack(From)
- end,
- fabric_dict:update_counter(Worker, -1, CntrsAcc)
- end, Counters0, Records),
- Wrapped = [[V] || #view_row{value=V} <- Records],
- {ok, [Reduced]} = couch_query_servers:rereduce(Lang, [RedSrc], Wrapped),
- {ok, Finalized} = couch_query_servers:finalize(RedSrc, Reduced),
- NewSt = St#collector{keys=RestKeys, rows=NewRowDict, counters=Counters},
- {#view_row{key=Key, id=reduced, value=Finalized}, NewSt};
- error ->
- get_next_row(St#collector{keys=RestKeys})
- end;
-get_next_row(State) ->
- #collector{rows = [Row|Rest], counters = Counters0} = State,
- {Worker, From} = Row#view_row.worker,
- rexi:stream_ack(From),
- Counters1 = fabric_dict:update_counter(Worker, -1, Counters0),
- {Row, State#collector{rows = Rest, counters=Counters1}}.
-
-%% TODO: rectify nil <-> undefined discrepancies
-find_next_key(nil, Dir, Collation, RowDict) ->
- find_next_key(undefined, Dir, Collation, RowDict);
-find_next_key(undefined, Dir, Collation, RowDict) ->
- CmpFun = fun(A, B) -> compare(Dir, Collation, A, B) end,
- case lists:sort(CmpFun, dict:fetch_keys(RowDict)) of
- [] ->
- throw(complete);
- [Key|_] ->
- {Key, nil}
- end;
-find_next_key([], _, _, _) ->
- throw(complete);
-find_next_key([Key|Rest], _, _, _) ->
- {Key, Rest}.
-
-transform_row(#view_row{value={[{reduce_overflow_error, Msg}]}}) ->
- {row, [{key,null}, {id,error}, {value,reduce_overflow_error}, {reason,Msg}]};
-transform_row(#view_row{key=Key, id=reduced, value=Value}) ->
- {row, [{key,Key}, {value,Value}]};
-transform_row(#view_row{key=Key, id=undefined}) ->
- {row, [{key,Key}, {id,error}, {value,not_found}]};
-transform_row(#view_row{key=Key, id=Id, value=Value, doc=undefined}) ->
- {row, [{id,Id}, {key,Key}, {value,Value}]};
-transform_row(#view_row{key=Key, id=_Id, value=_Value, doc={error,Reason}}) ->
- {row, [{id,error}, {key,Key}, {value,Reason}]};
-transform_row(#view_row{key=Key, id=Id, value=Value, doc=Doc}) ->
- {row, [{id,Id}, {key,Key}, {value,Value}, {doc,Doc}]}.
-
-compare(_, _, A, A) -> true;
-compare(fwd, <<"raw">>, A, B) -> A < B;
-compare(rev, <<"raw">>, A, B) -> B < A;
-compare(fwd, _, A, B) -> couch_ejson_compare:less_json(A, B);
-compare(rev, _, A, B) -> couch_ejson_compare:less_json(B, A).
-
-extract_view(Pid, ViewName, [], _ViewType) ->
- couch_log:error("missing_named_view ~p", [ViewName]),
- exit(Pid, kill),
- exit(missing_named_view);
-extract_view(Pid, ViewName, [View|Rest], ViewType) ->
- case lists:member(ViewName, view_names(View, ViewType)) of
- true ->
- if ViewType == reduce ->
- {index_of(ViewName, view_names(View, reduce)), View};
- true ->
- View
- end;
- false ->
- extract_view(Pid, ViewName, Rest, ViewType)
- end.
-
-view_names(View, Type) when Type == red_map; Type == reduce ->
- [Name || {Name, _} <- View#mrview.reduce_funs];
-view_names(View, map) ->
- View#mrview.map_names.
-
-index_of(X, List) ->
- index_of(X, List, 1).
-
-index_of(_X, [], _I) ->
- not_found;
-index_of(X, [X|_Rest], I) ->
- I;
-index_of(X, [_|Rest], I) ->
- index_of(X, Rest, I+1).
-
-get_shards(Db, #mrargs{} = Args) ->
- DbPartitioned = fabric_util:is_partitioned(Db),
- Partition = couch_mrview_util:get_extra(Args, partition),
- if DbPartitioned orelse Partition == undefined -> ok; true ->
- throw({bad_request, <<"partition specified on non-partitioned db">>})
- end,
- DbName = fabric:dbname(Db),
- % Decide which version of mem3:shards/1,2 or
- % mem3:ushards/1,2 to use for the current
- % request.
- case {Args#mrargs.stable, Partition} of
- {true, undefined} ->
- {mem3:ushards(DbName), []};
- {true, Partition} ->
- Shards = mem3:ushards(DbName, couch_partition:shard_key(Partition)),
- {Shards, [{any, Shards}]};
- {false, undefined} ->
- {mem3:shards(DbName), []};
- {false, Partition} ->
- Shards = mem3:shards(DbName, couch_partition:shard_key(Partition)),
- {Shards, [{any, Shards}]}
- end.
-
-maybe_update_others(DbName, DDoc, ShardsInvolved, ViewName,
- #mrargs{update=lazy} = Args) ->
- ShardsNeedUpdated = mem3:shards(DbName) -- ShardsInvolved,
- lists:foreach(fun(#shard{node=Node, name=ShardName}) ->
- rpc:cast(Node, fabric_rpc, update_mrview, [ShardName, DDoc, ViewName, Args])
- end, ShardsNeedUpdated);
-maybe_update_others(_DbName, _DDoc, _ShardsInvolved, _ViewName, _Args) ->
- ok.
-
-get_shard_replacements(DbName, UsedShards0) ->
- % We only want to generate a replacements list from shards
- % that aren't already used.
- AllLiveShards = mem3:live_shards(DbName, [node() | nodes()]),
- UsedShards = [S#shard{ref=undefined} || S <- UsedShards0],
- get_shard_replacements_int(AllLiveShards -- UsedShards, UsedShards).
-
-get_shard_replacements_int(UnusedShards, UsedShards) ->
- % If we have more than one copy of a range then we don't
- % want to try and add a replacement to any copy.
- RangeCounts = lists:foldl(fun(#shard{range=R}, Acc) ->
- dict:update_counter(R, 1, Acc)
- end, dict:new(), UsedShards),
-
- % For each seq shard range with a count of 1, find any
- % possible replacements from the unused shards. The
- % replacement list is keyed by range.
- lists:foldl(fun(#shard{range = [B, E] = Range}, Acc) ->
- case dict:find(Range, RangeCounts) of
- {ok, 1} ->
- Repls = mem3_util:non_overlapping_shards(UnusedShards, B, E),
- % Only keep non-empty lists of replacements
- if Repls == [] -> Acc; true ->
- [{Range, Repls} | Acc]
- end;
- _ ->
- Acc
- end
- end, [], UsedShards).
-
--spec fix_skip_and_limit(#mrargs{}) -> {CoordArgs::#mrargs{}, WorkerArgs::#mrargs{}}.
-fix_skip_and_limit(#mrargs{} = Args) ->
- {CoordArgs, WorkerArgs} = case couch_mrview_util:get_extra(Args, partition) of
- undefined ->
- #mrargs{skip=Skip, limit=Limit}=Args,
- {Args, Args#mrargs{skip=0, limit=Skip+Limit}};
- _Partition ->
- {Args#mrargs{skip=0}, Args}
- end,
- %% The coordinator needs to finalize each row, so make sure the shards don't finalize them.
- {CoordArgs, remove_finalizer(WorkerArgs)}.
-
-remove_finalizer(Args) ->
- couch_mrview_util:set_extra(Args, finalizer, null).
-
-
-%% remove_overlapping_shards_test() ->
-%% Cb = undefined,
-%%
-%% Shards = mk_cnts([[0, 10], [11, 20], [21, ?RING_END]], 3),
-%%
-%% % Simple (exact) overlap
-%% Shard1 = mk_shard("node-3", [11, 20]),
-%% Shards1 = fabric_dict:store(Shard1, nil, Shards),
-%% R1 = remove_overlapping_shards(Shard1, Shards1, Cb),
-%% ?assertEqual([{0, 10}, {11, 20}, {21, ?RING_END}],
-%% fabric_util:worker_ranges(R1)),
-%% ?assert(fabric_dict:is_key(Shard1, R1)),
-%%
-%% % Split overlap (shard overlap multiple workers)
-%% Shard2 = mk_shard("node-3", [0, 20]),
-%% Shards2 = fabric_dict:store(Shard2, nil, Shards),
-%% R2 = remove_overlapping_shards(Shard2, Shards2, Cb),
-%% ?assertEqual([{0, 20}, {21, ?RING_END}],
-%% fabric_util:worker_ranges(R2)),
-%% ?assert(fabric_dict:is_key(Shard2, R2)).
-%%
-%%
-%% get_shard_replacements_test() ->
-%% Unused = [mk_shard(N, [B, E]) || {N, B, E} <- [
-%% {"n1", 11, 20}, {"n1", 21, ?RING_END},
-%% {"n2", 0, 4}, {"n2", 5, 10}, {"n2", 11, 20},
-%% {"n3", 0, 21, ?RING_END}
-%% ]],
-%% Used = [mk_shard(N, [B, E]) || {N, B, E} <- [
-%% {"n2", 21, ?RING_END},
-%% {"n3", 0, 10}, {"n3", 11, 20}
-%% ]],
-%% Res = lists:sort(get_shard_replacements_int(Unused, Used)),
-%% % Notice that [0, 10] range can be replaced by spawning the [0, 4] and [5,
-%% % 10] workers on n1
-%% Expect = [
-%% {[0, 10], [mk_shard("n2", [0, 4]), mk_shard("n2", [5, 10])]},
-%% {[11, 20], [mk_shard("n1", [11, 20]), mk_shard("n2", [11, 20])]},
-%% {[21, ?RING_END], [mk_shard("n1", [21, ?RING_END])]}
-%% ],
-%% ?assertEqual(Expect, Res).
-%%
-%%
-%% mk_cnts(Ranges, NoNodes) ->
-%% orddict:from_list([{Shard,nil}
-%% || Shard <-
-%% lists:flatten(lists:map(
-%% fun(Range) ->
-%% mk_shards(NoNodes,Range,[])
-%% end, Ranges))]
-%% ).
-%%
-%% mk_shards(0,_Range,Shards) ->
-%% Shards;
-%% mk_shards(NoNodes,Range,Shards) ->
-%% Name ="node-" ++ integer_to_list(NoNodes),
-%% mk_shards(NoNodes-1,Range, [mk_shard(Name, Range) | Shards]).
-%%
-%%
-%% mk_shard(Name, Range) ->
-%% Node = list_to_atom(Name),
-%% BName = list_to_binary(Name),
-%% #shard{name = BName, node = Node, range = Range}.
diff --git a/src/fabric/src/fabric_view_all_docs.erl b/src/fabric/src/fabric_view_all_docs.erl
deleted file mode 100644
index e4d3d4a40..000000000
--- a/src/fabric/src/fabric_view_all_docs.erl
+++ /dev/null
@@ -1,332 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_view_all_docs).
-
--export([go/5]).
--export([open_doc/4]). % exported for spawn
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-go(Db, Options, #mrargs{keys=undefined} = QueryArgs, Callback, Acc) ->
- {CoordArgs, WorkerArgs} = fabric_view:fix_skip_and_limit(QueryArgs),
- DbName = fabric:dbname(Db),
- {Shards, RingOpts} = shards(Db, QueryArgs),
- Workers0 = fabric_util:submit_jobs(
- Shards, fabric_rpc, all_docs, [Options, WorkerArgs]),
- RexiMon = fabric_util:create_monitors(Workers0),
- try
- case fabric_streams:start(Workers0, #shard.ref, RingOpts) of
- {ok, Workers} ->
- try
- go(DbName, Options, Workers, CoordArgs, Callback, Acc)
- after
- fabric_streams:cleanup(Workers)
- end;
- {timeout, NewState} ->
- DefunctWorkers = fabric_util:remove_done_workers(
- NewState#stream_acc.workers, waiting
- ),
- fabric_util:log_timeout(
- DefunctWorkers,
- "all_docs"
- ),
- Callback({error, timeout}, Acc);
- {error, Error} ->
- Callback({error, Error}, Acc)
- end
- after
- rexi_monitor:stop(RexiMon)
- end;
-
-
-go(DbName, Options, QueryArgs, Callback, Acc0) ->
- #mrargs{
- direction = Dir,
- include_docs = IncludeDocs,
- doc_options = DocOptions0,
- limit = Limit,
- conflicts = Conflicts,
- skip = Skip,
- keys = Keys0,
- extra = Extra,
- update_seq = UpdateSeq
- } = QueryArgs,
- DocOptions1 = case Conflicts of
- true -> [conflicts|DocOptions0];
- _ -> DocOptions0
- end,
- SpawnFun = fun(Key) ->
- spawn_monitor(?MODULE, open_doc, [DbName, Options ++ DocOptions1, Key, IncludeDocs])
- end,
- MaxJobs = all_docs_concurrency(),
- Keys1 = case Dir of
- fwd -> Keys0;
- _ -> lists:reverse(Keys0)
- end,
- Keys2 = case Skip < length(Keys1) of
- true -> lists:nthtail(Skip, Keys1);
- false -> []
- end,
- Keys3 = case Limit < length(Keys2) of
- true -> lists:sublist(Keys2, Limit);
- false -> Keys2
- end,
- %% namespace can be _set_ to `undefined`, so we want to simulate an enum here
- Namespace = case couch_util:get_value(namespace, Extra) of
- <<"_all_docs">> -> <<"_all_docs">>;
- <<"_design">> -> <<"_design">>;
- <<"_local">> -> <<"_local">>;
- _ -> <<"_all_docs">>
- end,
- Timeout = fabric_util:all_docs_timeout(),
- {_, Ref} = spawn_monitor(fun() ->
- exit(fabric:get_doc_count(DbName, Namespace))
- end),
- receive
- {'DOWN', Ref, _, _, {ok, TotalRows}} ->
- Meta = case UpdateSeq of
- false ->
- [{total, TotalRows}, {offset, null}];
- true ->
- [{total, TotalRows}, {offset, null}, {update_seq, null}]
- end,
- {ok, Acc1} = Callback({meta, Meta}, Acc0),
- Resp = doc_receive_loop(
- Keys3, queue:new(), SpawnFun, MaxJobs, Callback, Acc1
- ),
- case Resp of
- {ok, Acc2} ->
- Callback(complete, Acc2);
- timeout ->
- Callback({error, timeout}, Acc0)
- end;
- {'DOWN', Ref, _, _, Error} ->
- Callback({error, Error}, Acc0)
- after Timeout ->
- Callback({error, timeout}, Acc0)
- end.
-
-go(DbName, _Options, Workers, QueryArgs, Callback, Acc0) ->
- #mrargs{limit = Limit, skip = Skip, update_seq = UpdateSeq} = QueryArgs,
- State = #collector{
- db_name = DbName,
- query_args = QueryArgs,
- callback = Callback,
- counters = fabric_dict:init(Workers, 0),
- skip = Skip,
- limit = Limit,
- user_acc = Acc0,
- update_seq = case UpdateSeq of true -> []; false -> nil end
- },
- case rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
- State, fabric_util:view_timeout(QueryArgs), 5000) of
- {ok, NewState} ->
- {ok, NewState#collector.user_acc};
- {timeout, NewState} ->
- Callback({error, timeout}, NewState#collector.user_acc);
- {error, Resp} ->
- {ok, Resp}
- end.
-
-shards(Db, Args) ->
- DbPartitioned = fabric_util:is_partitioned(Db),
- Partition = couch_mrview_util:get_extra(Args, partition),
- NewArgs = case {DbPartitioned, Partition} of
- {true, undefined} ->
- % If a user specifies the same partition on both
- % the start and end keys we can optimize the
- % query by limiting to the partition shard.
- Start = couch_partition:extract(Args#mrargs.start_key),
- End = couch_partition:extract(Args#mrargs.end_key),
- case {Start, End} of
- {{Partition, SK}, {Partition, EK}} ->
- A1 = Args#mrargs{
- start_key = SK,
- end_key = EK
- },
- couch_mrview_util:set_extra(A1, partition, Partition);
- _ ->
- Args
- end;
- _ ->
- Args
- end,
- fabric_view:get_shards(Db, NewArgs).
-
-
-handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _, State) ->
- fabric_view:check_down_shards(State, NodeRef);
-
-handle_message({rexi_EXIT, Reason}, Worker, State) ->
- fabric_view:handle_worker_exit(State, Worker, Reason);
-
-handle_message({meta, Meta0}, {Worker, From}, State) ->
- Tot = couch_util:get_value(total, Meta0, 0),
- Off = couch_util:get_value(offset, Meta0, 0),
- Seq = couch_util:get_value(update_seq, Meta0, 0),
- #collector{
- callback = Callback,
- counters = Counters0,
- total_rows = Total0,
- offset = Offset0,
- user_acc = AccIn,
- update_seq = UpdateSeq0
- } = State,
- % Assert that we don't have other messages from this
- % worker when the total_and_offset message arrives.
- 0 = fabric_dict:lookup_element(Worker, Counters0),
- rexi:stream_ack(From),
- Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
- Total = if Tot == null -> null; true -> Total0 + Tot end,
- Offset = if Off == null -> null; true -> Offset0 + Off end,
- UpdateSeq = case {UpdateSeq0, Seq} of
- {nil, _} -> nil;
- {_, null} -> null;
- _ -> [{Worker, Seq} | UpdateSeq0]
- end,
- case fabric_dict:any(0, Counters1) of
- true ->
- {ok, State#collector{
- counters = Counters1,
- total_rows = Total,
- update_seq = UpdateSeq,
- offset = Offset
- }};
- false ->
- FinalOffset = case Offset of
- null -> null;
- _ -> erlang:min(Total, Offset+State#collector.skip)
- end,
- Meta = [{total, Total}, {offset, FinalOffset}] ++
- case UpdateSeq of
- nil ->
- [];
- null ->
- [{update_seq, null}];
- _ ->
- [{update_seq, fabric_view_changes:pack_seqs(UpdateSeq)}]
- end,
- {Go, Acc} = Callback({meta, Meta}, AccIn),
- {Go, State#collector{
- counters = fabric_dict:decrement_all(Counters1),
- total_rows = Total,
- offset = FinalOffset,
- user_acc = Acc,
- update_seq = UpdateSeq0
- }}
- end;
-
-handle_message(#view_row{} = Row, {Worker, From}, State) ->
- #collector{query_args = Args, counters = Counters0, rows = Rows0} = State,
- Dir = Args#mrargs.direction,
- Rows = merge_row(Dir, Row#view_row{worker={Worker, From}}, Rows0),
- Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
- State1 = State#collector{rows=Rows, counters=Counters1},
- fabric_view:maybe_send_row(State1);
-
-handle_message(complete, Worker, State) ->
- Counters = fabric_dict:update_counter(Worker, 1, State#collector.counters),
- fabric_view:maybe_send_row(State#collector{counters = Counters});
-
-handle_message({execution_stats, _} = Msg, {_,From}, St) ->
- #collector{callback=Callback, user_acc=AccIn} = St,
- {Go, Acc} = Callback(Msg, AccIn),
- rexi:stream_ack(From),
- {Go, St#collector{user_acc=Acc}}.
-
-merge_row(fwd, Row, Rows) ->
- lists:keymerge(#view_row.id, [Row], Rows);
-merge_row(rev, Row, Rows) ->
- lists:rkeymerge(#view_row.id, [Row], Rows).
-
-all_docs_concurrency() ->
- Value = config:get("fabric", "all_docs_concurrency", "10"),
- try
- list_to_integer(Value)
- catch _:_ ->
- 10
- end.
-
-doc_receive_loop(Keys, Pids, SpawnFun, MaxJobs, Callback, AccIn) ->
- case {Keys, queue:len(Pids)} of
- {[], 0} ->
- {ok, AccIn};
- {[K | RKeys], Len} when Len < MaxJobs ->
- Pids1 = queue:in(SpawnFun(K), Pids),
- doc_receive_loop(RKeys, Pids1, SpawnFun, MaxJobs, Callback, AccIn);
- _ ->
- {{value, {Pid, Ref}}, RestPids} = queue:out(Pids),
- Timeout = fabric_util:all_docs_timeout(),
- receive {'DOWN', Ref, process, Pid, Row} ->
- case Row of
- #view_row{} ->
- case Callback(fabric_view:transform_row(Row), AccIn) of
- {ok, Acc} ->
- doc_receive_loop(
- Keys, RestPids, SpawnFun, MaxJobs, Callback, Acc
- );
- {stop, Acc} ->
- cancel_read_pids(RestPids),
- {ok, Acc}
- end;
- Error ->
- cancel_read_pids(RestPids),
- Callback({error, Error}, AccIn)
- end
- after Timeout ->
- timeout
- end
- end.
-
-
-open_doc(DbName, Options, Id, IncludeDocs) ->
- try open_doc_int(DbName, Options, Id, IncludeDocs) of
- #view_row{} = Row ->
- exit(Row)
- catch Type:Reason ->
- Stack = erlang:get_stacktrace(),
- couch_log:error("_all_docs open error: ~s ~s :: ~w ~w", [
- DbName, Id, {Type, Reason}, Stack]),
- exit({Id, Reason})
- end.
-
-open_doc_int(DbName, Options, Id, IncludeDocs) ->
- Row = case fabric:open_doc(DbName, Id, [deleted | Options]) of
- {not_found, missing} ->
- Doc = undefined,
- #view_row{key=Id};
- {ok, #doc{deleted=true, revs=Revs}} ->
- Doc = null,
- {RevPos, [RevId|_]} = Revs,
- Value = {[{rev,couch_doc:rev_to_str({RevPos, RevId})}, {deleted,true}]},
- #view_row{key=Id, id=Id, value=Value};
- {ok, #doc{revs=Revs} = Doc0} ->
- Doc = couch_doc:to_json_obj(Doc0, Options),
- {RevPos, [RevId|_]} = Revs,
- Value = {[{rev,couch_doc:rev_to_str({RevPos, RevId})}]},
- #view_row{key=Id, id=Id, value=Value}
- end,
- if IncludeDocs -> Row#view_row{doc=Doc}; true -> Row end.
-
-cancel_read_pids(Pids) ->
- case queue:out(Pids) of
- {{value, {Pid, Ref}}, RestPids} ->
- exit(Pid, kill),
- erlang:demonitor(Ref, [flush]),
- cancel_read_pids(RestPids);
- {empty, _} ->
- ok
- end.
diff --git a/src/fabric/src/fabric_view_changes.erl b/src/fabric/src/fabric_view_changes.erl
deleted file mode 100644
index 3f684a3cc..000000000
--- a/src/fabric/src/fabric_view_changes.erl
+++ /dev/null
@@ -1,820 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_view_changes).
-
--export([go/5, pack_seqs/1, unpack_seqs/2]).
--export([increment_changes_epoch/0]).
-
-%% exported for upgrade purposes.
--export([keep_sending_changes/8]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
-
--import(fabric_db_update_listener, [wait_db_updated/1]).
-
-go(DbName, Feed, Options, Callback, Acc0) when Feed == "continuous" orelse
- Feed == "longpoll" orelse Feed == "eventsource" ->
- Args = make_changes_args(Options),
- Since = get_start_seq(DbName, Args),
- case validate_start_seq(DbName, Since) of
- ok ->
- {ok, Acc} = Callback(start, Acc0),
- {Timeout, _} = couch_changes:get_changes_timeout(Args, Callback),
- Ref = make_ref(),
- Parent = self(),
- UpdateListener = {spawn_link(fabric_db_update_listener, go,
- [Parent, Ref, DbName, Timeout]),
- Ref},
- put(changes_epoch, get_changes_epoch()),
- try
- keep_sending_changes(
- DbName,
- Args,
- Callback,
- Since,
- Acc,
- Timeout,
- UpdateListener,
- os:timestamp()
- )
- after
- fabric_db_update_listener:stop(UpdateListener)
- end;
- Error ->
- Callback(Error, Acc0)
- end;
-
-go(DbName, "normal", Options, Callback, Acc0) ->
- Args = make_changes_args(Options),
- Since = get_start_seq(DbName, Args),
- case validate_start_seq(DbName, Since) of
- ok ->
- {ok, Acc} = Callback(start, Acc0),
- {ok, Collector} = send_changes(
- DbName,
- Args,
- Callback,
- Since,
- Acc,
- 5000
- ),
- #collector{counters=Seqs, user_acc=AccOut, offset=Offset} = Collector,
- Callback({stop, pack_seqs(Seqs), pending_count(Offset)}, AccOut);
- Error ->
- Callback(Error, Acc0)
- end.
-
-keep_sending_changes(DbName, Args, Callback, Seqs, AccIn, Timeout, UpListen, T0) ->
- #changes_args{limit=Limit, feed=Feed, heartbeat=Heartbeat} = Args,
- {ok, Collector} = send_changes(DbName, Args, Callback, Seqs, AccIn, Timeout),
- #collector{
- limit = Limit2,
- counters = NewSeqs,
- offset = Offset,
- user_acc = AccOut0
- } = Collector,
- LastSeq = pack_seqs(NewSeqs),
- MaintenanceMode = config:get("couchdb", "maintenance_mode"),
- NewEpoch = get_changes_epoch() > erlang:get(changes_epoch),
- if Limit > Limit2, Feed == "longpoll";
- MaintenanceMode == "true"; MaintenanceMode == "nolb"; NewEpoch ->
- Callback({stop, LastSeq, pending_count(Offset)}, AccOut0);
- true ->
- {ok, AccOut} = Callback(waiting_for_updates, AccOut0),
- WaitForUpdate = wait_db_updated(UpListen),
- AccumulatedTime = timer:now_diff(os:timestamp(), T0) div 1000,
- Max = case config:get("fabric", "changes_duration") of
- undefined ->
- infinity;
- MaxStr ->
- list_to_integer(MaxStr)
- end,
- case {Heartbeat, AccumulatedTime > Max, WaitForUpdate} of
- {_, _, changes_feed_died} ->
- Callback({stop, LastSeq, pending_count(Offset)}, AccOut);
- {undefined, _, timeout} ->
- Callback({stop, LastSeq, pending_count(Offset)}, AccOut);
- {_, true, timeout} ->
- Callback({stop, LastSeq, pending_count(Offset)}, AccOut);
- _ ->
- {ok, AccTimeout} = Callback(timeout, AccOut),
- ?MODULE:keep_sending_changes(
- DbName,
- Args#changes_args{limit=Limit2},
- Callback,
- LastSeq,
- AccTimeout,
- Timeout,
- UpListen,
- T0
- )
- end
- end.
-
-send_changes(DbName, ChangesArgs, Callback, PackedSeqs, AccIn, Timeout) ->
- LiveNodes = [node() | nodes()],
- AllLiveShards = mem3:live_shards(DbName, LiveNodes),
- Seqs0 = unpack_seqs(PackedSeqs, DbName),
- {WSeqs0, Dead, Reps} = find_replacements(Seqs0, AllLiveShards),
- % Start workers which didn't need replacements
- WSeqs = lists:map(fun({#shard{name = Name, node = N} = S, Seq}) ->
- Ref = rexi:cast(N, {fabric_rpc, changes, [Name, ChangesArgs, Seq]}),
- {S#shard{ref = Ref}, Seq}
- end, WSeqs0),
- % For some dead workers see if they are a result of split shards. In that
- % case make a replacement argument so that local rexi workers can calculate
- % (hopefully) a > 0 update sequence.
- {WSplitSeqs0, Reps1} = find_split_shard_replacements(Dead, Reps),
- WSplitSeqs = lists:map(fun({#shard{name = Name, node = N} = S, Seq}) ->
- Arg = make_replacement_arg(N, Seq),
- Ref = rexi:cast(N, {fabric_rpc, changes, [Name, ChangesArgs, Arg]}),
- {S#shard{ref = Ref}, Seq}
- end, WSplitSeqs0),
- % For ranges that were not split, start sequences from 0
- WReps = lists:map(fun(#shard{name = Name, node = N} = S) ->
- Ref = rexi:cast(N, {fabric_rpc, changes, [Name, ChangesArgs, 0]}),
- {S#shard{ref = Ref}, 0}
- end, Reps1),
- Seqs = WSeqs ++ WSplitSeqs ++ WReps,
- {Workers0, _} = lists:unzip(Seqs),
- Repls = fabric_ring:get_shard_replacements(DbName, Workers0),
- StartFun = fun(#shard{name=Name, node=N, range=R0}=Shard) ->
- %% Find the original shard copy in the Seqs array
- case lists:dropwhile(fun({S, _}) -> S#shard.range =/= R0 end, Seqs) of
- [{#shard{}, {replace, _, _, _}} | _] ->
- % Don't attempt to replace a replacement
- SeqArg = 0;
- [{#shard{node = OldNode}, OldSeq} | _] ->
- SeqArg = make_replacement_arg(OldNode, OldSeq);
- _ ->
- % TODO this clause is probably unreachable in the N>2
- % case because we compute replacements only if a shard has one
- % in the original set.
- couch_log:error("Streaming ~s from zero while replacing ~p",
- [Name, PackedSeqs]),
- SeqArg = 0
- end,
- Ref = rexi:cast(N, {fabric_rpc, changes, [Name, ChangesArgs, SeqArg]}),
- Shard#shard{ref = Ref}
- end,
- RexiMon = fabric_util:create_monitors(Workers0),
- try
- case fabric_streams:start(Workers0, #shard.ref, StartFun, Repls) of
- {ok, Workers} ->
- try
- LiveSeqs = lists:map(fun(W) ->
- case lists:keyfind(W, 1, Seqs) of
- {W, Seq} -> {W, Seq};
- _ -> {W, 0}
- end
- end, Workers),
- send_changes(DbName, Workers, LiveSeqs, ChangesArgs,
- Callback, AccIn, Timeout)
- after
- fabric_streams:cleanup(Workers)
- end;
- {timeout, NewState} ->
- DefunctWorkers = fabric_util:remove_done_workers(
- NewState#stream_acc.workers,
- waiting
- ),
- fabric_util:log_timeout(
- DefunctWorkers,
- "changes"
- ),
- throw({error, timeout});
- {error, Reason} ->
- throw({error, Reason});
- Else ->
- throw({error, Else})
- end
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-send_changes(DbName, Workers, Seqs, ChangesArgs, Callback, AccIn, Timeout) ->
- State = #collector{
- db_name = DbName,
- query_args = ChangesArgs,
- callback = Callback,
- counters = orddict:from_list(Seqs),
- user_acc = AccIn,
- limit = ChangesArgs#changes_args.limit,
- offset = fabric_dict:init(Workers, null),
- rows = Seqs % store sequence positions instead
- },
- %% TODO: errors need to be handled here
- receive_results(Workers, State, Timeout, Callback).
-
-receive_results(Workers, State, Timeout, Callback) ->
- case rexi_utils:recv(Workers, #shard.ref, fun handle_message/3, State,
- Timeout, infinity) of
- {timeout, NewState0} ->
- {ok, AccOut} = Callback(timeout, NewState0#collector.user_acc),
- NewState = NewState0#collector{user_acc = AccOut},
- receive_results(Workers, NewState, Timeout, Callback);
- {_, NewState} ->
- {ok, NewState}
- end.
-
-handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _, State) ->
- fabric_view:check_down_shards(State, NodeRef);
-
-handle_message({rexi_EXIT, Reason}, Worker, State) ->
- fabric_view:handle_worker_exit(State, Worker, Reason);
-
-% Temporary upgrade clause - Case 24236
-handle_message({complete, Key}, Worker, State) when is_tuple(Key) ->
- handle_message({complete, [{seq, Key}, {pending, 0}]}, Worker, State);
-
-handle_message({change, Props}, {Worker, _}, #collector{limit=0} = State) ->
- O0 = State#collector.offset,
- O1 = case fabric_dict:lookup_element(Worker, O0) of
- null ->
- % Use Pending+1 because we're ignoring this row in the response
- Pending = couch_util:get_value(pending, Props, 0),
- fabric_dict:store(Worker, Pending+1, O0);
- _ ->
- O0
- end,
- maybe_stop(State#collector{offset = O1});
-
-handle_message({complete, Props}, Worker, #collector{limit=0} = State) ->
- O0 = State#collector.offset,
- O1 = case fabric_dict:lookup_element(Worker, O0) of
- null ->
- fabric_dict:store(Worker, couch_util:get_value(pending, Props), O0);
- _ ->
- O0
- end,
- maybe_stop(State#collector{offset = O1});
-
-handle_message({no_pass, Props}, {Worker, From}, #collector{limit=0} = State)
- when is_list(Props) ->
- #collector{counters = S0, offset = O0} = State,
- O1 = case fabric_dict:lookup_element(Worker, O0) of
- null ->
- fabric_dict:store(Worker, couch_util:get_value(pending, Props), O0);
- _ ->
- O0
- end,
- S1 = fabric_dict:store(Worker, couch_util:get_value(seq, Props), S0),
- rexi:stream_ack(From),
- maybe_stop(State#collector{counters = S1, offset = O1});
-
-handle_message(#change{} = Row, {Worker, From}, St) ->
- Change = {change, [
- {seq, Row#change.key},
- {id, Row#change.id},
- {changes, Row#change.value},
- {deleted, Row#change.deleted},
- {doc, Row#change.doc}
- ]},
- handle_message(Change, {Worker, From}, St);
-
-handle_message({change, Props}, {Worker, From}, St) ->
- #collector{
- callback = Callback,
- counters = S0,
- offset = O0,
- limit = Limit,
- user_acc = AccIn
- } = St,
- true = fabric_dict:is_key(Worker, S0),
- S1 = fabric_dict:store(Worker, couch_util:get_value(seq, Props), S0),
- O1 = fabric_dict:store(Worker, couch_util:get_value(pending, Props), O0),
- % Temporary hack for FB 23637
- Interval = erlang:get(changes_seq_interval),
- if (Interval == undefined) orelse (Limit rem Interval == 0) ->
- Props2 = lists:keyreplace(seq, 1, Props, {seq, pack_seqs(S1)});
- true ->
- Props2 = lists:keyreplace(seq, 1, Props, {seq, null})
- end,
- {Go, Acc} = Callback(changes_row(Props2), AccIn),
- rexi:stream_ack(From),
- {Go, St#collector{counters=S1, offset=O1, limit=Limit-1, user_acc=Acc}};
-
-%% upgrade clause
-handle_message({no_pass, Seq}, From, St) when is_integer(Seq) ->
- handle_message({no_pass, [{seq, Seq}]}, From, St);
-
-handle_message({no_pass, Props}, {Worker, From}, St) ->
- Seq = couch_util:get_value(seq, Props),
- #collector{counters = S0} = St,
- true = fabric_dict:is_key(Worker, S0),
- S1 = fabric_dict:store(Worker, Seq, S0),
- rexi:stream_ack(From),
- {ok, St#collector{counters=S1}};
-
-handle_message({complete, Props}, Worker, State) ->
- Key = couch_util:get_value(seq, Props),
- #collector{
- counters = S0,
- offset = O0,
- total_rows = Completed % override
- } = State,
- true = fabric_dict:is_key(Worker, S0),
- S1 = fabric_dict:store(Worker, Key, S0),
- O1 = fabric_dict:store(Worker, couch_util:get_value(pending, Props), O0),
- NewState = State#collector{counters=S1, offset=O1, total_rows=Completed+1},
- % We're relying on S1 having exactly the number of workers that
- % are participating in this response. With the new stream_start
- % that's a bit more obvious but historically it wasn't quite
- % so clear. The Completed variable is just a hacky override
- % of the total_rows field in the #collector{} record.
- NumWorkers = fabric_dict:size(S1),
- Go = case NumWorkers =:= (Completed+1) of
- true -> stop;
- false -> ok
- end,
- {Go, NewState}.
-
-
-make_replacement_arg(Node, {Seq, Uuid}) ->
- {replace, Node, Uuid, Seq};
-make_replacement_arg(_Node, {Seq, Uuid, EpochNode}) ->
- % The replacement should properly be computed against the node that owned
- % the sequence when it was written to disk (the EpochNode) rather than the
- % node we're trying to replace.
- {replace, EpochNode, Uuid, Seq};
-make_replacement_arg(_, _) ->
- 0.
-
-maybe_stop(#collector{offset = Offset} = State) ->
- case fabric_dict:any(null, Offset) of
- false ->
- {stop, State};
- true ->
- % Wait till we've heard from everyone to compute pending count
- {ok, State}
- end.
-
-make_changes_args(#changes_args{style=Style, filter_fun=undefined}=Args) ->
- Args#changes_args{filter_fun = {default, Style}};
-make_changes_args(Args) ->
- Args.
-
-get_start_seq(DbName, #changes_args{dir=Dir, since=Since})
- when Dir == rev; Since == "now" ->
- {ok, Info} = fabric:get_db_info(DbName),
- couch_util:get_value(update_seq, Info);
-get_start_seq(_DbName, #changes_args{dir=fwd, since=Since}) ->
- Since.
-
-pending_count(Dict) ->
- fabric_dict:fold(fun
- (_Worker, Count, Acc) when is_integer(Count), is_integer(Acc) ->
- Count + Acc;
- (_Worker, _Count, _Acc) ->
- null
- end, 0, Dict).
-
-pack_seqs(Workers) ->
- SeqList = [{N,R,S} || {#shard{node=N, range=R}, S} <- Workers],
- SeqSum = lists:sum([seq(S) || {_,_,S} <- SeqList]),
- Opaque = couch_util:encodeBase64Url(term_to_binary(SeqList, [compressed])),
- ?l2b([integer_to_list(SeqSum), $-, Opaque]).
-
-seq({Seq, _Uuid, _Node}) -> Seq;
-seq({Seq, _Uuid}) -> Seq;
-seq(Seq) -> Seq.
-
-
-unpack_seq_regex_match(Packed) ->
- NewPattern = "^\\[[0-9]+\s*,\s*\"(?<opaque>.*)\"\\]$",
- OldPattern = "^\"?([0-9]+-)?(?<opaque>.*?)\"?$",
- Options = [{capture, [opaque], binary}],
- case re:run(Packed, NewPattern, Options) of
- {match, Match} ->
- Match;
- nomatch ->
- {match, Match} = re:run(Packed, OldPattern, Options),
- Match
- end.
-
-
-unpack_seq_decode_term(Opaque) ->
- binary_to_term(couch_util:decodeBase64Url(Opaque)).
-
-
-unpack_seqs(0, DbName) ->
- fabric_dict:init(mem3:shards(DbName), 0);
-
-unpack_seqs("0", DbName) ->
- fabric_dict:init(mem3:shards(DbName), 0);
-
-unpack_seqs([_SeqNum, Opaque], DbName) -> % deprecated
- do_unpack_seqs(Opaque, DbName);
-
-unpack_seqs(Packed, DbName) ->
- Opaque = unpack_seq_regex_match(Packed),
- do_unpack_seqs(Opaque, DbName).
-
-do_unpack_seqs(Opaque, DbName) ->
- % A preventative fix for FB 13533 to remove duplicate shards.
- % This just picks each unique shard and keeps the largest seq
- % value recorded.
- Decoded = unpack_seq_decode_term(Opaque),
- DedupDict = lists:foldl(fun({Node, [A, B], Seq}, Acc) ->
- dict:append({Node, [A, B]}, Seq, Acc)
- end, dict:new(), Decoded),
- Deduped = lists:map(fun({{Node, [A, B]}, SeqList}) ->
- {Node, [A, B], lists:max(SeqList)}
- end, dict:to_list(DedupDict)),
-
- % Create a fabric_dict of {Shard, Seq} entries
- % TODO relies on internal structure of fabric_dict as keylist
- Unpacked = lists:flatmap(fun({Node, [A,B], Seq}) ->
- case mem3:get_shard(DbName, Node, [A,B]) of
- {ok, Shard} ->
- [{Shard, Seq}];
- {error, not_found} ->
- []
- end
- end, Deduped),
-
- % This handles the case where the ring in the received unpacked
- % sequence is not complete; in that case it tries to fill in the
- % missing ranges with shards from the shard map
- case fabric_ring:is_progress_possible(Unpacked) of
- true ->
- Unpacked;
- false ->
- PotentialWorkers = lists:map(fun({Node, [A, B], Seq}) ->
- case mem3:get_shard(DbName, Node, [A, B]) of
- {ok, Shard} ->
- {Shard, Seq};
- {error, not_found} ->
- {#shard{node = Node, range = [A, B]}, Seq}
- end
- end, Deduped),
- Shards = mem3:shards(DbName),
- {Unpacked1, Dead, Reps} = find_replacements(PotentialWorkers, Shards),
- {Splits, Reps1} = find_split_shard_replacements(Dead, Reps),
- RepSeqs = lists:map(fun(#shard{} = S) ->
- {S, get_old_seq(S, Deduped)}
- end, Reps1),
- Unpacked1 ++ Splits ++ RepSeqs
- end.
-
-
-get_old_seq(#shard{range=R}=Shard, SinceSeqs) ->
- case lists:keyfind(R, 2, SinceSeqs) of
- {Node, R, Seq} when is_number(Seq) ->
- % Unfortunately we don't have access to the db
- % uuid so we can't set a replacement here.
- couch_log:warning("~p get_old_seq missing uuid "
- "node: ~p, range: ~p, seq: ~p", [?MODULE, Node, R, Seq]),
- 0;
- {Node, R, {Seq, Uuid}} ->
- % This update seq is using the old format that
- % didn't include the node. This information is
- % important for replacement.
- {Seq, Uuid, Node};
- {_Node, R, {Seq, Uuid, EpochNode}} ->
- % This is the newest sequence format that we
- % can use for replacement.
- {Seq, Uuid, EpochNode};
- Error ->
- couch_log:warning("~p get_old_seq error: ~p, shard: ~p, seqs: ~p",
- [?MODULE, Error, Shard, SinceSeqs]),
- 0
- end.
-
-
-changes_row(Props0) ->
- Props1 = case couch_util:get_value(deleted, Props0) of
- true ->
- Props0;
- _ ->
- lists:keydelete(deleted, 1, Props0)
- end,
- Allowed = [seq, id, changes, deleted, doc, error],
- Props2 = lists:filter(fun({K,_V}) -> lists:member(K, Allowed) end, Props1),
- {change, {Props2}}.
-
-
-find_replacements(Workers, AllShards) ->
- % Build map [B, E] => [Worker1, Worker2, ...] for all workers
- WrkMap = lists:foldl(fun({#shard{range = [B, E]}, _} = W, Acc) ->
- maps:update_with({B, E}, fun(Ws) -> [W | Ws] end, [W], Acc)
- end, #{}, fabric_dict:to_list(Workers)),
-
- % Build map [B, E] => [Shard1, Shard2, ...] for all shards
- AllMap = lists:foldl(fun(#shard{range = [B, E]} = S, Acc) ->
- maps:update_with({B, E}, fun(Ss) -> [S | Ss] end, [S], Acc)
- end, #{}, AllShards),
-
- % Custom sort function will prioritize workers over other shards.
- % The idea is to avoid killing workers unnecessarily
- SortFun = fun
- (R1 = {B, E1}, R2 = {B, E2}) ->
- case {maps:is_key(R1, WrkMap), maps:is_key(R2, WrkMap)} of
- {true, true} ->
- % Both are workers, larger interval wins
- E1 >= E2;
- {true, false} ->
- % First element is a worker range, it wins
- true;
- {false, true} ->
- % Second element is a worker range, it wins
- false;
- {false, false} ->
- % Neither one is a worker interval, pick larger one
- E1 >= E2
- end;
- ({B1, _}, {B2, _}) ->
- B1 =< B2
- end,
- Ring = mem3_util:get_ring(maps:keys(AllMap), SortFun),
-
- % Keep only workers in the ring and from one of the available nodes
- Keep = fun(#shard{range = [B, E], node = N}) ->
- lists:member({B, E}, Ring) andalso lists:keyfind(N, #shard.node,
- maps:get({B, E}, AllMap)) =/= false
- end,
- Workers1 = fabric_dict:filter(fun(S, _) -> Keep(S) end, Workers),
- Removed = fabric_dict:filter(fun(S, _) -> not Keep(S) end, Workers),
-
- {Rep, _} = lists:foldl(fun(R, {RepAcc, AllMapAcc}) ->
- case maps:is_key(R, WrkMap) of
- true ->
- % It's a worker and in the map of available shards. Make sure
- % to keep it only if there is a range available on that node
- % (reuse the Keep/1 predicate from above)
- WorkersInRange = maps:get(R, WrkMap),
- case lists:any(fun({S, _}) -> Keep(S) end, WorkersInRange) of
- true ->
- {RepAcc, AllMapAcc};
- false ->
- [Shard | Rest] = maps:get(R, AllMapAcc),
- {[Shard | RepAcc], AllMapAcc#{R := Rest}}
- end;
- false ->
- % No worker for this range. Replace from available shards
- [Shard | Rest] = maps:get(R, AllMapAcc),
- {[Shard | RepAcc], AllMapAcc#{R := Rest}}
- end
- end, {[], AllMap}, Ring),
-
- % Return the list of workers that are part of the ring, the list of removed workers
- % and a list of replacement shards that could be used to make sure the ring
- % completes.
- {Workers1, Removed, Rep}.
-
-
-% From the list of dead workers, determine if any are a result of a split shard.
-% In that case there may be a way to avoid rewinding the changes feed back to 0.
-% Returns {NewWorkers, Available} where NewWorkers is the list of
-% viable workers and Available is the list of still unused input Shards.
-find_split_shard_replacements(DeadWorkers, Shards) ->
- Acc0 = {[], Shards},
- AccF = fabric_dict:fold(fun(#shard{node = WN, range = R}, Seq, Acc) ->
- [B, E] = R,
- {SplitWorkers, Available} = Acc,
- ShardsOnSameNode = [S || #shard{node = N} = S <- Available, N =:= WN],
- SplitShards = mem3_util:non_overlapping_shards(ShardsOnSameNode, B, E),
- RepCount = length(SplitShards),
- NewWorkers = [{S, make_split_seq(Seq, RepCount)} || S <- SplitShards],
- NewAvailable = [S || S <- Available, not lists:member(S, SplitShards)],
- {NewWorkers ++ SplitWorkers, NewAvailable}
- end, Acc0, DeadWorkers),
- {Workers, Available} = AccF,
- {fabric_dict:from_list(Workers), Available}.
-
-
-make_split_seq({Num, Uuid, Node}, RepCount) when RepCount > 1 ->
- {Num, {split, Uuid}, Node};
-make_split_seq(Seq, _) ->
- Seq.
-
-
-validate_start_seq(_DbName, "now") ->
- ok;
-validate_start_seq(_DbName, 0) ->
- ok;
-validate_start_seq(_DbName, "0") ->
- ok;
-validate_start_seq(_DbName, Seq) ->
- try
- case Seq of
- [_SeqNum, Opaque] ->
- unpack_seq_decode_term(Opaque);
- Seq ->
- Opaque = unpack_seq_regex_match(Seq),
- unpack_seq_decode_term(Opaque)
- end,
- ok
- catch
- _:_ ->
- Reason = <<"Malformed sequence supplied in 'since' parameter.">>,
- {error, {bad_request, Reason}}
- end.
-
-get_changes_epoch() ->
- case application:get_env(fabric, changes_epoch) of
- undefined ->
- increment_changes_epoch(),
- get_changes_epoch();
- {ok, Epoch} ->
- Epoch
- end.
-
-increment_changes_epoch() ->
- application:set_env(fabric, changes_epoch, os:timestamp()).
-
-
-%% unpack_seq_setup() ->
-%% meck:new(mem3),
-%% meck:new(fabric_view),
-%% meck:expect(mem3, get_shard, fun(_, _, _) -> {ok, #shard{}} end),
-%% meck:expect(fabric_ring, is_progress_possible, fun(_) -> true end),
-%% ok.
-%%
-%%
-%% unpack_seqs_test_() ->
-%% {
-%% setup,
-%% fun unpack_seq_setup/0,
-%% fun (_) -> meck:unload() end,
-%% [
-%% t_unpack_seqs()
-%% ]
-%% }.
-%%
-%%
-%% t_unpack_seqs() ->
-%% ?_test(begin
-%% % BigCouch 0.3 style.
-%% assert_shards("23423-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-%% "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-%% "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA"),
-%%
-%% % BigCouch 0.4 style.
-%% assert_shards([23423,<<"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-%% "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-%% "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA">>]),
-%%
-%% % BigCouch 0.4 style (as string).
-%% assert_shards("[23423,\"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-%% "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-%% "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
-%% assert_shards("[23423 ,\"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-%% "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-%% "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
-%% assert_shards("[23423, \"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-%% "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-%% "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
-%% assert_shards("[23423 , \"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-%% "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-%% "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
-%%
-%% % with internal hyphen
-%% assert_shards("651-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwNDLXMwBCwxygOFMiQ"
-%% "5L8____sxJTcalIUgCSSfZgReE4FTmAFMWDFYXgVJQAUlQPVuSKS1EeC5BkaABSQHXz8"
-%% "VgJUbgAonB_VqIPfoUHIArvE7T6AUQh0I1-WQAzp1XB"),
-%% assert_shards([651,"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwNDLXMwBCwxygOFMiQ"
-%% "5L8____sxJTcalIUgCSSfZgReE4FTmAFMWDFYXgVJQAUlQPVuSKS1EeC5BkaABSQHXz8"
-%% "VgJUbgAonB_VqIPfoUHIArvE7T6AUQh0I1-WQAzp1XB"]),
-%%
-%% % CouchDB 1.2 style
-%% assert_shards("\"23423-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-%% "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-%% "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"")
-%% end).
-%%
-%%
-%% assert_shards(Packed) ->
-%% ?assertMatch([{#shard{},_}|_], unpack_seqs(Packed, <<"foo">>)).
-%%
-%%
-%% find_replacements_test() ->
-%% % None of the workers are in the live list of shards but there is a
-%% % replacement on n3 for the full range. It should get picked instead of
-%% % the two smaller ones on n2.
-%% Workers1 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}]),
-%% AllShards1 = [
-%% mk_shard("n1", 11, ?RING_END),
-%% mk_shard("n2", 0, 4),
-%% mk_shard("n2", 5, 10),
-%% mk_shard("n3", 0, ?RING_END)
-%% ],
-%% {WorkersRes1, Dead1, Reps1} = find_replacements(Workers1, AllShards1),
-%% ?assertEqual([], WorkersRes1),
-%% ?assertEqual(Workers1, Dead1),
-%% ?assertEqual([mk_shard("n3", 0, ?RING_END)], Reps1),
-%%
-%% % None of the workers are in the live list of shards and there is a
-%% % split replacement from n2 (range [0, 10] replaced with [0, 4], [5, 10])
-%% Workers2 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}]),
-%% AllShards2 = [
-%% mk_shard("n1", 11, ?RING_END),
-%% mk_shard("n2", 0, 4),
-%% mk_shard("n2", 5, 10)
-%% ],
-%% {WorkersRes2, Dead2, Reps2} = find_replacements(Workers2, AllShards2),
-%% ?assertEqual([], WorkersRes2),
-%% ?assertEqual(Workers2, Dead2),
-%% ?assertEqual([
-%% mk_shard("n1", 11, ?RING_END),
-%% mk_shard("n2", 0, 4),
-%% mk_shard("n2", 5, 10)
-%% ], lists:sort(Reps2)),
-%%
-%% % One worker is available and one needs to be replaced. Replacement will be
-%% % from two split shards
-%% Workers3 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}]),
-%% AllShards3 = [
-%% mk_shard("n1", 11, ?RING_END),
-%% mk_shard("n2", 0, 4),
-%% mk_shard("n2", 5, 10),
-%% mk_shard("n2", 11, ?RING_END)
-%% ],
-%% {WorkersRes3, Dead3, Reps3} = find_replacements(Workers3, AllShards3),
-%% ?assertEqual(mk_workers([{"n2", 11, ?RING_END}]), WorkersRes3),
-%% ?assertEqual(mk_workers([{"n1", 0, 10}]), Dead3),
-%% ?assertEqual([
-%% mk_shard("n2", 0, 4),
-%% mk_shard("n2", 5, 10)
-%% ], lists:sort(Reps3)),
-%%
-%% % All workers are available. Make sure they are not killed even if there is
-%% % a longer (single) shard to replace them.
-%% Workers4 = mk_workers([{"n1", 0, 10}, {"n1", 11, ?RING_END}]),
-%% AllShards4 = [
-%% mk_shard("n1", 0, 10),
-%% mk_shard("n1", 11, ?RING_END),
-%% mk_shard("n2", 0, 4),
-%% mk_shard("n2", 5, 10),
-%% mk_shard("n3", 0, ?RING_END)
-%% ],
-%% {WorkersRes4, Dead4, Reps4} = find_replacements(Workers4, AllShards4),
-%% ?assertEqual(Workers4, WorkersRes4),
-%% ?assertEqual([], Dead4),
-%% ?assertEqual([], Reps4).
-%%
-%%
-%% mk_workers(NodesRanges) ->
-%% mk_workers(NodesRanges, nil).
-%%
-%% mk_workers(NodesRanges, Val) ->
-%% orddict:from_list([{mk_shard(N, B, E), Val} || {N, B, E} <- NodesRanges]).
-%%
-%%
-%% mk_shard(Name, B, E) ->
-%% Node = list_to_atom(Name),
-%% BName = list_to_binary(Name),
-%% #shard{name = BName, node = Node, range = [B, E]}.
-%%
-%%
-%% find_split_shard_replacements_test() ->
-%% % One worker can be replaced and one can't
-%% Dead1 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}], 42),
-%% Shards1 = [
-%% mk_shard("n1", 0, 4),
-%% mk_shard("n1", 5, 10),
-%% mk_shard("n3", 11, ?RING_END)
-%% ],
-%% {Workers1, ShardsLeft1} = find_split_shard_replacements(Dead1, Shards1),
-%% ?assertEqual(mk_workers([{"n1", 0, 4}, {"n1", 5, 10}], 42), Workers1),
-%% ?assertEqual([mk_shard("n3", 11, ?RING_END)], ShardsLeft1),
-%%
-%% % All workers can be replaced - one by 1 shard, another by 3 smaller shards
-%% Dead2 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}], 42),
-%% Shards2 = [
-%% mk_shard("n1", 0, 10),
-%% mk_shard("n2", 11, 12),
-%% mk_shard("n2", 13, 14),
-%% mk_shard("n2", 15, ?RING_END)
-%% ],
-%% {Workers2, ShardsLeft2} = find_split_shard_replacements(Dead2, Shards2),
-%% ?assertEqual(mk_workers([
-%% {"n1", 0, 10},
-%% {"n2", 11, 12},
-%% {"n2", 13, 14},
-%% {"n2", 15, ?RING_END}
-%% ], 42), Workers2),
-%% ?assertEqual([], ShardsLeft2),
-%%
-%% % No workers can be replaced. Ranges match but they are on different nodes
-%% Dead3 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}], 42),
-%% Shards3 = [
-%% mk_shard("n2", 0, 10),
-%% mk_shard("n3", 11, ?RING_END)
-%% ],
-%% {Workers3, ShardsLeft3} = find_split_shard_replacements(Dead3, Shards3),
-%% ?assertEqual([], Workers3),
-%% ?assertEqual(Shards3, ShardsLeft3).
diff --git a/src/fabric/src/fabric_view_map.erl b/src/fabric/src/fabric_view_map.erl
deleted file mode 100644
index b8d0d392a..000000000
--- a/src/fabric/src/fabric_view_map.erl
+++ /dev/null
@@ -1,267 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_view_map).
-
--export([go/8]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-go(DbName, Options, GroupId, View, Args, Callback, Acc, VInfo)
- when is_binary(GroupId) ->
- {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, []),
- go(DbName, Options, DDoc, View, Args, Callback, Acc, VInfo);
-
-go(Db, Options, DDoc, View, Args, Callback, Acc, VInfo) ->
- DbName = fabric:dbname(Db),
- {Shards, RingOpts} = fabric_view:get_shards(Db, Args),
- {CoordArgs, WorkerArgs} = fabric_view:fix_skip_and_limit(Args),
- DocIdAndRev = fabric_util:doc_id_and_rev(DDoc),
- fabric_view:maybe_update_others(DbName, DocIdAndRev, Shards, View, Args),
- Repls = fabric_ring:get_shard_replacements(DbName, Shards),
- RPCArgs = [DocIdAndRev, View, WorkerArgs, Options],
- StartFun = fun(Shard) ->
- hd(fabric_util:submit_jobs([Shard], fabric_rpc, map_view, RPCArgs))
- end,
- Workers0 = fabric_util:submit_jobs(Shards, fabric_rpc, map_view, RPCArgs),
- RexiMon = fabric_util:create_monitors(Workers0),
- try
- case fabric_streams:start(Workers0, #shard.ref, StartFun, Repls,
- RingOpts) of
- {ok, ddoc_updated} ->
- Callback({error, ddoc_updated}, Acc);
- {ok, Workers} ->
- try
- go(DbName, Workers, VInfo, CoordArgs, Callback, Acc)
- after
- fabric_streams:cleanup(Workers)
- end;
- {timeout, NewState} ->
- DefunctWorkers = fabric_util:remove_done_workers(
- NewState#stream_acc.workers,
- waiting
- ),
- fabric_util:log_timeout(
- DefunctWorkers,
- "map_view"
- ),
- Callback({error, timeout}, Acc);
- {error, Error} ->
- Callback({error, Error}, Acc)
- end
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-go(DbName, Workers, {map, View, _}, Args, Callback, Acc0) ->
- #mrargs{limit = Limit, skip = Skip, keys = Keys, update_seq=UpdateSeq} = Args,
- Collation = couch_util:get_value(<<"collation">>, View#mrview.options),
- State = #collector{
- db_name=DbName,
- query_args = Args,
- callback = Callback,
- counters = fabric_dict:init(Workers, 0),
- skip = Skip,
- limit = Limit,
- keys = fabric_view:keydict(Keys),
- sorted = Args#mrargs.sorted,
- collation = Collation,
- user_acc = Acc0,
- update_seq = case UpdateSeq of true -> []; false -> nil end
- },
- case rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
- State, fabric_util:view_timeout(Args), 1000 * 60 * 60) of
- {ok, NewState} ->
- {ok, NewState#collector.user_acc};
- {timeout, NewState} ->
- Callback({error, timeout}, NewState#collector.user_acc);
- {error, Resp} ->
- {ok, Resp}
- end.
-
-handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _, State) ->
- fabric_view:check_down_shards(State, NodeRef);
-
-handle_message({rexi_EXIT, Reason}, Worker, State) ->
- fabric_view:handle_worker_exit(State, Worker, Reason);
-
-handle_message({meta, Meta0}, {Worker, From}, State) ->
- Tot = couch_util:get_value(total, Meta0, 0),
- Off = couch_util:get_value(offset, Meta0, 0),
- Seq = couch_util:get_value(update_seq, Meta0, 0),
- #collector{
- callback = Callback,
- counters = Counters0,
- total_rows = Total0,
- offset = Offset0,
- user_acc = AccIn,
- update_seq = UpdateSeq0
- } = State,
- % Assert that we don't have other messages from this
- % worker when the total_and_offset message arrives.
- 0 = fabric_dict:lookup_element(Worker, Counters0),
- rexi:stream_ack(From),
- Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
- Total = Total0 + Tot,
- Offset = Offset0 + Off,
- UpdateSeq = case UpdateSeq0 of
- nil -> nil;
- _ -> [{Worker, Seq} | UpdateSeq0]
- end,
- case fabric_dict:any(0, Counters1) of
- true ->
- {ok, State#collector{
- counters = Counters1,
- total_rows = Total,
- update_seq = UpdateSeq,
- offset = Offset
- }};
- false ->
- FinalOffset = erlang:min(Total, Offset+State#collector.skip),
- Meta = [{total, Total}, {offset, FinalOffset}] ++
- case UpdateSeq of
- nil ->
- [];
- _ ->
- [{update_seq, fabric_view_changes:pack_seqs(UpdateSeq)}]
- end,
- {Go, Acc} = Callback({meta, Meta}, AccIn),
- {Go, State#collector{
- counters = fabric_dict:decrement_all(Counters1),
- total_rows = Total,
- offset = FinalOffset,
- user_acc = Acc
- }}
- end;
-
-handle_message(#view_row{}, {_, _}, #collector{limit=0} = State) ->
- #collector{callback=Callback} = State,
- {_, Acc} = Callback(complete, State#collector.user_acc),
- {stop, State#collector{user_acc=Acc}};
-
-handle_message(#view_row{} = Row, {_,From}, #collector{sorted=false} = St) ->
- #collector{callback=Callback, user_acc=AccIn, limit=Limit} = St,
- {Go, Acc} = Callback(fabric_view:transform_row(Row), AccIn),
- rexi:stream_ack(From),
- {Go, St#collector{user_acc=Acc, limit=Limit-1}};
-
-handle_message(#view_row{} = Row, {Worker, From}, State) ->
- #collector{
- query_args = #mrargs{direction = Dir},
- counters = Counters0,
- rows = Rows0,
- keys = KeyDict0,
- collation = Collation
- } = State,
- {Rows, KeyDict} = merge_row(
- Dir,
- Collation,
- KeyDict0,
- Row#view_row{worker={Worker, From}},
- Rows0
- ),
- Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
- State1 = State#collector{rows=Rows, counters=Counters1, keys=KeyDict},
- fabric_view:maybe_send_row(State1);
-
-handle_message(complete, Worker, State) ->
- Counters = fabric_dict:update_counter(Worker, 1, State#collector.counters),
- fabric_view:maybe_send_row(State#collector{counters = Counters});
-
-handle_message({execution_stats, _} = Msg, {_,From}, St) ->
- #collector{callback=Callback, user_acc=AccIn} = St,
- {Go, Acc} = Callback(Msg, AccIn),
- rexi:stream_ack(From),
- {Go, St#collector{user_acc=Acc}};
-
-handle_message(ddoc_updated, _Worker, State) ->
- {stop, State}.
-
-merge_row(Dir, Collation, undefined, Row, Rows0) ->
- Rows1 = lists:merge(
- fun(#view_row{key=KeyA, id=IdA}, #view_row{key=KeyB, id=IdB}) ->
- compare(Dir, Collation, {KeyA, IdA}, {KeyB, IdB})
- end,
- [Row],
- Rows0
- ),
- {Rows1, undefined};
-merge_row(Dir, Collation, KeyDict0, Row, Rows0) ->
- CmpFun = case Collation of
- <<"raw">> ->
- fun (A, A) -> 0;
- (A, B) -> case A < B of
- true -> -1;
- false -> 1
- end
- end;
- _ ->
- fun couch_ejson_compare:less/2
- end,
- case maybe_update_keydict(Row#view_row.key, KeyDict0, CmpFun) of
- undefined ->
- {Rows0, KeyDict0};
- KeyDict1 ->
- Rows1 = lists:merge(
- fun(#view_row{key=A, id=IdA}, #view_row{key=B, id=IdB}) ->
- case {Dir, CmpFun(A, B)} of
- {fwd, 0} ->
- IdA < IdB;
- {rev, 0} ->
- IdB < IdA;
- {fwd, _} ->
- dict:fetch(A, KeyDict1) < dict:fetch(B, KeyDict1);
- {rev, _} ->
- dict:fetch(B, KeyDict1) < dict:fetch(A, KeyDict1)
- end
- end,
- [Row],
- Rows0
- ),
- {Rows1, KeyDict1}
- end.
-
-compare(_, _, A, A) -> true;
-compare(fwd, <<"raw">>, A, B) -> A < B;
-compare(rev, <<"raw">>, A, B) -> B < A;
-compare(fwd, _, A, B) -> couch_ejson_compare:less_json_ids(A, B);
-compare(rev, _, A, B) -> couch_ejson_compare:less_json_ids(B, A).
-
-% KeyDict captures the ordering of keys POSTed by the user by mapping them to
-% integers (see fabric_view:keydict/1). It's possible that these keys
-% do not compare equal (i.e., =:=, used by dict) to those returned by the view
-% but are in fact equal under ICU. In this case (assuming the view uses ICU
-% collation) we must update KeyDict with a mapping from the ICU-equal key to its
-% appropriate value.
-maybe_update_keydict(Key, KeyDict, CmpFun) ->
- case dict:find(Key, KeyDict) of
- {ok, _} ->
- KeyDict;
- error ->
- case key_index(Key, dict:to_list(KeyDict), CmpFun) of
- undefined ->
- undefined;
- Value ->
- dict:store(Key, Value, KeyDict)
- end
- end.
-
-key_index(_, [], _) ->
- undefined;
-key_index(KeyA, [{KeyB, Value}|KVs], CmpFun) ->
- case CmpFun(KeyA, KeyB) of
- 0 -> Value;
- _ -> key_index(KeyA, KVs, CmpFun)
- end.
diff --git a/src/fabric/src/fabric_view_reduce.erl b/src/fabric/src/fabric_view_reduce.erl
deleted file mode 100644
index a432b2cd5..000000000
--- a/src/fabric/src/fabric_view_reduce.erl
+++ /dev/null
@@ -1,165 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_view_reduce).
-
--export([go/7]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-go(DbName, GroupId, View, Args, Callback, Acc0, VInfo) when is_binary(GroupId) ->
- {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, []),
- go(DbName, DDoc, View, Args, Callback, Acc0, VInfo);
-
-go(Db, DDoc, VName, Args, Callback, Acc, VInfo) ->
- DbName = fabric:dbname(Db),
- {Shards, RingOpts} = fabric_view:get_shards(Db, Args),
- {CoordArgs, WorkerArgs} = fabric_view:fix_skip_and_limit(Args),
- DocIdAndRev = fabric_util:doc_id_and_rev(DDoc),
- RPCArgs = [DocIdAndRev, VName, WorkerArgs],
- fabric_view:maybe_update_others(DbName, DocIdAndRev, Shards, VName, Args),
- Repls = fabric_ring:get_shard_replacements(DbName, Shards),
- StartFun = fun(Shard) ->
- hd(fabric_util:submit_jobs([Shard], fabric_rpc, reduce_view, RPCArgs))
- end,
- Workers0 = fabric_util:submit_jobs(Shards, fabric_rpc, reduce_view, RPCArgs),
- RexiMon = fabric_util:create_monitors(Workers0),
- try
- case fabric_streams:start(Workers0, #shard.ref, StartFun, Repls,
- RingOpts) of
- {ok, ddoc_updated} ->
- Callback({error, ddoc_updated}, Acc);
- {ok, Workers} ->
- try
- go2(DbName, Workers, VInfo, CoordArgs, Callback, Acc)
- after
- fabric_streams:cleanup(Workers)
- end;
- {timeout, NewState} ->
- DefunctWorkers = fabric_util:remove_done_workers(
- NewState#stream_acc.workers,
- waiting
- ),
- fabric_util:log_timeout(
- DefunctWorkers,
- "reduce_view"
- ),
- Callback({error, timeout}, Acc);
- {error, Error} ->
- Callback({error, Error}, Acc)
- end
- after
- rexi_monitor:stop(RexiMon)
- end.
-
-go2(DbName, Workers, {red, {_, Lang, View}, _}=VInfo, Args, Callback, Acc0) ->
- #mrargs{limit = Limit, skip = Skip, keys = Keys, update_seq = UpdateSeq} = Args,
- RedSrc = couch_mrview_util:extract_view_reduce(VInfo),
- OsProc = case os_proc_needed(RedSrc) of
- true -> couch_query_servers:get_os_process(Lang);
- _ -> nil
- end,
- State = #collector{
- db_name = DbName,
- query_args = Args,
- callback = Callback,
- counters = fabric_dict:init(Workers, 0),
- keys = Keys,
- skip = Skip,
- limit = Limit,
- lang = Lang,
- os_proc = OsProc,
- reducer = RedSrc,
- collation = couch_util:get_value(<<"collation">>, View#mrview.options),
- rows = dict:new(),
- user_acc = Acc0,
- update_seq = case UpdateSeq of true -> []; false -> nil end
- },
- try rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
- State, fabric_util:view_timeout(Args), 1000 * 60 * 60) of
- {ok, NewState} ->
- {ok, NewState#collector.user_acc};
- {timeout, NewState} ->
- Callback({error, timeout}, NewState#collector.user_acc);
- {error, Resp} ->
- {ok, Resp}
- after
- if OsProc == nil -> ok; true ->
- catch couch_query_servers:ret_os_process(OsProc)
- end
- end.
-
-handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _, State) ->
- fabric_view:check_down_shards(State, NodeRef);
-
-handle_message({rexi_EXIT, Reason}, Worker, State) ->
- fabric_view:handle_worker_exit(State, Worker, Reason);
-
-handle_message({meta, Meta0}, {Worker, From}, State) ->
- Seq = couch_util:get_value(update_seq, Meta0, 0),
- #collector{
- callback = Callback,
- counters = Counters0,
- user_acc = AccIn,
- update_seq = UpdateSeq0
- } = State,
- % Assert that we don't have other messages from this
- % worker when the total_and_offset message arrives.
- 0 = fabric_dict:lookup_element(Worker, Counters0),
- rexi:stream_ack(From),
- Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
- UpdateSeq = case UpdateSeq0 of
- nil -> nil;
- _ -> [{Worker, Seq} | UpdateSeq0]
- end,
- case fabric_dict:any(0, Counters1) of
- true ->
- {ok, State#collector{
- counters = Counters1,
- update_seq = UpdateSeq
- }};
- false ->
- Meta = case UpdateSeq of
- nil ->
- [];
- _ ->
- [{update_seq, fabric_view_changes:pack_seqs(UpdateSeq)}]
- end,
- {Go, Acc} = Callback({meta, Meta}, AccIn),
- {Go, State#collector{
- counters = fabric_dict:decrement_all(Counters1),
- user_acc = Acc
- }}
- end;
-
-handle_message(#view_row{key=Key} = Row, {Worker, From}, State) ->
- #collector{counters = Counters0, rows = Rows0} = State,
- true = fabric_dict:is_key(Worker, Counters0),
- Rows = dict:append(Key, Row#view_row{worker={Worker, From}}, Rows0),
- C1 = fabric_dict:update_counter(Worker, 1, Counters0),
- State1 = State#collector{rows=Rows, counters=C1},
- fabric_view:maybe_send_row(State1);
-
-handle_message(complete, Worker, #collector{counters = Counters0} = State) ->
- true = fabric_dict:is_key(Worker, Counters0),
- C1 = fabric_dict:update_counter(Worker, 1, Counters0),
- fabric_view:maybe_send_row(State#collector{counters = C1});
-
-handle_message(ddoc_updated, _Worker, State) ->
- {stop, State}.
-
-os_proc_needed(<<"_", _/binary>>) -> false;
-os_proc_needed(_) -> true.
-
diff --git a/src/fabric/test/eunit/fabric_rpc_tests.erl b/src/fabric/test/eunit/fabric_rpc_tests.erl
deleted file mode 100644
index b94caf659..000000000
--- a/src/fabric/test/eunit/fabric_rpc_tests.erl
+++ /dev/null
@@ -1,181 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_rpc_tests).
-
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
--define(TDEF(A), {A, fun A/1}).
-
-
-main_test_() ->
- {
- setup,
- spawn,
- fun setup_all/0,
- fun teardown_all/1,
- [
- {
- foreach,
- fun setup_no_db_or_config/0,
- fun teardown_db/1,
- lists:map(fun wrap/1, [
- ?TDEF(t_no_config_non_shard_db_create_succeeds)
- ])
- },
- {
- foreach,
- fun setup_shard/0,
- fun teardown_noop/1,
- lists:map(fun wrap/1, [
- ?TDEF(t_no_db),
- ?TDEF(t_no_config_db_create_fails_for_shard),
- ?TDEF(t_no_config_db_create_fails_for_shard_rpc)
- ])
- },
- {
- foreach,
- fun setup_shard/0,
- fun teardown_db/1,
- lists:map(fun wrap/1, [
- ?TDEF(t_db_create_with_config)
- ])
- }
-
- ]
- }.
-
-
-setup_all() ->
- test_util:start_couch([rexi, mem3, fabric]).
-
-
-teardown_all(Ctx) ->
- test_util:stop_couch(Ctx).
-
-
-setup_no_db_or_config() ->
- ?tempdb().
-
-
-setup_shard() ->
- ?tempshard().
-
-
-teardown_noop(_DbName) ->
- ok.
-
-teardown_db(DbName) ->
- ok = couch_server:delete(DbName, []).
-
-
-wrap({Name, Fun}) ->
- fun(Arg) ->
- {timeout, 60, {atom_to_list(Name), fun() ->
- process_flag(trap_exit, true),
- Fun(Arg)
- end}}
- end.
-
-
-t_no_db(DbName) ->
- ?assertEqual({not_found, no_db_file}, couch_db:open_int(DbName, [?ADMIN_CTX])).
-
-
-t_no_config_non_shard_db_create_succeeds(DbName) ->
- ?assertEqual({not_found, no_db_file}, couch_db:open_int(DbName, [?ADMIN_CTX])),
- ?assertEqual(DbName, mem3:dbname(DbName)),
- ?assertMatch({ok, _}, mem3_util:get_or_create_db(DbName, [?ADMIN_CTX])).
-
-
-t_no_config_db_create_fails_for_shard(DbName) ->
- ?assertEqual({not_found, no_db_file}, couch_db:open_int(DbName, [?ADMIN_CTX])),
- ?assertException(throw, {error, missing_target}, mem3_util:get_or_create_db(DbName, [?ADMIN_CTX])).
-
-
-t_no_config_db_create_fails_for_shard_rpc(DbName) ->
- ?assertEqual({not_found, no_db_file}, couch_db:open_int(DbName, [?ADMIN_CTX])),
- ?assertException(throw, {error, missing_target}, mem3_util:get_or_create_db(DbName, [?ADMIN_CTX])),
- MFA = {fabric_rpc, get_db_info, [DbName]},
- Ref = rexi:cast(node(), self(), MFA),
- Resp = receive
- Resp0 -> Resp0
- end,
- ?assertMatch({Ref, {'rexi_EXIT', {{error, missing_target}, _}}}, Resp).
-
-
-t_db_create_with_config(DbName) ->
- MDbName = mem3:dbname(DbName),
- DbDoc = #doc{id = MDbName, body = test_db_doc()},
-
- ?assertEqual({not_found, no_db_file}, couch_db:open_int(DbName, [?ADMIN_CTX])),
-
- %% Write the dbs db config
- couch_util:with_db(mem3_sync:shards_db(), fun(Db) ->
- ?assertEqual({not_found, missing}, couch_db:open_doc(Db, MDbName, [ejson_body])),
- ?assertMatch({ok, _}, couch_db:update_docs(Db, [DbDoc]))
- end),
-
- %% Test get_or_create_db loads the properties as expected
- couch_util:with_db(mem3_sync:shards_db(), fun(Db) ->
- ?assertMatch({ok, _}, couch_db:open_doc(Db, MDbName, [ejson_body])),
- ?assertEqual({not_found, no_db_file}, couch_db:open_int(DbName, [?ADMIN_CTX])),
- Resp = mem3_util:get_or_create_db(DbName, [?ADMIN_CTX]),
- ?assertMatch({ok, _}, Resp),
- {ok, LDb} = Resp,
-
- {Body} = test_db_doc(),
- DbProps = mem3_util:get_shard_opts(Body),
- {Props} = case couch_db_engine:get_props(LDb) of
- undefined -> {[]};
- Else -> {Else}
- end,
- %% We don't normally store the default engine name
- EngineProps = case couch_db_engine:get_engine(LDb) of
- couch_bt_engine ->
- [];
- EngineName ->
- [{engine, EngineName}]
- end,
- ?assertEqual([{props, Props} | EngineProps], DbProps)
- end).
-
-
-test_db_doc() ->
- {[
- {<<"shard_suffix">>, ".1584997648"},
- {<<"changelog">>, [
- [<<"add">>, <<"00000000-7fffffff">>, <<"node1@127.0.0.1">>],
- [<<"add">>, <<"00000000-7fffffff">>, <<"node2@127.0.0.1">>],
- [<<"add">>, <<"00000000-7fffffff">>, <<"node3@127.0.0.1">>],
- [<<"add">>, <<"80000000-ffffffff">>, <<"node1@127.0.0.1">>],
- [<<"add">>, <<"80000000-ffffffff">>, <<"node2@127.0.0.1">>],
- [<<"add">>, <<"80000000-ffffffff">>, <<"node3@127.0.0.1">>]
- ]},
- {<<"by_node">>, {[
- {<<"node1@127.0.0.1">>, [<<"00000000-7fffffff">>, <<"80000000-ffffffff">>]},
- {<<"node2@127.0.0.1">>, [<<"00000000-7fffffff">>, <<"80000000-ffffffff">>]},
- {<<"node3@127.0.0.1">>, [<<"00000000-7fffffff">>, <<"80000000-ffffffff">>]}
- ]}},
- {<<"by_range">>, {[
- {<<"00000000-7fffffff">>, [<<"node1@127.0.0.1">>, <<"node2@127.0.0.1">>, <<"node3@127.0.0.1">>]},
- {<<"80000000-ffffffff">>, [<<"node1@127.0.0.1">>, <<"node2@127.0.0.1">>, <<"node3@127.0.0.1">>]}
- ]}},
- {<<"props">>, {[
- {partitioned, true},
- {hash, [couch_partition, hash, []]}
- ]}}
- ]}.
-
diff --git a/src/fabric/test/fabric2_changes_fold_tests.erl b/src/fabric/test/fabric2_changes_fold_tests.erl
index 8541d973c..2f6388388 100644
--- a/src/fabric/test/fabric2_changes_fold_tests.erl
+++ b/src/fabric/test/fabric2_changes_fold_tests.erl
@@ -22,6 +22,32 @@
-define(DOC_COUNT, 25).
+next_vs_function_with_txid_test() ->
+ Cases = [
+ {{0, 0, 1}, {0, 0, 0}},
+ {{0, 0, 2}, {0, 0, 1}},
+ {{0, 1, 0}, {0, 0, 16#FFFF}},
+ {{0, 2, 0}, {0, 1, 16#FFFF}},
+ {{1, 0, 0}, {0, 16#FFFF, 16#FFFF}},
+ {{2, 0, 0}, {1, 16#FFFF, 16#FFFF}}
+ ],
+ Next = fun({V, B, T}) -> fabric2_fdb:next_vs({versionstamp, V, B, T}) end,
+ [?assertEqual({versionstamp, RV, RB, RT}, Next({V, B, T})) ||
+ {{RV, RB, RT}, {V, B, T}} <- Cases].
+
+
+next_vs_function_without_txid_test() ->
+ Cases = [
+ {{0, 1}, {0, 0}},
+ {{0, 2}, {0, 1}},
+ {{1, 0}, {0, 16#FFFF}},
+ {{2, 0}, {1, 16#FFFF}}
+ ],
+ Next = fun({V, B}) -> fabric2_fdb:next_vs({versionstamp, V, B}) end,
+ [?assertEqual({versionstamp, RV, RB}, Next({V, B})) ||
+ {{RV, RB}, {V, B}} <- Cases].
+
+
changes_fold_test_() ->
{
"Test changes fold operations",
@@ -40,6 +66,7 @@ changes_fold_test_() ->
?TDEF_FE(fold_changes_basic_rev),
?TDEF_FE(fold_changes_since_now_rev),
?TDEF_FE(fold_changes_since_seq_rev),
+ ?TDEF_FE(fold_changes_with_end_key),
?TDEF_FE(fold_changes_basic_tx_too_old),
?TDEF_FE(fold_changes_reverse_tx_too_old),
?TDEF_FE(fold_changes_tx_too_old_with_single_row_emits),
@@ -54,6 +81,7 @@ changes_fold_test_() ->
setup_all() ->
Ctx = test_util:start_couch([fabric]),
meck:new(erlfdb, [passthrough]),
+ meck:new(fabric2_server, [passthrough]),
Ctx.
@@ -64,6 +92,7 @@ teardown_all(Ctx) ->
setup() ->
fabric2_test_util:tx_too_old_mock_erlfdb(),
+ meck:expect(fabric2_server, get_retry_limit, 0, 3),
{ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
Rows = lists:map(fun(Val) ->
DocId = fabric2_util:uuid(),
@@ -84,6 +113,8 @@ setup() ->
cleanup({Db, _DocIdRevs}) ->
+ meck:reset(fabric2_server),
+ meck:expect(fabric2_server, get_retry_limit, 0, meck:passthrough()),
fabric2_test_util:tx_too_old_reset_errors(),
ok = fabric2_db:delete(fabric2_db:name(Db), []).
@@ -124,6 +155,16 @@ fold_changes_since_seq_rev({Db, DocRows}) ->
fold_changes_since_seq_rev({Db, RestRows}).
+fold_changes_with_end_key({Db, DocRows}) ->
+ lists:foldl(fun(DocRow, Acc) ->
+ EndSeq = maps:get(sequence, DocRow),
+ Changes = changes(Db, 0, [{end_key, EndSeq}]),
+ NewAcc = [DocRow | Acc],
+ ?assertEqual(Changes, NewAcc),
+ NewAcc
+ end, [], DocRows).
+
+
fold_changes_basic_tx_too_old({Db, DocRows0}) ->
DocRows = lists:reverse(DocRows0),
diff --git a/src/fabric/test/fabric2_db_crud_tests.erl b/src/fabric/test/fabric2_db_crud_tests.erl
index 3d90c65b5..ab157d881 100644
--- a/src/fabric/test/fabric2_db_crud_tests.erl
+++ b/src/fabric/test/fabric2_db_crud_tests.erl
@@ -449,9 +449,12 @@ list_dbs_tx_too_old(_) ->
?assertMatch({ok, _}, fabric2_db:create(DbName1, [])),
?assertMatch({ok, _}, fabric2_db:create(DbName2, [])),
- UserFun = fun(Row, Acc) ->
- fabric2_test_util:tx_too_old_raise_in_user_fun(),
- {ok, [Row | Acc]}
+ UserFun = fun
+ ({row, _} = Row, Acc) ->
+ fabric2_test_util:tx_too_old_raise_in_user_fun(),
+ {ok, [Row | Acc]};
+ (Row, Acc) ->
+ {ok, [Row | Acc]}
end,
% Get the expected output without any transactions timing out
@@ -492,9 +495,12 @@ list_dbs_info_tx_too_old(_) ->
DbName
end, lists:seq(1, DbCount)),
- UserFun = fun(Row, Acc) ->
- fabric2_test_util:tx_too_old_raise_in_user_fun(),
- {ok, [Row | Acc]}
+ UserFun = fun
+ ({row, _} = Row, Acc) ->
+ fabric2_test_util:tx_too_old_raise_in_user_fun(),
+ {ok, [Row | Acc]};
+ (Row, Acc) ->
+ {ok, [Row | Acc]}
end,
% This is the expected return with no tx timeouts
diff --git a/src/fabric/test/fabric2_dir_prefix_tests.erl b/src/fabric/test/fabric2_dir_prefix_tests.erl
index 2943d6533..8eacfaf82 100644
--- a/src/fabric/test/fabric2_dir_prefix_tests.erl
+++ b/src/fabric/test/fabric2_dir_prefix_tests.erl
@@ -23,10 +23,10 @@ dir_prefix_test_() ->
"Test couchdb fdb directory prefix",
setup,
fun() ->
- % erlfdb, rexi and mem3 are all dependent apps for fabric. We make
+ % erlfdb and ctrace are dependent apps for fabric. We make
% sure to start them so when fabric is started during the test it
% already has its dependencies
- test_util:start_couch([erlfdb, rexi, mem3, ctrace, fabric])
+ test_util:start_couch([erlfdb, ctrace, fabric])
end,
fun(Ctx) ->
config:delete("fabric", "fdb_directory"),
diff --git a/src/fabric/test/fabric2_doc_crud_tests.erl b/src/fabric/test/fabric2_doc_crud_tests.erl
index 7a24b7d52..bc78887d2 100644
--- a/src/fabric/test/fabric2_doc_crud_tests.erl
+++ b/src/fabric/test/fabric2_doc_crud_tests.erl
@@ -75,7 +75,7 @@ doc_crud_test_() ->
setup() ->
- Ctx = test_util:start_couch([fabric]),
+ Ctx = test_util:start_couch([fabric, couch_js]),
{ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
{Db, Ctx}.
diff --git a/src/fabric/test/fabric2_node_types_tests.erl b/src/fabric/test/fabric2_node_types_tests.erl
index 074afe86b..98bfd7e18 100644
--- a/src/fabric/test/fabric2_node_types_tests.erl
+++ b/src/fabric/test/fabric2_node_types_tests.erl
@@ -25,10 +25,10 @@ node_types_test_() ->
os:putenv("COUCHDB_NODE_TYPE_FOO", "false"),
os:putenv("COUCHDB_NODE_TYPE_BAZ", "true"),
os:putenv("COUCHDB_NODE_TYPE_ZIG", ""),
- % erlfdb, rexi and mem3 are all dependent apps for fabric. We make
+ % erlfdb and ctrace are dependent apps for fabric. We make
% sure to start them so when fabric is started during the test it
% already has its dependencies
- test_util:start_couch([erlfdb, rexi, mem3, ctrace, fabric])
+ test_util:start_couch([erlfdb, ctrace, fabric])
end,
fun(Ctx) ->
test_util:stop_couch(Ctx),
diff --git a/src/fabric/test/fabric2_snapshot_tests.erl b/src/fabric/test/fabric2_snapshot_tests.erl
new file mode 100644
index 000000000..37ba62664
--- /dev/null
+++ b/src/fabric/test/fabric2_snapshot_tests.erl
@@ -0,0 +1,134 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_snapshot_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("fabric2.hrl").
+-include("fabric2_test.hrl").
+
+
+fdb_ss_test_() ->
+ {
+ "Test snapshot usage",
+ setup,
+ fun setup/0,
+ fun cleanup/1,
+ with([
+ ?TDEF(retry_without_snapshot),
+ ?TDEF(no_retry_with_snapshot)
+ ])
+ }.
+
+
+setup() ->
+ Ctx = test_util:start_couch([fabric]),
+ {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+ {Db, Ctx}.
+
+
+cleanup({Db, Ctx}) ->
+ ok = fabric2_db:delete(fabric2_db:name(Db), []),
+ test_util:stop_couch(Ctx).
+
+
+retry_without_snapshot({Db, _}) ->
+ DbName = fabric2_db:name(Db),
+ put(retry_count, 0),
+ erase(conflict_pid),
+ InitDbSeq = fabric2_db:get_update_seq(Db),
+ DbSeq = fabric2_fdb:transactional(Db, fun(TxDb) ->
+ put(retry_count, get(retry_count) + 1),
+
+ % Fetch the update_seq
+ Seq = fabric2_db:get_update_seq(TxDb),
+
+ % Generate a no-op write so that we don't hit the
+ % optimization to skip commits on read-only
+ % transactions
+ bump_view_size(TxDb),
+
+ % Generate a conflicting transaction while
+ % we're not yet committed
+ case get(conflict_pid) of
+ undefined ->
+ {Pid, Ref} = spawn_monitor(fun() -> generate_conflict(DbName) end),
+ receive {'DOWN', Ref, _, _, normal} -> ok end,
+ put(conflict_pid, Pid);
+ Pid when is_pid(Pid) ->
+ ok
+ end,
+
+ Seq
+ end),
+
+ ?assertEqual(2, get(retry_count)),
+ ?assertNotEqual(InitDbSeq, DbSeq).
+
+
+no_retry_with_snapshot({Db, _}) ->
+ DbName = fabric2_db:name(Db),
+ put(retry_count, 0),
+ erase(conflict_pid),
+ InitDbSeq = fabric2_db:get_update_seq(Db),
+ DbSeq = fabric2_fdb:transactional(Db, fun(TxDb) ->
+ put(retry_count, get(retry_count) + 1),
+
+ % Fetch the update_seq
+ Seq = fabric2_fdb:with_snapshot(TxDb, fun(SSDb) ->
+ fabric2_db:get_update_seq(SSDb)
+ end),
+
+ % Generate a no-op write so that we don't hit the
+ % optimization to skip commits on read-only
+ % transactions
+ bump_view_size(TxDb),
+
+ % Generate a conflicting transaction while
+ % we're not yet committed
+ case get(conflict_pid) of
+ undefined ->
+ {Pid, Ref} = spawn_monitor(fun() -> generate_conflict(DbName) end),
+ receive {'DOWN', Ref, _, _, normal} -> ok end,
+ put(conflict_pid, Pid);
+ Pid when is_pid(Pid) ->
+ ok
+ end,
+
+ Seq
+ end),
+
+ ?assertEqual(1, get(retry_count)),
+ ?assertEqual(InitDbSeq, DbSeq).
+
+
+bump_view_size(TxDb) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = TxDb,
+
+ DbTuple = {?DB_STATS, <<"sizes">>, <<"views">>},
+ DbKey = erlfdb_tuple:pack(DbTuple, DbPrefix),
+ erlfdb:add(Tx, DbKey, 0).
+
+
+generate_conflict(DbName) ->
+ {ok, Db} = fabric2_db:open(DbName, [{user_ctx, ?ADMIN_USER}]),
+ Doc = #doc{
+ id = fabric2_util:uuid(),
+ body = {[{<<"foo">>, <<"bar">>}]}
+ },
+ {ok, _} = fabric2_db:update_doc(Db, Doc).
diff --git a/src/fabric/test/fabric2_test_util.erl b/src/fabric/test/fabric2_test_util.erl
index acbe252b1..3d3477c5d 100644
--- a/src/fabric/test/fabric2_test_util.erl
+++ b/src/fabric/test/fabric2_test_util.erl
@@ -21,6 +21,9 @@
]).
+-include_lib("fabric/include/fabric2.hrl").
+
+
-define(PDICT_ERROR_IN_FOLD_RANGE, '$fabric2_error_in_fold_range').
-define(PDICT_ERROR_IN_USER_FUN, '$fabric2_error_throw_in_user_fun').
@@ -68,7 +71,7 @@ maybe_tx_too_old(Key) ->
put(Key, {Skip - 1, Count});
{0, Count} when is_integer(Count), Count > 0 ->
put(Key, {0, Count - 1}),
- error({erlfdb_error, 1007});
+ error({erlfdb_error, ?ERLFDB_TRANSACTION_TOO_OLD});
{0, 0} ->
ok;
undefined ->
diff --git a/src/fabric/test/fabric2_tx_options_tests.erl b/src/fabric/test/fabric2_tx_options_tests.erl
index b93cc3d69..b326464d4 100644
--- a/src/fabric/test/fabric2_tx_options_tests.erl
+++ b/src/fabric/test/fabric2_tx_options_tests.erl
@@ -26,10 +26,10 @@ fdb_tx_options_test_() ->
setup,
fun() ->
meck:new(erlfdb, [passthrough]),
- % erlfdb, rexi and mem3 are all dependent apps for fabric. We make
+ % erlfdb and ctrace are dependent apps for fabric. We make
% sure to start them so when fabric is started during the test it
% already has its dependencies
- test_util:start_couch([erlfdb, rexi, mem3, ctrace, fabric])
+ test_util:start_couch([erlfdb, ctrace, fabric])
end,
fun(Ctx) ->
meck:unload(),
@@ -43,7 +43,9 @@ fdb_tx_options_test_() ->
end,
with([
?TDEF(options_take_effect, 15),
- ?TDEF(can_configure_options_at_runtime, 15)
+ ?TDEF(can_configure_options_at_runtime, 15),
+ ?TDEF(can_apply_options_to_db_name_transactions),
+ ?TDEF(can_apply_options_to_db_handle_transactions)
])
}.
@@ -61,7 +63,7 @@ options_take_effect(_) ->
DbName = ?tempdb(),
{ok, Db} = fabric2_db:create(DbName, [?ADMIN_CTX]),
- ?assertError({erlfdb_error, ?TRANSACTION_TOO_LARGE},
+ ?assertError({erlfdb_error, ?ERLFDB_TRANSACTION_TOO_LARGE},
add_large_doc(Db, 200000)),
ok = fabric2_db:delete(DbName, [?ADMIN_CTX]).
@@ -79,14 +81,20 @@ can_configure_options_at_runtime(_) ->
DbName = ?tempdb(),
{ok, Db} = fabric2_db:create(DbName, [?ADMIN_CTX]),
- ?assertError({erlfdb_error, ?TRANSACTION_TOO_LARGE},
+ ?assertError({erlfdb_error, ?ERLFDB_TRANSACTION_TOO_LARGE},
add_large_doc(Db, 200000)),
meck:reset(erlfdb),
+ % Wait until after fabric2_server has updated to the new fdb handle
+ OldDbHandle = get(?PDICT_DB_KEY),
config:delete("fdb_tx_options", "size_limit", false),
- % Assert that we get a new handle and are setting our default values
- meck:wait(erlfdb, set_option, ['_', timeout, '_'], 4000),
+ test_util:wait(fun() ->
+ case application:get_env(fabric, db) of
+ {ok, OldDbHandle} -> wait;
+ {ok, _} -> ok
+ end
+ end),
erase(?PDICT_DB_KEY),
{ok, Db1} = fabric2_db:open(DbName, [?ADMIN_CTX]),
@@ -95,9 +103,40 @@ can_configure_options_at_runtime(_) ->
ok = fabric2_db:delete(DbName, [?ADMIN_CTX]).
+can_apply_options_to_db_name_transactions(_) ->
+ DbName = ?tempdb(),
+
+ TxFun = fun(TxDb) ->
+ #{tx := Tx} = TxDb,
+ fabric2_fdb:create(TxDb, [?ADMIN_CTX]),
+ erlfdb:wait(erlfdb:get(Tx, <<16#FF, "/primaryDatacenter">>))
+ end,
+ TxOpts = #{read_system_keys => <<>>},
+ ?assertEqual(<<>>, fabric2_fdb:transactional(DbName, TxOpts, TxFun)),
+
+ ok = fabric2_db:delete(DbName, [?ADMIN_CTX]).
+
+
+can_apply_options_to_db_handle_transactions(_) ->
+ DbName = ?tempdb(),
+ {ok, Db} = fabric2_db:create(DbName, [?ADMIN_CTX]),
+
+ TxFun = fun(TxDb) ->
+ fabric2_db:update_doc(TxDb, large_doc(200000))
+ end,
+ TxOpts = #{size_limit => 150000},
+ ?assertError({erlfdb_error, ?ERLFDB_TRANSACTION_TOO_LARGE},
+ fabric2_fdb:transactional(Db, TxOpts, TxFun)),
+
+ ok = fabric2_db:delete(DbName, [?ADMIN_CTX]).
+
+
add_large_doc(Db, Size) ->
- Doc = #doc{
+ fabric2_db:update_doc(Db, large_doc(Size)).
+
+
+large_doc(Size) ->
+ #doc{
id = fabric2_util:uuid(),
body = {[{<<"x">>, crypto:strong_rand_bytes(Size)}]}
- },
- fabric2_db:update_doc(Db, Doc).
+ }.
diff --git a/src/global_changes/.gitignore b/src/global_changes/.gitignore
deleted file mode 100644
index e1b16d52c..000000000
--- a/src/global_changes/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-.eunit/
-ebin/
diff --git a/src/global_changes/LICENSE b/src/global_changes/LICENSE
deleted file mode 100644
index 94ad231b8..000000000
--- a/src/global_changes/LICENSE
+++ /dev/null
@@ -1,203 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
diff --git a/src/global_changes/README.md b/src/global_changes/README.md
deleted file mode 100644
index f22ee2ce9..000000000
--- a/src/global_changes/README.md
+++ /dev/null
@@ -1,27 +0,0 @@
-### global\_changes
-
-This app supplies the functionality for the `/_db_updates` endpoint.
-
-When a database is created, deleted, or updated, a corresponding event will be persisted to disk (Note: This was designed without the guarantee that a DB event will be persisted or ever occur in the `_db_updates` feed. It probably will, but it isn't guaranteed). Users can subscribe to a `_changes`-like feed of these database events by querying the `_db_updates` endpoint.
-
-When an admin user queries the `/_db_updates` endpoint, they will see the account name associated with the DB update as well as the type of update (created, updated, or deleted).
-
-### Captured Metrics
-
-1: `global_changes`, `db_writes`: The number of doc updates caused by global\_changes.
-
-2: `global_changes`, `server_pending_updates`: The number of documents aggregated into the pending write batch.
-
-3: `global_changes`, `listener_pending_updates`: The number of documents aggregated into the pending event batch.
-
-4: `global_changes`, `event_doc_conflict`: The number of rev tree branches in event docs encountered by global\_changes. Should never happen.
-
-5: `global_changes`, `rpcs`: The number of non-fabric RPCs caused by global\_changes.
-
-### Important Configs
-
-1: `global_changes`, `max_event_delay`: (integer, milliseconds) The total time added before an event is forwarded to the writer.
-
-2: `global_changes`, `max_write_delay`: (integer, milliseconds) The time added before an event is sent to disk.
-
-3: `global_changes`, `update_db`: (true/false) A flag setting whether to update the global\_changes database. If false, changes will be lost and there will be no performance impact of global\_changes on the cluster.
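The removed README's config section above corresponds to the [global_changes] section of the ini files. A minimal sketch of those settings, using the default values that appear in the deleted listener/server code further below (defaults, not recommendations):

[global_changes]
; milliseconds an event is buffered before being forwarded to the writer
max_event_delay = 25
; milliseconds a batch is buffered before being written to disk
max_write_delay = 500
; set to false to stop writing to the _global_changes database
update_db = true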
diff --git a/src/global_changes/priv/stats_descriptions.cfg b/src/global_changes/priv/stats_descriptions.cfg
deleted file mode 100644
index beb524895..000000000
--- a/src/global_changes/priv/stats_descriptions.cfg
+++ /dev/null
@@ -1,20 +0,0 @@
-{[global_changes, db_writes], [
- {type, counter},
- {desc, <<"number of db writes performed by global changes">>}
-]}.
-{[global_changes, event_doc_conflict], [
- {type, counter},
- {desc, <<"number of conflicted event docs encountered by global changes">>}
-]}.
-{[global_changes, listener_pending_updates], [
- {type, gauge},
- {desc, <<"number of global changes updates pending writes in global_changes_listener">>}
-]}.
-{[global_changes, rpcs], [
- {type, counter},
- {desc, <<"number of rpc operations performed by global_changes">>}
-]}.
-{[global_changes, server_pending_updates], [
- {type, gauge},
- {desc, <<"number of global changes updates pending writes in global_changes_server">>}
-]}.
diff --git a/src/global_changes/src/global_changes.app.src b/src/global_changes/src/global_changes.app.src
deleted file mode 100644
index a1dc2f38b..000000000
--- a/src/global_changes/src/global_changes.app.src
+++ /dev/null
@@ -1,32 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, global_changes, [
- {description, "_changes-like feeds for multiple DBs"},
- {vsn, git},
- {registered, [global_changes_config_listener, global_changes_server]},
- {applications, [
- kernel,
- stdlib,
- couch_epi,
- config,
- couch_log,
- couch_stats,
- couch,
- mem3,
- fabric
- ]},
- {mod, {global_changes_app, []}},
- {env, [
- {dbname, <<"_global_changes">>}
- ]}
-]}.
diff --git a/src/global_changes/src/global_changes_app.erl b/src/global_changes/src/global_changes_app.erl
deleted file mode 100644
index 03322a27e..000000000
--- a/src/global_changes/src/global_changes_app.erl
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(global_changes_app).
--behavior(application).
-
-
--export([
- start/2,
- stop/1
-]).
-
-
-start(_StartType, _StartArgs) ->
- global_changes_sup:start_link().
-
-
-stop(_State) ->
- ok.
diff --git a/src/global_changes/src/global_changes_epi.erl b/src/global_changes/src/global_changes_epi.erl
deleted file mode 100644
index 5d8cbf928..000000000
--- a/src/global_changes/src/global_changes_epi.erl
+++ /dev/null
@@ -1,51 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(global_changes_epi).
-
--behaviour(couch_epi_plugin).
-
--export([
- app/0,
- providers/0,
- services/0,
- data_subscriptions/0,
- data_providers/0,
- processes/0,
- notify/3
-]).
-
-app() ->
- global_changes.
-
-providers() ->
- [
- {chttpd_handlers, global_changes_httpd_handlers}
- ].
-
-
-services() ->
- [
- {global_changes, global_changes_plugin}
- ].
-
-data_subscriptions() ->
- [].
-
-data_providers() ->
- [].
-
-processes() ->
- [].
-
-notify(_Key, _Old, _New) ->
- ok.
diff --git a/src/global_changes/src/global_changes_httpd.erl b/src/global_changes/src/global_changes_httpd.erl
deleted file mode 100644
index e579b09ea..000000000
--- a/src/global_changes/src/global_changes_httpd.erl
+++ /dev/null
@@ -1,285 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(global_changes_httpd).
-
--export([handle_global_changes_req/1]).
--export([default_transform_change/2]).
-
--include_lib("couch/include/couch_db.hrl").
-
--record(acc, {
- heartbeat_interval,
- last_data_sent_time,
- feed,
- prepend,
- resp,
- etag,
- username,
- limit
-}).
-
-handle_global_changes_req(#httpd{method='GET'}=Req) ->
- Db = global_changes_util:get_dbname(),
- Feed = chttpd:qs_value(Req, "feed", "normal"),
- Options = parse_global_changes_query(Req),
- Heartbeat = case lists:keyfind(heartbeat, 1, Options) of
- {heartbeat, true} -> 60000;
- {heartbeat, Other} -> Other;
- false -> false
- end,
- % Limit is handled in the changes callback, since the limit count needs to
- % only account for changes which happen after the filter.
- Limit = couch_util:get_value(limit, Options),
- %Options1 = lists:keydelete(limit, 1, Options),
- Options1 = Options,
- Owner = allowed_owner(Req),
- Acc = #acc{
- username=Owner,
- feed=Feed,
- resp=Req,
- heartbeat_interval=Heartbeat,
- limit=Limit
- },
- case Feed of
- "normal" ->
- {ok, Info} = fabric:get_db_info(Db),
- Suffix = mem3:shard_suffix(Db),
- Etag = chttpd:make_etag({Info, Suffix}),
- chttpd:etag_respond(Req, Etag, fun() ->
- fabric:changes(Db, fun changes_callback/2, Acc#acc{etag=Etag}, Options1)
- end);
- Feed when Feed =:= "continuous"; Feed =:= "longpoll"; Feed =:= "eventsource" ->
- fabric:changes(Db, fun changes_callback/2, Acc, Options1);
- _ ->
- Msg = <<"Supported `feed` types: normal, continuous, longpoll, eventsource">>,
- throw({bad_request, Msg})
- end;
-handle_global_changes_req(Req) ->
- chttpd:send_method_not_allowed(Req, "GET").
-
-transform_change(Username, Change) ->
- global_changes_plugin:transform_change(Username, Change,
- fun default_transform_change/2).
-
-default_transform_change(Username, {Props}) ->
- {id, Id} = lists:keyfind(id, 1, Props),
- {seq, Seq} = lists:keyfind(seq, 1, Props),
- Info = case binary:split(Id, <<":">>) of
- [Event0, DbName0] ->
- {Event0, DbName0};
- _ ->
- skip
- end,
- case Info of
- % Client is an admin, show them everything.
- {Event, DbName} when Username == admin ->
- {[
- {db_name, DbName},
- {type, Event},
- {seq, Seq}
- ]};
- _ ->
- skip
- end.
-
-changes_callback(waiting_for_updates, Acc) ->
- {ok, Acc};
-
-% This clause is only hit when _db_updates is queried with limit=0. For
-% limit>0, the request is stopped by maybe_finish/1.
-changes_callback({change, _}, #acc{limit=0}=Acc) ->
- {stop, Acc};
-
-% callbacks for continuous feed (newline-delimited JSON Objects)
-changes_callback(start, #acc{feed="continuous"}=Acc) ->
- #acc{resp=Req} = Acc,
- {ok, Resp} = chttpd:start_delayed_json_response(Req, 200),
- {ok, Acc#acc{resp=Resp, last_data_sent_time=os:timestamp()}};
-changes_callback({change, Change0}, #acc{feed="continuous"}=Acc) ->
- #acc{resp=Resp, username=Username} = Acc,
- case transform_change(Username, Change0) of
- skip ->
- {ok, maybe_send_heartbeat(Acc)};
- Change ->
- Line = [?JSON_ENCODE(Change) | "\n"],
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp, Line),
- Acc1 = Acc#acc{
- resp=Resp1,
- last_data_sent_time=os:timestamp()
- },
- maybe_finish(Acc1)
- end;
-changes_callback({stop, EndSeq}, #acc{feed="continuous"}=Acc) ->
- % Temporary upgrade clause - Case 24236
- changes_callback({stop, EndSeq, null}, Acc);
-changes_callback({stop, EndSeq, _Pending}, #acc{feed="continuous"}=Acc) ->
- #acc{resp=Resp} = Acc,
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp,
- [?JSON_ENCODE({[{<<"last_seq">>, EndSeq}]}) | "\n"]),
- chttpd:end_delayed_json_response(Resp1);
-
-% callbacks for eventsource feed (newline-delimited eventsource Objects)
-changes_callback(start, #acc{feed = "eventsource"} = Acc) ->
- #acc{resp = Req} = Acc,
- Headers = [
- {"Content-Type", "text/event-stream"},
- {"Cache-Control", "no-cache"}
- ],
- {ok, Resp} = chttpd:start_delayed_json_response(Req, 200, Headers),
- {ok, Acc#acc{resp = Resp, last_data_sent_time=os:timestamp()}};
-changes_callback({change, {ChangeProp}=Change}, #acc{resp = Resp, feed = "eventsource"} = Acc) ->
- Seq = proplists:get_value(seq, ChangeProp),
- Chunk = [
- "data: ", ?JSON_ENCODE(Change),
- "\n", "id: ", ?JSON_ENCODE(Seq),
- "\n\n"
- ],
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp, Chunk),
- maybe_finish(Acc#acc{resp = Resp1});
-changes_callback(timeout, #acc{feed = "eventsource"} = Acc) ->
- #acc{resp = Resp} = Acc,
- Chunk = "event: heartbeat\ndata: \n\n",
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp, Chunk),
- {ok, {"eventsource", Resp1}};
-changes_callback({stop, _EndSeq}, #acc{feed = "eventsource"} = Acc) ->
- #acc{resp = Resp} = Acc,
- % {ok, Resp1} = chttpd:send_delayed_chunk(Resp, Buf),
- chttpd:end_delayed_json_response(Resp);
-
-% callbacks for longpoll and normal (single JSON Object)
-changes_callback(start, #acc{feed="normal", etag=Etag}=Acc)
- when Etag =/= undefined ->
- #acc{resp=Req} = Acc,
- FirstChunk = "{\"results\":[\n",
- {ok, Resp} = chttpd:start_delayed_json_response(Req, 200,
- [{"Etag",Etag}], FirstChunk),
- {ok, Acc#acc{resp=Resp, prepend="", last_data_sent_time=os:timestamp()}};
-changes_callback(start, Acc) ->
- #acc{resp=Req} = Acc,
- FirstChunk = "{\"results\":[\n",
- {ok, Resp} = chttpd:start_delayed_json_response(Req, 200, [], FirstChunk),
- {ok, Acc#acc{
- resp=Resp,
- prepend="",
- last_data_sent_time=os:timestamp()
- }};
-changes_callback({change, Change0}, Acc) ->
- #acc{resp=Resp, prepend=Prepend, username=Username} = Acc,
- case transform_change(Username, Change0) of
- skip ->
- {ok, maybe_send_heartbeat(Acc)};
- Change ->
- #acc{resp=Resp, prepend=Prepend} = Acc,
- Line = [Prepend, ?JSON_ENCODE(Change)],
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp, Line),
- Acc1 = Acc#acc{
- prepend=",\r\n",
- resp=Resp1,
- last_data_sent_time=os:timestamp()
- },
- maybe_finish(Acc1)
- end;
-changes_callback({stop, EndSeq}, Acc) ->
- % Temporary upgrade clause - Case 24236
- changes_callback({stop, EndSeq, null}, Acc);
-changes_callback({stop, EndSeq, _Pending}, Acc) ->
- #acc{resp=Resp} = Acc,
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp,
- ["\n],\n\"last_seq\":", ?JSON_ENCODE(EndSeq), "}\n"]),
- chttpd:end_delayed_json_response(Resp1);
-
-changes_callback(timeout, Acc) ->
- {ok, maybe_send_heartbeat(Acc)};
-
-changes_callback({error, Reason}, #acc{resp=Req=#httpd{}}) ->
- chttpd:send_error(Req, Reason);
-changes_callback({error, Reason}, Acc) ->
- #acc{etag=Etag, feed=Feed, resp=Resp} = Acc,
- case {Feed, Etag} of
- {"normal", Etag} when Etag =/= undefined ->
- chttpd:send_error(Resp, Reason);
- _ ->
- chttpd:send_delayed_error(Resp, Reason)
- end.
-
-
-maybe_finish(Acc) ->
- case Acc#acc.limit of
- 1 ->
- {stop, Acc};
- undefined ->
- {ok, Acc};
- Limit ->
- {ok, Acc#acc{limit=Limit-1}}
- end.
-
-
-maybe_send_heartbeat(#acc{heartbeat_interval=false}=Acc) ->
- Acc;
-maybe_send_heartbeat(Acc) ->
- #acc{last_data_sent_time=LastSentTime, heartbeat_interval=Interval, resp=Resp} = Acc,
- Now = os:timestamp(),
- case timer:now_diff(Now, LastSentTime) div 1000 > Interval of
- true ->
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp, "\n"),
- Acc#acc{last_data_sent_time=Now, resp=Resp1};
- false ->
- Acc
- end.
-
-
-parse_global_changes_query(Req) ->
- lists:foldl(fun({Key, Value}, Args) ->
- case {Key, Value} of
- {"feed", _} ->
- [{feed, Value} | Args];
- {"descending", "true"} ->
- [{dir, rev} | Args];
- {"since", _} ->
- [{since, Value} | Args];
- {"limit", _} ->
- [{limit, to_non_neg_int(Value)} | Args];
- {"heartbeat", "true"} ->
- [{heartbeat, true} | Args];
- {"heartbeat", "false"} ->
- Args;
- {"heartbeat", _} ->
- [{heartbeat, to_non_neg_int(Value)} | Args];
- {"timeout", _} ->
- [{timeout, to_non_neg_int(Value)} | Args];
- _Else -> % unknown key value pair, ignore.
- Args
- end
- end, [], chttpd:qs(Req)).
-
-
-to_non_neg_int(Value) ->
- try list_to_integer(Value) of
- V when V >= 0 ->
- V;
- _ ->
- throw({bad_request, invalid_integer})
- catch error:badarg ->
- throw({bad_request, invalid_integer})
- end.
-
-allowed_owner(Req) ->
- case config:get("global_changes", "allowed_owner", undefined) of
- undefined ->
- chttpd:verify_is_server_admin(Req),
- admin;
- SpecStr ->
- {ok, {M, F, A}} = couch_util:parse_term(SpecStr),
- couch_util:validate_callback_exists(M, F, 2),
- M:F(Req, A)
- end.
diff --git a/src/global_changes/src/global_changes_httpd_handlers.erl b/src/global_changes/src/global_changes_httpd_handlers.erl
deleted file mode 100644
index 94a50abc8..000000000
--- a/src/global_changes/src/global_changes_httpd_handlers.erl
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(global_changes_httpd_handlers).
-
--export([url_handler/1, db_handler/1, design_handler/1, handler_info/3]).
-
-url_handler(<<"_db_updates">>) -> fun global_changes_httpd:handle_global_changes_req/1;
-url_handler(_) -> no_match.
-
-db_handler(_) -> no_match.
-
-design_handler(_) -> no_match.
-
-handler_info('GET', [<<"_db_updates">>], _) ->
- {'db_updates.read', #{}};
-
-handler_info(_, _, _) ->
-    no_match.
\ No newline at end of file
diff --git a/src/global_changes/src/global_changes_listener.erl b/src/global_changes/src/global_changes_listener.erl
deleted file mode 100644
index 9adf0e13d..000000000
--- a/src/global_changes/src/global_changes_listener.erl
+++ /dev/null
@@ -1,165 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(global_changes_listener).
--behavior(couch_event_listener).
-
-
--export([
- start/0
-]).
-
--export([
- init/1,
- terminate/2,
- handle_event/3,
- handle_cast/2,
- handle_info/2
-]).
-
--record(state, {
- update_db,
- pending_update_count,
- pending_updates,
- last_update_time,
- max_event_delay,
- dbname
-}).
-
-
--include_lib("mem3/include/mem3.hrl").
-
-
-start() ->
- couch_event_listener:start(?MODULE, nil, [all_dbs]).
-
-
-init(_) ->
- % get configs as strings
- UpdateDb0 = config:get("global_changes", "update_db", "true"),
- MaxEventDelay0 = config:get("global_changes", "max_event_delay", "25"),
-
- % make config strings into other data types
- UpdateDb = case UpdateDb0 of "false" -> false; _ -> true end,
- MaxEventDelay = list_to_integer(MaxEventDelay0),
-
- State = #state{
- update_db=UpdateDb,
- pending_update_count=0,
- pending_updates=sets:new(),
- max_event_delay=MaxEventDelay,
- dbname=global_changes_util:get_dbname()
- },
- {ok, State}.
-
-
-terminate(_Reason, _State) ->
- ok.
-
-
-handle_event(_ShardName, _Event, #state{update_db=false}=State) ->
- {ok, State};
-handle_event(ShardName, Event, State0)
- when Event =:= updated orelse Event =:= deleted
- orelse Event =:= created ->
- #state{dbname=ChangesDbName} = State0,
- State = case mem3:dbname(ShardName) of
- ChangesDbName ->
- State0;
- DbName ->
- #state{pending_update_count=Count} = State0,
- EventBin = erlang:atom_to_binary(Event, latin1),
- Key = <<EventBin/binary, <<":">>/binary, DbName/binary>>,
- Pending = sets:add_element(Key, State0#state.pending_updates),
- couch_stats:update_gauge(
- [global_changes, listener_pending_updates],
- Count + 1
- ),
- State0#state{pending_updates=Pending, pending_update_count=Count+1}
- end,
- maybe_send_updates(State);
-handle_event(_DbName, _Event, State) ->
- maybe_send_updates(State).
-
-
-handle_cast({set_max_event_delay, MaxEventDelay}, State) ->
- maybe_send_updates(State#state{max_event_delay=MaxEventDelay});
-handle_cast({set_update_db, Boolean}, State0) ->
- % If turning update_db off, clear out server state
- State = case {Boolean, State0#state.update_db} of
- {false, true} ->
- State0#state{
- update_db=Boolean,
- pending_updates=sets:new(),
- pending_update_count=0,
- last_update_time=undefined
- };
- _ ->
- State0#state{update_db=Boolean}
- end,
- maybe_send_updates(State);
-handle_cast(_Msg, State) ->
- maybe_send_updates(State).
-
-
-maybe_send_updates(#state{pending_update_count=0}=State) ->
- {ok, State};
-maybe_send_updates(#state{update_db=true}=State) ->
- #state{max_event_delay=MaxEventDelay, last_update_time=LastUpdateTime} = State,
- Now = os:timestamp(),
- case LastUpdateTime of
- undefined ->
- {ok, State#state{last_update_time=Now}, MaxEventDelay};
- _ ->
- Delta = timer:now_diff(Now, LastUpdateTime) div 1000,
- if Delta >= MaxEventDelay ->
- Updates = sets:to_list(State#state.pending_updates),
- try group_updates_by_node(State#state.dbname, Updates) of
- Grouped ->
- dict:map(fun(Node, Docs) ->
- couch_stats:increment_counter([global_changes, rpcs]),
- global_changes_server:update_docs(Node, Docs)
- end, Grouped)
- catch error:database_does_not_exist ->
- ok
- end,
- couch_stats:update_gauge(
- [global_changes, listener_pending_updates],
- 0
- ),
- State1 = State#state{
- pending_updates=sets:new(),
- pending_update_count=0,
- last_update_time=undefined
- },
- {ok, State1};
- true ->
- {ok, State, MaxEventDelay-Delta}
- end
- end;
-maybe_send_updates(State) ->
- {ok, State}.
-
-
-handle_info(_Msg, State) ->
- maybe_send_updates(State).
-
-
-%% restore spec when R14 support is dropped
-%% -spec group_updates_by_node(binary(), [binary()]) -> dict:dict().
-group_updates_by_node(DbName, Updates) ->
- lists:foldl(fun(Key, OuterAcc) ->
- Shards = mem3:shards(DbName, Key),
- lists:foldl(fun(#shard{node=Node}, InnerAcc) ->
- dict:append(Node, Key, InnerAcc)
- end, OuterAcc, Shards)
- end, dict:new(), Updates).
diff --git a/src/global_changes/src/global_changes_plugin.erl b/src/global_changes/src/global_changes_plugin.erl
deleted file mode 100644
index 96bb91eaa..000000000
--- a/src/global_changes/src/global_changes_plugin.erl
+++ /dev/null
@@ -1,40 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(global_changes_plugin).
-
--export([transform_change/3]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(SERVICE_ID, global_changes).
-
-
-%% ------------------------------------------------------------------
-%% API Function Definitions
-%% ------------------------------------------------------------------
-
-transform_change(Username, Change, Default) ->
- maybe_handle(transform_change, [Username, Change], Default).
-
-%% ------------------------------------------------------------------
-%% Internal Function Definitions
-%% ------------------------------------------------------------------
-
-maybe_handle(Func, Args, Default) ->
- Handle = couch_epi:get_handle(?SERVICE_ID),
- case couch_epi:apply(Handle, ?SERVICE_ID, Func, Args, []) of
- [] ->
- apply(Default, Args);
- [Result] ->
- Result
- end.
diff --git a/src/global_changes/src/global_changes_server.erl b/src/global_changes/src/global_changes_server.erl
deleted file mode 100644
index a116e0668..000000000
--- a/src/global_changes/src/global_changes_server.erl
+++ /dev/null
@@ -1,229 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(global_changes_server).
--behaviour(gen_server).
--vsn(1).
-
-
--export([
- start_link/0
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3,
- format_status/2
-]).
-
--export([
- update_docs/2
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--record(state, {
- update_db,
- pending_update_count,
- pending_updates,
- max_write_delay,
- dbname,
- handler_ref
-}).
-
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-
-init([]) ->
- {ok, Handler} = global_changes_listener:start(),
- % get configs as strings
- UpdateDb0 = config:get("global_changes", "update_db", "true"),
- MaxWriteDelay0 = config:get("global_changes", "max_write_delay", "500"),
-
- % make config strings into other data types
- UpdateDb = case UpdateDb0 of "false" -> false; _ -> true end,
- MaxWriteDelay = list_to_integer(MaxWriteDelay0),
-
- % Start our write triggers
- erlang:send_after(MaxWriteDelay, self(), flush_updates),
-
- State = #state{
- update_db=UpdateDb,
- pending_update_count=0,
- pending_updates=sets:new(),
- max_write_delay=MaxWriteDelay,
- dbname=global_changes_util:get_dbname(),
- handler_ref=erlang:monitor(process, Handler)
- },
- {ok, State}.
-
-
-terminate(_Reason, _Srv) ->
- ok.
-
-
-handle_call(_Msg, _From, State) ->
- {reply, ok, State}.
-
-
-handle_cast(_Msg, #state{update_db=false}=State) ->
- {noreply, State};
-handle_cast({update_docs, DocIds}, State) ->
- Pending = sets:union(sets:from_list(DocIds), State#state.pending_updates),
- PendingCount = sets:size(Pending),
- couch_stats:update_gauge(
- [global_changes, server_pending_updates],
- PendingCount
- ),
- NewState = State#state{
- pending_updates=Pending,
- pending_update_count=PendingCount
- },
- {noreply, NewState};
-
-handle_cast({set_max_write_delay, MaxWriteDelay}, State) ->
- NewState = State#state{max_write_delay=MaxWriteDelay},
- {noreply, NewState};
-handle_cast({set_update_db, Boolean}, State0) ->
- % If turning update_db off, clear out server state
- State = case {Boolean, State0#state.update_db} of
- {false, true} ->
- State0#state{
- update_db=Boolean,
- pending_updates=sets:new(),
- pending_update_count=0
- };
- _ ->
- State0#state{update_db=Boolean}
- end,
- {noreply, State};
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-
-handle_info(flush_updates, #state{pending_update_count=0}=State) ->
- erlang:send_after(State#state.max_write_delay, self(), flush_updates),
- {noreply, State};
-handle_info(flush_updates, #state{update_db=false}=State) ->
- erlang:send_after(State#state.max_write_delay, self(), flush_updates),
- {noreply, State};
-handle_info(flush_updates, State) ->
- erlang:send_after(State#state.max_write_delay, self(), flush_updates),
- flush_updates(State);
-handle_info(start_listener, State) ->
- {ok, Handler} = global_changes_listener:start(),
- NewState = State#state{
- handler_ref=erlang:monitor(process, Handler)
- },
- {noreply, NewState};
-handle_info({'DOWN', Ref, _, _, Reason}, #state{handler_ref=Ref}=State) ->
- couch_log:error("global_changes_listener terminated: ~w", [Reason]),
- erlang:send_after(5000, self(), start_listener),
- {noreply, State};
-handle_info(_, State) ->
- {noreply, State}.
-
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-format_status(_Opt, [_PDict, State]) ->
- Scrubbed = State#state{
- pending_updates=nil
- },
- [{data, [{"State",
- ?record_to_keyval(state, Scrubbed)
- }]}].
-
-flush_updates(State) ->
- DocIds = sets:to_list(State#state.pending_updates),
- try group_ids_by_shard(State#state.dbname, DocIds) of
- GroupedIds ->
- Docs = dict:fold(fun(ShardName, Ids, DocInfoAcc) ->
- {ok, Shard} = couch_db:open(ShardName, [?ADMIN_CTX]),
- try
- GroupedDocs = get_docs_locally(Shard, Ids),
- GroupedDocs ++ DocInfoAcc
- after
- couch_db:close(Shard)
- end
- end, [], GroupedIds),
-
- spawn(fun() ->
- fabric:update_docs(State#state.dbname, Docs, [])
- end),
-
- Count = State#state.pending_update_count,
- couch_stats:increment_counter(
- [global_changes, db_writes],
- Count
- )
- catch error:database_does_not_exist ->
- {noreply, State}
- end,
- couch_stats:update_gauge(
- [global_changes, server_pending_updates],
- 0
- ),
- {noreply, State#state{
- pending_updates=sets:new(),
- pending_update_count=0
- }}.
-
-
-update_docs(Node, Updates) ->
- gen_server:cast({?MODULE, Node}, {update_docs, Updates}).
-
-
-group_ids_by_shard(DbName, DocIds) ->
- LocalNode = node(),
- lists:foldl(fun(DocId, Acc) ->
- Shards = mem3:shards(DbName, DocId),
- lists:foldl(fun
- (#shard{node=Node, name=Name}, Acc1) when Node == LocalNode ->
- dict:append(Name, DocId, Acc1);
- (_, Acc1) ->
- Acc1
- end, Acc, Shards)
- end, dict:new(), DocIds).
-
-
-get_docs_locally(Shard, Ids) ->
- lists:map(fun(Id) ->
- DocInfo = couch_db:get_doc_info(Shard, Id),
- #doc{id=Id, revs=get_rev(DocInfo)}
- end, Ids).
-
-
-get_rev(not_found) ->
- {0, []};
-get_rev({ok, #doc_info{revs=[RevInfo]}}) ->
- {Pos, Rev} = RevInfo#rev_info.rev,
- {Pos, [Rev]};
-get_rev({ok, #doc_info{revs=[RevInfo|_]}}) ->
- % couch_doc:to_doc_info/1 sorts things so that the first
- % #rev_info in the list is the "winning" revision which is
- % the one we'd want to base our edit off of. In theory
- % global_changes should never encounter a conflict by design
- % but we should record if it happens in case our design isn't
- % quite right.
- couch_stats:increment_counter([global_changes, event_doc_conflict]),
- {Pos, Rev} = RevInfo#rev_info.rev,
- {Pos, [Rev]}.
diff --git a/src/global_changes/src/global_changes_sup.erl b/src/global_changes/src/global_changes_sup.erl
deleted file mode 100644
index 59a40f26a..000000000
--- a/src/global_changes/src/global_changes_sup.erl
+++ /dev/null
@@ -1,84 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(global_changes_sup).
--behavior(supervisor).
-
-
--export([start_link/0]).
-
--export([init/1]).
-
--export([handle_config_change/5]).
--export([handle_config_terminate/3]).
-
--define(LISTENER, global_changes_listener).
--define(SERVER, global_changes_server).
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-
-init([]) ->
- {ok, {
- {one_for_one, 5, 10}, couch_epi:register_service(global_changes_epi, [
- {
- config_listener_mon,
- {config_listener_mon, start_link, [?MODULE, nil]},
- permanent,
- 5000,
- worker,
- [config_listener_mon]
- },
- {
- global_changes_server,
- {global_changes_server, start_link, []},
- permanent,
- 5000,
- worker,
- [global_changes_server]
- }
- ])}}.
-
-handle_config_change("global_changes", "max_event_delay", MaxDelayStr, _, _) ->
- try list_to_integer(MaxDelayStr) of
- MaxDelay ->
- gen_server:cast(?LISTENER, {set_max_event_delay, MaxDelay})
- catch error:badarg ->
- ok
- end,
- {ok, nil};
-
-handle_config_change("global_changes", "max_write_delay", MaxDelayStr, _, _) ->
- try list_to_integer(MaxDelayStr) of
- MaxDelay ->
- gen_server:cast(?SERVER, {set_max_write_delay, MaxDelay})
- catch error:badarg ->
- ok
- end,
- {ok, nil};
-
-handle_config_change("global_changes", "update_db", "false", _, _) ->
- gen_server:cast(?LISTENER, {set_update_db, false}),
- gen_server:cast(?SERVER, {set_update_db, false}),
- {ok, nil};
-
-handle_config_change("global_changes", "update_db", _, _, _) ->
- gen_server:cast(?LISTENER, {set_update_db, true}),
- gen_server:cast(?SERVER, {set_update_db, true}),
- {ok, nil};
-
-handle_config_change(_, _, _, _, _) ->
- {ok, nil}.
-
-handle_config_terminate(_Server, _Reason, _State) ->
- ok.
diff --git a/src/global_changes/src/global_changes_util.erl b/src/global_changes/src/global_changes_util.erl
deleted file mode 100644
index 0ca57a35f..000000000
--- a/src/global_changes/src/global_changes_util.erl
+++ /dev/null
@@ -1,27 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(global_changes_util).
-
-
--export([get_dbname/0]).
-
-
-get_dbname() ->
- case application:get_env(global_changes, dbname) of
- {ok, DbName} when is_binary(DbName) ->
- DbName;
- {ok, DbName} when is_list(DbName) ->
- iolist_to_binary(DbName);
- _ ->
- <<"_global_changes">>
- end.
diff --git a/src/global_changes/test/eunit/global_changes_hooks_tests.erl b/src/global_changes/test/eunit/global_changes_hooks_tests.erl
deleted file mode 100644
index 23fa2c87f..000000000
--- a/src/global_changes/test/eunit/global_changes_hooks_tests.erl
+++ /dev/null
@@ -1,156 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(global_changes_hooks_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--export([allowed_owner/2]).
-
--define(t2l(V), lists:flatten(io_lib:format("~p", [V]))).
-
-start() ->
- Ctx = test_util:start_couch([chttpd, global_changes]),
- DbName = ?tempdb(),
- ok = fabric:create_db(DbName, [?ADMIN_CTX]),
- application:set_env(global_changes, dbname, DbName),
- {Ctx, DbName}.
-
-stop({Ctx, DbName}) ->
- ok = fabric:delete_db(DbName, [?ADMIN_CTX]),
- test_util:stop_couch(Ctx),
- ok.
-
-setup(default) ->
- add_admin("admin", <<"pass">>),
- config:delete("couch_httpd_auth", "authentication_redirect", false),
- config:set("couch_httpd_auth", "require_valid_user", "false", false),
- get_host();
-setup(A) ->
- Host = setup(default),
- ok = config:set("global_changes", "allowed_owner",
- ?t2l({?MODULE, allowed_owner, A}), false),
- Host.
-
-teardown(_) ->
- delete_admin("admin"),
- config:delete("global_changes", "allowed_owner", false),
- ok.
-
-allowed_owner(_Req, "throw") ->
- throw({unauthorized, <<"Exception thrown.">>});
-allowed_owner(_Req, "pass") ->
- "super".
-
-allowed_owner_hook_test_() ->
- {
- "Check allowed_owner hook",
- {
- setup,
- fun start/0, fun stop/1,
- [
- disabled_allowed_owner_integration_point(),
- enabled_allowed_owner_integration_point()
- ]
- }
- }.
-
-disabled_allowed_owner_integration_point() ->
- {
- "disabled allowed_owner integration point",
- {
- foreach,
- fun() -> setup(default) end, fun teardown/1,
- [
- fun should_not_fail_for_admin/1,
- fun should_fail_for_non_admin/1
- ]
- }
- }.
-
-enabled_allowed_owner_integration_point() ->
- {
- "enabled allowed_owner integration point",
- [
- {
- foreach,
- fun() -> setup("throw") end, fun teardown/1,
- [fun should_throw/1]
- },
- {
- foreach,
- fun() -> setup("pass") end, fun teardown/1,
- [fun should_pass/1]
- }
- ]
- }.
-
-should_not_fail_for_admin(Host) ->
- ?_test(begin
- Headers = [{basic_auth, {"admin", "pass"}}],
- {Status, [Error, Reason]} =
- request(Host, Headers, [<<"error">>, <<"reason">>]),
- ?assertEqual(200, Status),
- ?assertEqual(undefined, Error),
- ?assertEqual(undefined, Reason)
- end).
-
-should_fail_for_non_admin(Host) ->
- ?_test(begin
- Headers = [],
- {Status, [Error, Reason]} =
- request(Host, Headers, [<<"error">>, <<"reason">>]),
- ?assertEqual(401, Status),
- ?assertEqual(<<"unauthorized">>, Error),
- ?assertEqual(<<"You are not a server admin.">>, Reason)
- end).
-
-should_pass(Host) ->
- ?_test(begin
- Headers = [{basic_auth, {"admin", "pass"}}],
- {Status, [Error, Reason]} =
- request(Host, Headers, [<<"error">>, <<"reason">>]),
- ?assertEqual(200, Status),
- ?assertEqual(undefined, Error),
- ?assertEqual(undefined, Reason)
- end).
-
-should_throw(Host) ->
- ?_test(begin
- Headers = [{basic_auth, {"admin", "pass"}}],
- {Status, [Error, Reason]} =
- request(Host, Headers, [<<"error">>, <<"reason">>]),
- ?assertEqual(401, Status),
- ?assertEqual(<<"unauthorized">>, Error),
- ?assertEqual(<<"Exception thrown.">>, Reason)
- end).
-
-request(Host, Headers, ToDecode) ->
- Url = Host ++ "/_db_updates",
- {ok, Status, _Headers, BinBody} = test_request:get(Url, Headers),
- {Body} = jiffy:decode(BinBody),
- Values = [couch_util:get_value(Key, Body) || Key <- ToDecode],
- {Status, Values}.
-
-add_admin(User, Pass) ->
- Hashed = couch_passwords:hash_admin_password(Pass),
- config:set("admins", User, ?b2l(Hashed), false).
-
-delete_admin(User) ->
- config:delete("admins", User, false).
-
-get_host() ->
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)),
- Host = "http://" ++ Addr ++ ":" ++ Port,
- Host.
diff --git a/src/ioq/.gitignore b/src/ioq/.gitignore
deleted file mode 100644
index 21cf3d388..000000000
--- a/src/ioq/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-.rebar
-ebin/
diff --git a/src/ioq/src/ioq.app.src b/src/ioq/src/ioq.app.src
deleted file mode 100644
index 65ea50d6d..000000000
--- a/src/ioq/src/ioq.app.src
+++ /dev/null
@@ -1,21 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application,ioq, [
- {description, "I/O prioritizing engine"},
- {vsn, git},
- {registered,[]},
- {applications,[kernel,stdlib,config]},
- {mod,{ioq_app,[]}},
- {env, []},
- {modules,[ioq,ioq_app,ioq_sup]}
-]}.
diff --git a/src/ioq/src/ioq.erl b/src/ioq/src/ioq.erl
deleted file mode 100644
index 99b3ce385..000000000
--- a/src/ioq/src/ioq.erl
+++ /dev/null
@@ -1,189 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ioq).
--behaviour(gen_server).
--behaviour(config_listener).
-
--export([start_link/0, call/3]).
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, code_change/3, terminate/2]).
-
-% config_listener api
--export([handle_config_change/5, handle_config_terminate/3]).
-
--define(RELISTEN_DELAY, 5000).
-
--record(state, {
- concurrency,
- ratio,
- interactive=queue:new(),
- background=queue:new(),
- running=[]
-}).
-
--record(request, {
- fd,
- msg,
- priority,
- from,
- ref
-}).
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-call(Fd, Msg, Metadata) ->
- Priority = io_class(Msg, Metadata),
- case bypass(Priority) of
- true ->
- gen_server:call(Fd, Msg, infinity);
- false ->
- queued_call(Fd, Msg, Priority)
- end.
-
-bypass(Priority) ->
- config:get("ioq.bypass", atom_to_list(Priority)) =:= "true".
-
-io_class({prompt, _}, _) ->
- os_process;
-io_class({data, _}, _) ->
- os_process;
-io_class(_, {interactive, _}) ->
- read;
-io_class(_, {db_update, _}) ->
- write;
-io_class(_, {view_update, _, _}) ->
- view_update;
-io_class(_, {internal_repl, _}) ->
- shard_sync;
-io_class(_, {db_compact, _}) ->
- compaction;
-io_class(_, {view_compact, _, _}) ->
- compaction;
-io_class(_, _) ->
- other.
-
-queued_call(Fd, Msg, Priority) ->
- Request = #request{fd=Fd, msg=Msg, priority=Priority, from=self()},
- try
- gen_server:call(?MODULE, Request, infinity)
- catch
- exit:{noproc,_} ->
- gen_server:call(Fd, Msg, infinity)
- end.
-
-init(_) ->
- ok = config:listen_for_changes(?MODULE, nil),
- State = #state{},
- {ok, read_config(State)}.
-
-read_config(State) ->
- Ratio = list_to_float(config:get("ioq", "ratio", "0.01")),
- Concurrency = list_to_integer(config:get("ioq", "concurrency", "10")),
- State#state{concurrency=Concurrency, ratio=Ratio}.
-
-handle_call(#request{}=Request, From, State) ->
- {noreply, enqueue_request(Request#request{from=From}, State), 0}.
-
-handle_cast(change, State) ->
- {noreply, read_config(State)};
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info({Ref, Reply}, State) ->
- case lists:keytake(Ref, #request.ref, State#state.running) of
- {value, Request, Remaining} ->
- erlang:demonitor(Ref, [flush]),
- gen_server:reply(Request#request.from, Reply),
- {noreply, State#state{running=Remaining}, 0};
- false ->
- {noreply, State, 0}
- end;
-handle_info({'DOWN', Ref, _, _, Reason}, State) ->
- case lists:keytake(Ref, #request.ref, State#state.running) of
- {value, Request, Remaining} ->
- gen_server:reply(Request#request.from, {'EXIT', Reason}),
- {noreply, State#state{running=Remaining}, 0};
- false ->
- {noreply, State, 0}
- end;
-handle_info(restart_config_listener, State) ->
- ok = config:listen_for_changes(?MODULE, nil),
- {noreply, State};
-handle_info(timeout, State) ->
- {noreply, maybe_submit_request(State)}.
-
-handle_config_change("ioq", _, _, _, _) ->
- {ok, gen_server:cast(?MODULE, change)};
-handle_config_change(_, _, _, _, _) ->
- {ok, nil}.
-
-handle_config_terminate(_Server, stop, _State) ->
- ok;
-handle_config_terminate(_Server, _Reason, _State) ->
- erlang:send_after(?RELISTEN_DELAY, whereis(?MODULE), restart_config_listener).
-
-code_change(_Vsn, State, _Extra) ->
- {ok, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-enqueue_request(#request{priority=compaction}=Request, #state{}=State) ->
- State#state{background=queue:in(Request, State#state.background)};
-enqueue_request(#request{priority=shard_sync}=Request, #state{}=State) ->
- State#state{background=queue:in(Request, State#state.background)};
-enqueue_request(#request{}=Request, #state{}=State) ->
- State#state{interactive=queue:in(Request, State#state.interactive)}.
-
-maybe_submit_request(#state{concurrency=Concurrency, running=Running}=State)
- when length(Running) < Concurrency ->
- case make_next_request(State) of
- State ->
- State;
- NewState when length(Running) >= Concurrency - 1 ->
- NewState;
- NewState ->
- maybe_submit_request(NewState)
- end;
-maybe_submit_request(State) ->
- State.
-
-make_next_request(#state{}=State) ->
- case {queue:is_empty(State#state.background), queue:is_empty(State#state.interactive)} of
- {true, true} ->
- State;
- {true, false} ->
- choose_next_request(#state.interactive, State);
- {false, true} ->
- choose_next_request(#state.background, State);
- {false, false} ->
- case couch_rand:uniform() < State#state.ratio of
- true ->
- choose_next_request(#state.background, State);
- false ->
- choose_next_request(#state.interactive, State)
- end
- end.
-
-choose_next_request(Index, State) ->
- case queue:out(element(Index, State)) of
- {empty, _} ->
- State;
- {{value, Request}, Q} ->
- submit_request(Request, setelement(Index, State, Q))
- end.
-
-submit_request(#request{}=Request, #state{}=State) ->
- Ref = erlang:monitor(process, Request#request.fd),
- Request#request.fd ! {'$gen_call', {self(), Ref}, Request#request.msg},
- State#state{running = [Request#request{ref=Ref} | State#state.running]}.
diff --git a/src/ioq/src/ioq_app.erl b/src/ioq/src/ioq_app.erl
deleted file mode 100644
index 2e6d75acb..000000000
--- a/src/ioq/src/ioq_app.erl
+++ /dev/null
@@ -1,21 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ioq_app).
--behaviour(application).
--export([start/2, stop/1]).
-
-start(_StartType, _StartArgs) ->
- ioq_sup:start_link().
-
-stop(_State) ->
- ok.
diff --git a/src/ioq/src/ioq_sup.erl b/src/ioq/src/ioq_sup.erl
deleted file mode 100644
index c4d04a9e4..000000000
--- a/src/ioq/src/ioq_sup.erl
+++ /dev/null
@@ -1,24 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ioq_sup).
--behaviour(supervisor).
--export([start_link/0, init/1]).
-
-%% Helper macro for declaring children of supervisor
--define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}).
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-init([]) ->
- {ok, { {one_for_one, 5, 10}, [?CHILD(ioq, worker)]}}.
diff --git a/src/jwtf/src/jwtf.erl b/src/jwtf/src/jwtf.erl
index 247f2b508..a0bbf1fc1 100644
--- a/src/jwtf/src/jwtf.erl
+++ b/src/jwtf/src/jwtf.erl
@@ -188,8 +188,7 @@ validate_alg(Props, Checks) ->
end.
-%% Not all these fields have to be present, but if they _are_ present
-%% they must be valid.
+%% Only validate required checks.
validate_payload(Props, Checks) ->
validate_iss(Props, Checks),
validate_iat(Props, Checks),
@@ -202,7 +201,7 @@ validate_iss(Props, Checks) ->
ActualISS = prop(<<"iss">>, Props),
case {ExpectedISS, ActualISS} of
- {undefined, undefined} ->
+ {undefined, _} -> % ignore unrequired check
ok;
{ISS, undefined} when ISS /= undefined ->
throw({bad_request, <<"Missing iss claim">>});
@@ -218,11 +217,11 @@ validate_iat(Props, Checks) ->
IAT = prop(<<"iat">>, Props),
case {Required, IAT} of
- {undefined, undefined} ->
+ {undefined, _} -> % ignore unrequired check
ok;
{true, undefined} ->
throw({bad_request, <<"Missing iat claim">>});
- {_, IAT} when is_integer(IAT) ->
+ {true, IAT} when is_integer(IAT) ->
ok;
{true, _} ->
throw({bad_request, <<"Invalid iat claim">>})
@@ -234,12 +233,12 @@ validate_nbf(Props, Checks) ->
NBF = prop(<<"nbf">>, Props),
case {Required, NBF} of
- {undefined, undefined} ->
+ {undefined, _} -> % ignore unrequired check
ok;
{true, undefined} ->
throw({bad_request, <<"Missing nbf claim">>});
- {_, IAT} ->
- assert_past(<<"nbf">>, IAT)
+ {true, NBF} ->
+ assert_past(<<"nbf">>, NBF)
end.
@@ -248,11 +247,11 @@ validate_exp(Props, Checks) ->
EXP = prop(<<"exp">>, Props),
case {Required, EXP} of
- {undefined, undefined} ->
+ {undefined, _} -> % ignore unrequired check
ok;
{true, undefined} ->
throw({bad_request, <<"Missing exp claim">>});
- {_, EXP} ->
+ {true, EXP} ->
assert_future(<<"exp">>, EXP)
end.
@@ -351,3 +350,20 @@ now_seconds() ->
prop(Prop, Props) ->
proplists:get_value(Prop, Props).
+
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+validate_payload_ignore_unchecked_props_test() ->
+ ?assertEqual(ok, validate_payload(_Props = [], _Checks = [])),
+ BogusProps = [
+ {iss, bogus},
+ {iat, bogus},
+ {nbf, bogus},
+ {exp, bogus}
+ ],
+ ?assertEqual(ok, validate_payload(BogusProps, _Checks = [])),
+ ok.
+
+-endif.
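
The same three-clause pattern now governs each of the iss, iat, nbf and exp claims above: an unconfigured check is skipped regardless of the claim's value, a required-but-missing claim raises bad_request, and a required-and-present claim gets its claim-specific validation. A generic restatement as a standalone sketch (module and function names are hypothetical; only the clause shape is taken from the hunks above):

    -module(jwtf_claim_check_sketch).
    -export([validate_claim/3]).

    %% Required is the configured check (undefined when not configured),
    %% ClaimValue is the claim taken from the token payload, and ValidateFun
    %% performs the claim-specific validation (e.g. assert_past/assert_future).
    validate_claim(undefined, _ClaimValue, _ValidateFun) ->
        %% check not configured: ignore the claim entirely
        ok;
    validate_claim(true, undefined, _ValidateFun) ->
        throw({bad_request, <<"Missing claim">>});
    validate_claim(true, ClaimValue, ValidateFun) ->
        ValidateFun(ClaimValue).
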
diff --git a/src/jwtf/test/jwtf_keystore_tests.erl b/src/jwtf/test/jwtf_keystore_tests.erl
index acbc002b5..1063a4527 100644
--- a/src/jwtf/test/jwtf_keystore_tests.erl
+++ b/src/jwtf/test/jwtf_keystore_tests.erl
@@ -22,7 +22,7 @@
-define(EC_SECRET, "-----BEGIN PUBLIC KEY-----\\nMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEDsr0lz/Dg3luarb+Kua0Wcj9WrfR23os\\nwHzakglb8GhWRDn+oZT0Bt/26sX8uB4/ij9PEOLHPo+IHBtX4ELFFVr5GTzlqcJe\\nyctaTDd1OOAPXYuc67EWtGZ3pDAzztRs\\n-----END PUBLIC KEY-----\\n").
setup() ->
- test_util:start_applications([config, jwtf]),
+ test_util:start_applications([couch_log, config, jwtf]),
config:set("jwt_keys", "hmac:hmac", ?HMAC_SECRET),
config:set("jwt_keys", "rsa:hmac", ?HMAC_SECRET),
config:set("jwt_keys", "ec:hmac", ?HMAC_SECRET),
@@ -39,7 +39,7 @@ setup() ->
teardown(_) ->
- test_util:stop_applications([config, jwtf]).
+ test_util:stop_applications([couch_log, config, jwtf]).
jwtf_keystore_test_() ->
{
diff --git a/src/ken/README.md b/src/ken/README.md
deleted file mode 100644
index a5a657611..000000000
--- a/src/ken/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-ken
-===
-
-Ken builds views and search indexes. Automatically.
-
-#### Overview
-
-When the couch\_db\_update event is triggered with an `updated` event, ken will spawn indexing jobs for view groups and search indexes (one job per view group shard or search index shard). If a `deleted` event is triggered, all jobs associated with the corresponding database shard will be removed.
-
-#### Testing
-
-Testing for ken is expected to be executed from the top-level `couchdb` repo as part of a `make check` run. The isolated ken tests can be run with `rebar eunit apps=ken verbose=1` from the `couchdb` root directory.
diff --git a/src/ken/rebar.config.script b/src/ken/rebar.config.script
deleted file mode 100644
index 3344206e5..000000000
--- a/src/ken/rebar.config.script
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-HaveDreyfus = element(1, file:list_dir("../dreyfus")) == ok.
-
-HastingsHome = os:getenv("HASTINGS_HOME", "../hastings").
-HaveHastings = element(1, file:list_dir(HastingsHome)) == ok.
-
-CurrOpts = case lists:keyfind(erl_opts, 1, CONFIG) of
- {erl_opts, Opts} -> Opts;
- false -> []
-end,
-
-NewOpts =
- if HaveDreyfus -> [{d, 'HAVE_DREYFUS'}]; true -> [] end ++
- if HaveHastings -> [{d, 'HAVE_HASTINGS'}]; true -> [] end ++
- [{i, "../"}] ++ CurrOpts.
-
-lists:keystore(erl_opts, 1, CONFIG, {erl_opts, NewOpts}).
diff --git a/src/ken/src/ken.app.src.script b/src/ken/src/ken.app.src.script
deleted file mode 100644
index dcf4a23d1..000000000
--- a/src/ken/src/ken.app.src.script
+++ /dev/null
@@ -1,38 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-HaveDreyfus = code:lib_dir(dreyfus) /= {error, bad_name}.
-HaveHastings = code:lib_dir(hastings) /= {error, bad_name}.
-
-BaseApplications = [
- kernel,
- stdlib,
- couch_log,
- couch_event,
- couch,
- config
-].
-
-Applications =
- if HaveDreyfus -> [dreyfus]; true -> [] end ++
- if HaveHastings -> [hastings]; true -> [] end ++
- BaseApplications.
-
-{application, ken,
- [
- {description, ""},
- {vsn, git},
- {registered, []},
- {applications, Applications},
- {mod, { ken_app, []}},
- {env, []}
- ]}.
diff --git a/src/ken/src/ken.erl b/src/ken/src/ken.erl
deleted file mode 100644
index 87a724ba1..000000000
--- a/src/ken/src/ken.erl
+++ /dev/null
@@ -1,29 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ken).
-
--export([add/1]).
--export([remove/1]).
--export([add_all_shards/1]).
-
-% Add a database shard to be indexed.
-add(DbName) ->
- ken_server:add(DbName).
-
-% Remove all pending jobs for a database shard.
-remove(DbName) ->
- ken_server:remove(DbName).
-
-% Add all shards for a database to be indexed.
-add_all_shards(DbName) ->
- ken_server:add_all_shards(DbName).
diff --git a/src/ken/src/ken_app.erl b/src/ken/src/ken_app.erl
deleted file mode 100644
index 15f235d42..000000000
--- a/src/ken/src/ken_app.erl
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ken_app).
-
--behaviour(application).
-
-%% Application callbacks
--export([start/2, stop/1]).
-
-%% ===================================================================
-%% Application callbacks
-%% ===================================================================
-
-start(_StartType, _StartArgs) ->
- ken_sup:start_link().
-
-stop(_State) ->
- ok.
diff --git a/src/ken/src/ken_event_handler.erl b/src/ken/src/ken_event_handler.erl
deleted file mode 100644
index 8f158f425..000000000
--- a/src/ken/src/ken_event_handler.erl
+++ /dev/null
@@ -1,56 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ken_event_handler).
--behaviour(couch_event_listener).
-
--export([
- start_link/0
-]).
-
--export([
- init/1,
- terminate/2,
- handle_event/3,
- handle_cast/2,
- handle_info/2
-]).
-
-
-start_link() ->
- couch_event_listener:start_link(?MODULE, nil, [all_dbs]).
-
-%% couch_event_listener callbacks
-
-init(_) ->
- {ok, nil}.
-
-terminate(_Reason, _State) ->
- ok.
-
-handle_event(DbName, updated, State) ->
- ken:add(DbName),
- {ok, State};
-handle_event(DbName, deleted, State) ->
- ken:remove(DbName),
- {ok, State};
-handle_event(DbName, ddoc_updated, State) ->
- ken:add_all_shards(DbName),
- {ok, State};
-handle_event(_DbName, _Event, State) ->
- {ok, State}.
-
-handle_cast(_Msg, State) ->
- {ok, State}.
-
-handle_info(_Msg, State) ->
- {ok, State}.
diff --git a/src/ken/src/ken_server.erl b/src/ken/src/ken_server.erl
deleted file mode 100644
index 74c8e25ac..000000000
--- a/src/ken/src/ken_server.erl
+++ /dev/null
@@ -1,579 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ken_server).
-
-% gen_server boilerplate
--behaviour(gen_server).
--vsn(1).
--export([init/1, terminate/2]).
--export([
-    handle_call/3, handle_cast/2, handle_info/2, code_change/3, format_status/2
-]).
-
-% Public interface
--export([start_link/0]).
--export([add/1]).
--export([remove/1]).
--export([add_all_shards/1]).
--export([set_batch_size/1]).
--export([set_delay/1]).
--export([set_limit/1]).
--export([set_prune_interval/1]).
-
-% exports for spawn
--export([update_db_indexes/2]).
-
--record(job, {
- name, % {DbName, GroupId} for view. {DbName, DDocId, IndexId} for search.
- server, % Pid of either view group or search index
- worker_pid = nil,
- seq = 0,
- lru = erlang:monotonic_time()
-}).
-
--record(state, {
- q = queue:new(),
- dbworker = nil,
- limit = 20,
- delay = 5000,
- batch_size = 1,
- prune_interval = 60000,
- pruned_last
-}).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--ifdef(HAVE_DREYFUS).
--include_lib("dreyfus/include/dreyfus.hrl").
--endif.
-
--ifdef(HAVE_HASTINGS).
--include_lib("hastings/src/hastings.hrl").
--endif.
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-%% @doc Adds a database shard to be indexed
--spec add(binary()) -> ok.
-add(DbName) ->
- gen_server:cast(?MODULE, {add, DbName}).
-
-%% @doc Removes all the pending jobs for a database shard.
--spec remove(binary()) -> ok.
-remove(DbName) ->
- gen_server:cast(?MODULE, {remove, DbName}).
-
-%% @doc Adds all the shards for a database to be indexed.
--spec add_all_shards(binary()) -> ok.
-add_all_shards(DbName) ->
- try
- Shards = mem3:shards(mem3:dbname(DbName)),
- lists:map(fun(Shard) ->
- rexi:cast(Shard#shard.node, {ken_server, add, [Shard#shard.name]})
- end, Shards)
- catch error:database_does_not_exist ->
- ok
- end.
-
-%% @doc Changes the configured value for a batch size.
-%% Returns previous value.
--spec set_batch_size(pos_integer()) -> pos_integer().
-set_batch_size(BS) when is_integer(BS), BS > 0 ->
- gen_server:call(?MODULE, {set_batch_size, BS}).
-
-%% @doc Changes the configured value for a delay between batches.
-%% Returns previous value.
--spec set_delay(non_neg_integer()) -> non_neg_integer().
-set_delay(Delay) when is_integer(Delay), Delay >= 0 ->
- gen_server:call(?MODULE, {set_delay, Delay}).
-
-%% @doc Changes the configured value for a limit.
-%% Returns previous value.
--spec set_limit(pos_integer()) -> pos_integer().
-set_limit(Limit) when is_integer(Limit), Limit > 0 ->
- gen_server:call(?MODULE, {set_limit, Limit}).
-
-%% @doc Changes the configured value for a prune interval.
-%% Returns previous value.
--spec set_prune_interval(pos_integer()) -> pos_integer().
-set_prune_interval(Interval) when is_integer(Interval), Interval > 1000 ->
- gen_server:call(?MODULE, {set_prune_interval, Interval}).
-
-%% gen_server callbacks
-
-init(_) ->
- erlang:send(self(), start_event_handler),
- ets:new(ken_pending, [named_table]),
- ets:new(ken_resubmit, [named_table]),
- ets:new(ken_workers, [named_table, public, {keypos, #job.name}]),
- Limit = list_to_integer(config("limit", "20")),
- {ok, #state{pruned_last = erlang:monotonic_time(), limit = Limit}}.
-
-terminate(_Reason, _State) ->
- ok.
-
-handle_call({set_batch_size, BS}, _From, #state{batch_size = Old} = State) ->
- {reply, Old, State#state{batch_size = BS}, 0};
-
-handle_call({set_delay, Delay}, _From, #state{delay = Old} = State) ->
- {reply, Old, State#state{delay = Delay}, 0};
-
-handle_call({set_limit, Limit}, _From, #state{limit = Old} = State) ->
- {reply, Old, State#state{limit = Limit}, 0};
-
-handle_call({set_prune_interval, Interval}, _From , State) ->
- Old = State#state.prune_interval,
- {reply, Old, State#state{prune_interval = Interval}, 0};
-
-handle_call(Msg, From, State) ->
- {stop, {unknown_call, Msg, From}, State}.
-
-% Queues a DB to (maybe) have indexing jobs spawned.
-handle_cast({add, DbName}, State) ->
- case ets:insert_new(ken_pending, {DbName}) of
- true ->
- {noreply, State#state{q = queue:in(DbName, State#state.q)}, 0};
- false ->
- {noreply, State, 0}
- end;
-
-handle_cast({remove, DbName}, State) ->
- Q2 = queue:filter(fun(X) -> X =/= DbName end, State#state.q),
- ets:delete(ken_pending, DbName),
- % Delete search index workers
- ets:match_delete(ken_workers, #job{name={DbName,'_','_'}, _='_'}),
- % Delete view index workers
- ets:match_delete(ken_workers, #job{name={DbName,'_'}, _='_'}),
- % TODO kill off active jobs for this DB as well
- {noreply, State#state{q = Q2}, 0};
-
-handle_cast({resubmit, DbName}, State) ->
- ets:delete(ken_resubmit, DbName),
- handle_cast({add, DbName}, State);
-
-% st index job names have 3 elements, the 3rd being 'hastings'. See the job record definition.
-handle_cast({trigger_update, #job{name={_, _, hastings}, server=GPid, seq=Seq} = Job}, State) ->
- % hastings_index:await will trigger a hastings index update
- {Pid, _} = erlang:spawn_monitor(hastings_index, await,
- [GPid, Seq]),
- Now = erlang:monotonic_time(),
- ets:insert(ken_workers, Job#job{worker_pid = Pid, lru = Now}),
- {noreply, State, 0};
-% search index job names have 3 elements. See job record definition.
-handle_cast({trigger_update, #job{name={_,_,_}, server=GPid, seq=Seq} = Job}, State) ->
- % dreyfus_index:await will trigger a search index update.
- {Pid, _} = erlang:spawn_monitor(dreyfus_index, await,
- [GPid, Seq]),
- Now = erlang:monotonic_time(),
- ets:insert(ken_workers, Job#job{worker_pid = Pid, lru = Now}),
- {noreply, State, 0};
-handle_cast({trigger_update, #job{name={_,_}, server=SrvPid, seq=Seq} = Job}, State) ->
- % couch_index:get_state/2 will trigger a view group index update.
- {Pid, _} = erlang:spawn_monitor(couch_index, get_state, [SrvPid, Seq]),
- Now = erlang:monotonic_time(),
- ets:insert(ken_workers, Job#job{worker_pid = Pid, lru = Now}),
- {noreply, State, 0};
-
-handle_cast(Msg, State) ->
- {stop, {unknown_cast, Msg}, State}.
-
-handle_info({gen_event_EXIT, ken_event_handler, Reason}, State) ->
- couch_log:error("ken_event_handler terminated: ~w", [Reason]),
- erlang:send_after(5000, self(), start_event_handler),
- {ok, State, 0};
-
-handle_info(start_event_handler, State) ->
- case ken_event_handler:start_link() of
- {ok, _Pid} ->
- ok;
- Error ->
- couch_log:error("ken_event_handler init: ~w", [Error]),
- erlang:send_after(5000, self(), start_event_handler)
- end,
- {noreply, State, 0};
-
-handle_info(timeout, #state{prune_interval = I, pruned_last = Last} = State) ->
- Now = erlang:monotonic_time(),
- Interval = erlang:convert_time_unit(
- State#state.delay, millisecond, native),
- case Now - Last > Interval of
- true ->
- NewState = prune_worker_table(State);
- _ ->
- NewState = State
- end,
- {noreply, maybe_start_next_queued_job(NewState), I};
-
-handle_info({'DOWN', _, _, Pid, Reason}, #state{dbworker = {Name,Pid}} = St) ->
- maybe_resubmit(Name, Reason),
- {noreply, St#state{dbworker=nil}, 0};
-
-handle_info({'DOWN', _, _, Pid, Reason}, State) ->
- debrief_worker(Pid, Reason, State),
- {noreply, State, 0};
-
-handle_info(Msg, State) ->
- {stop, {unknown_info, Msg}, State}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-
-format_status(_Opt, [_PDict, State]) ->
- #state{
- q = Queue
- } = State,
- Scrubbed = State#state{
- q = {queue_length, queue:len(Queue)}
- },
- [{data, [{"State",
- ?record_to_keyval(state, Scrubbed)
- }]}].
-
-%% private functions
-
-maybe_start_next_queued_job(#state{dbworker = {_,_}} = State) ->
- State;
-maybe_start_next_queued_job(#state{q=Q} = State) ->
- IncrementalChannels = list_to_integer(config("incremental_channels", "80")),
- BatchChannels = list_to_integer(config("batch_channels", "20")),
- TotalChannels = IncrementalChannels + BatchChannels,
- case queue:out(Q) of
- {{value, DbName}, Q2} ->
- case skip_job(DbName) of
- true ->
- % job is either being resubmitted or ignored, skip it
- ets:delete(ken_pending, DbName),
- maybe_start_next_queued_job(State#state{q = Q2});
- false ->
- case get_active_count() of A when A < TotalChannels ->
- Args = [DbName, State],
- {Pid, _} = spawn_monitor(?MODULE, update_db_indexes, Args),
- ets:delete(ken_pending, DbName),
- State#state{dbworker = {DbName,Pid}, q = Q2};
- _ ->
- State#state{q = queue:in_r(DbName, Q2)}
- end
- end;
- {empty, Q} ->
- State
- end.
-
-skip_job(DbName) ->
- ets:member(ken_resubmit, DbName) orelse ignore_db(DbName).
-
-ignore_db(DbName) ->
- case config:get("ken.ignore", ?b2l(DbName), false) of
- "true" ->
- true;
- _ ->
- false
- end.
-
-get_active_count() ->
- MatchSpec = [{#job{worker_pid='$1', _='_'}, [{is_pid, '$1'}], [true]}],
- ets:select_count(ken_workers, MatchSpec).
-
-% If any indexing job fails, resubmit requests for all indexes.
-update_db_indexes(Name, State) ->
- {ok, DDocs} = design_docs(Name),
- RandomSorted = lists:sort([{rand:uniform(), D} || D <- DDocs]),
- Resubmit = lists:foldl(fun({_, DDoc}, Acc) ->
- JsonDDoc = couch_doc:from_json_obj(DDoc),
- case update_ddoc_indexes(Name, JsonDDoc, State) of
- ok -> Acc;
- _ -> true
- end
- end, false, RandomSorted),
- if Resubmit -> exit(resubmit); true -> ok end.
-
-design_docs(Name) ->
- try
- case fabric:design_docs(mem3:dbname(Name)) of
- {error, {maintenance_mode, _, _Node}} ->
- {ok, []};
- Else ->
- Else
- end
- catch error:database_does_not_exist ->
- {ok, []}
- end.
-
-% Returns an error if any job creation fails.
-update_ddoc_indexes(Name, #doc{}=Doc, State) ->
- {ok, Db} = case couch_db:open_int(Name, []) of
- {ok, _} = Resp -> Resp;
- Else -> exit(Else)
- end,
- Seq = couch_db:get_update_seq(Db),
- couch_db:close(Db),
- ViewUpdated = case should_update(Doc, <<"views">>) of true ->
- try couch_mrview_util:ddoc_to_mrst(Name, Doc) of
- {ok, MRSt} -> update_ddoc_views(Name, MRSt, Seq, State)
- catch _:_ ->
- ok
- end;
- false ->
- ok
- end,
- SearchUpdated = search_updated(Name, Doc, Seq, State),
- STUpdated = st_updated(Name, Doc, Seq, State),
- case {ViewUpdated, SearchUpdated, STUpdated} of
- {ok, ok, ok} -> ok;
- _ -> resubmit
- end.
-
--ifdef(HAVE_DREYFUS).
-search_updated(Name, Doc, Seq, State) ->
- case should_update(Doc, <<"indexes">>) of true ->
- try dreyfus_index:design_doc_to_indexes(Doc) of
- SIndexes -> update_ddoc_search_indexes(Name, SIndexes, Seq, State)
- catch _:_ ->
- ok
- end;
- false ->
- ok
- end.
--else.
-search_updated(_Name, _Doc, _Seq, _State) ->
- ok.
--endif.
-
--ifdef(HAVE_HASTINGS).
-st_updated(Name, Doc, Seq, State) ->
- case should_update(Doc, <<"st_indexes">>) of true ->
- try
- hastings_index:design_doc_to_indexes(Doc) of
- STIndexes -> update_ddoc_st_indexes(Name, STIndexes, Seq, State)
- catch _:_ ->
- ok
- end;
- false ->
- ok
- end.
--else.
-st_updated(_Name, _Doc, _Seq, _State) ->
- ok.
--endif.
-
-should_update(#doc{body={Props}}, IndexType) ->
- case couch_util:get_value(<<"autoupdate">>, Props) of
- false ->
- false;
- {AUProps} ->
- case couch_util:get_value(IndexType, AUProps) of
- false ->
- false;
- _ ->
- true
- end;
- _ ->
- true
- end.
-
-update_ddoc_views(Name, MRSt, Seq, State) ->
- Language = couch_mrview_index:get(language, MRSt),
- Allowed = lists:member(Language, allowed_languages()),
- Views = couch_mrview_index:get(views, MRSt),
- if Allowed andalso Views =/= [] ->
- {ok, Pid} = couch_index_server:get_index(couch_mrview_index, MRSt),
- GroupName = couch_mrview_index:get(idx_name, MRSt),
- maybe_start_job({Name, GroupName}, Pid, Seq, State);
- true -> ok end.
-
--ifdef(HAVE_DREYFUS).
-update_ddoc_search_indexes(DbName, Indexes, Seq, State) ->
- if Indexes =/= [] ->
- % Spawn a job for each search index in the ddoc
- lists:foldl(fun(#index{name=IName, ddoc_id=DDocName}=Index, Acc) ->
- case dreyfus_index_manager:get_index(DbName, Index) of
- {ok, Pid} ->
- case maybe_start_job({DbName, DDocName, IName}, Pid, Seq, State) of
- resubmit -> resubmit;
- _ -> Acc
- end;
- _ ->
- % If any job fails, retry the db.
- resubmit
- end end, ok, Indexes);
- true -> ok end.
--endif.
-
--ifdef(HAVE_HASTINGS).
-update_ddoc_st_indexes(DbName, Indexes, Seq, State) ->
- if Indexes =/= [] ->
- % The record name in hastings is #h_idx rather than #index as it is for dreyfus
- % Spawn a job for each spatial index in the ddoc
- lists:foldl(fun(#h_idx{ddoc_id=DDocName}=Index, Acc) ->
- case hastings_index_manager:get_index(DbName, Index) of
- {ok, Pid} ->
- case maybe_start_job({DbName, DDocName, hastings}, Pid, Seq, State) of
- resubmit -> resubmit;
- _ -> Acc
- end;
- _ ->
- % If any job fails, retry the db.
- resubmit
- end end, ok, Indexes);
- true -> ok end.
--endif.
-
-should_start_job(#job{name = Name, seq = Seq, server = Pid}, State) ->
- Threshold = list_to_integer(config("max_incremental_updates", "1000")),
- IncrementalChannels = list_to_integer(config("incremental_channels", "80")),
- BatchChannels = list_to_integer(config("batch_channels", "20")),
- TotalChannels = IncrementalChannels + BatchChannels,
- A = get_active_count(),
- #state{delay = Delay, batch_size = BS} = State,
- case ets:lookup(ken_workers, Name) of
- [] ->
- if
- A < BatchChannels ->
- true;
- A < TotalChannels ->
- case Name of
- % st_index name has three elements
- {_, _, hastings} ->
- {ok, CurrentSeq} = hastings_index:await(Pid, 0),
- (Seq - CurrentSeq) < Threshold;
- % View name has two elements.
- {_,_} ->
- % Since seq is 0, couch_index:get_state/2 won't
- % spawn an index update.
- {ok, MRSt} = couch_index:get_state(Pid, 0),
- CurrentSeq = couch_mrview_index:get(update_seq, MRSt),
- (Seq - CurrentSeq) < Threshold;
- % Search name has three elements.
- {_,_,_} ->
- {ok, _IndexPid, CurrentSeq} = dreyfus_index:await(Pid, 0),
- (Seq - CurrentSeq) < Threshold;
- _ -> % Should never happen, but if it does, ignore.
- false
- end;
- true ->
- false
- end;
- [#job{worker_pid = nil, lru = LRU, seq = OldSeq}] ->
- Now = erlang:monotonic_time(),
- DeltaT = erlang:convert_time_unit(Now - LRU, native, millisecond),
- if
- A < BatchChannels, (Seq - OldSeq) >= BS ->
- true;
- A < BatchChannels, DeltaT > Delay ->
- true;
- A < TotalChannels, (Seq - OldSeq) < Threshold, DeltaT > Delay ->
- true;
- true ->
- false
- end;
- _ ->
- false
- end.
-
-maybe_start_job(JobName, IndexPid, Seq, State) ->
- Job = #job{
- name = JobName,
- server = IndexPid,
- seq = Seq
- },
- case should_start_job(Job, State) of
- true ->
- gen_server:cast(?MODULE, {trigger_update, Job});
- false ->
- resubmit
- end.
-
-debrief_worker(Pid, Reason, _State) ->
- case ets:match_object(ken_workers, #job{worker_pid=Pid, _='_'}) of
- [#job{name = Name} = Job] ->
- case Name of
- {DbName,_} ->
- maybe_resubmit(DbName, Reason);
- {DbName,_,_} ->
- maybe_resubmit(DbName, Reason)
- end,
- ets:insert(ken_workers, Job#job{worker_pid = nil});
- [] -> % should never happen, but if it does, ignore
- ok
- end.
-
-maybe_resubmit(_DbName, normal) ->
- ok;
-maybe_resubmit(_DbName, {database_does_not_exist, _}) ->
- ok;
-maybe_resubmit(_DbName, {not_found, no_db_file}) ->
- ok;
-maybe_resubmit(DbName, resubmit) ->
- resubmit(60000, DbName);
-maybe_resubmit(DbName, _) ->
- resubmit(5000, DbName).
-
-resubmit(Delay, DbName) ->
- case ets:insert_new(ken_resubmit, {DbName}) of
- true ->
- erlang:send_after(Delay, ?MODULE, {'$gen_cast', {resubmit, DbName}});
- false ->
- ok
- end.
-
-prune_worker_table(State) ->
- % remove all entries older than specified `delay` in milliseconds
- Delay = erlang:convert_time_unit(State#state.delay, millisecond, native),
- C = erlang:monotonic_time() - Delay,
-    %% fun(#job{worker_pid=nil, lru=A}) when A < C -> true end
- MatchHead = #job{worker_pid=nil, lru='$1', _='_'},
- Guard = {'<', '$1', C},
- ets:select_delete(ken_workers, [{MatchHead, [Guard], [true]}]),
- State#state{pruned_last = erlang:monotonic_time()}.
-
-allowed_languages() ->
- Config = couch_proc_manager:get_servers_from_env("COUCHDB_QUERY_SERVER_") ++
- couch_proc_manager:get_servers_from_env("COUCHDB_NATIVE_QUERY_SERVER_"),
- Allowed = [list_to_binary(string:to_lower(Lang)) || {Lang, _Cmd} <- Config],
- [<<"query">> | Allowed].
-
-config(Key, Default) ->
- config:get("ken", Key, Default).
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-
-
-prune_old_entries_test_() ->
- {
- setup,
- fun() ->
- ets:new(ken_workers, [named_table, public, {keypos, #job.name}])
- end,
- fun(_) ->
- catch ets:delete(ken_workers)
- end,
- ?_test(begin
- lists:foreach(fun(Idx) ->
- ets:insert(ken_workers, #job{name=Idx}),
- timer:sleep(100)
- end, lists:seq(1, 3)),
- prune_worker_table(#state{delay=250}),
- ?assertEqual(
- [2, 3],
- lists:usort(
- [N || #job{name = N} <- ets:tab2list(ken_workers)])
- ),
- ok
- end)
- }.
-
--endif.
diff --git a/src/ken/src/ken_sup.erl b/src/ken/src/ken_sup.erl
deleted file mode 100644
index fd08cfd11..000000000
--- a/src/ken/src/ken_sup.erl
+++ /dev/null
@@ -1,33 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ken_sup).
-
--behaviour(supervisor).
-
-%% API
--export([start_link/0]).
-
-%% Supervisor callbacks
--export([init/1]).
-
-%% Helper macro for declaring children of supervisor
--define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}).
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-%% supervisor callbacks
-
-init([]) ->
- {ok, { {one_for_one, 5, 10}, [?CHILD(ken_server, worker)]} }.
-
diff --git a/src/ken/test/config.ini b/src/ken/test/config.ini
deleted file mode 100644
index a28eae4c0..000000000
--- a/src/ken/test/config.ini
+++ /dev/null
@@ -1,2 +0,0 @@
-[ken]
-limit = 42
diff --git a/src/ken/test/ken_server_test.erl b/src/ken/test/ken_server_test.erl
deleted file mode 100644
index eed348422..000000000
--- a/src/ken/test/ken_server_test.erl
+++ /dev/null
@@ -1,97 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ken_server_test).
-
--include_lib("eunit/include/eunit.hrl").
-
-%% hardcoded defaults: limit: 20; batch: 1; delay: 5000; prune: 60000
-default_test_() ->
- {inorder, {setup,
- fun setup_default/0,
- fun teardown/1,
- [
- set_builder("returns default", set_limit, 12, 20),
- set_builder("keeps set", set_limit, 6, 12),
- set_builder("returns default", set_batch_size, 3, 1),
- set_builder("keeps set", set_batch_size, 6, 3),
- set_builder("returns default", set_delay, 7000, 5000),
- set_builder("keeps set", set_delay, 10000, 7000),
- set_builder("returns default", set_prune_interval, 70000, 60000),
- set_builder("keeps set", set_prune_interval, 80000, 70000)
- ]
- }}.
-
-exception_test_() ->
- {inorder, {foreach,
- fun setup_default/0,
- fun teardown/1,
- [
- exception_builder("exception on zero", set_limit, 0),
- exception_builder("exception on negative", set_limit, -12),
- exception_builder("exception on zero", set_batch_size, 0),
- exception_builder("exception on negative", set_batch_size, -12),
- set_builder("no exception on zero", set_delay, 0, 5000),
- exception_builder("exception on negative", set_delay, -12),
- exception_builder("exception on zero", set_prune_interval, 0),
- exception_builder("exception on negative", set_prune_interval, -12)
- ]
- }}.
-
-config_test_() ->
- {inorder, {setup,
- fun setup_config/0,
- fun teardown/1,
- [
- set_builder("reads config", set_limit, 24, 42),
- set_builder("keeps set", set_limit, 6, 24)
- ]
- }}.
-
-setup_default() ->
- {ok, EventPid} = start_server(couch_event_server),
- {ok, CfgPid} = start_server(config),
- {ok, KenPid} = start_server(ken_server),
- [{ken_pid, KenPid}, {cfg_pid, CfgPid}, {event_pid, EventPid}].
-
-setup_config() ->
- {ok, Pwd} = file:get_cwd(),
- Config = filename:join([Pwd, "..", "test", "config.ini"]),
- {ok, EventPid} = start_server(couch_event_server),
- {ok, CfgPid} = start_server(config, [[Config]]),
- {ok, KenPid} = start_server(ken_server),
- [{ken_pid, KenPid}, {cfg_pid, CfgPid}, {event_pid, EventPid}].
-
-teardown(Cfg) ->
- ok = stop_server(event_pid, Cfg),
- ok = stop_server(cfg_pid, Cfg),
- ok = stop_server(ken_pid, Cfg).
-
-exception_builder(Desc, F, Val) ->
- D = atom_to_list(F) ++ " " ++ Desc,
- {D, ?_assertException(error, function_clause, ken_server:F(Val))}.
-
-set_builder(Desc, F, In, Out) ->
- D = atom_to_list(F) ++ " " ++ Desc,
- {D, ?_assertEqual(Out, ken_server:F(In))}.
-
-start_server(Module) ->
- start_server(Module, []).
-
-start_server(Module, Config) ->
- gen_server:start({local, Module}, Module, Config, []).
-
-stop_server(Key, Cfg) ->
- {Key, Pid} = lists:keyfind(Key, 1, Cfg),
- MRef = erlang:monitor(process, Pid),
- true = exit(Pid, kill),
- receive {'DOWN', MRef, _, _, _} -> ok end.
diff --git a/src/mango/src/mango_cursor.erl b/src/mango/src/mango_cursor.erl
index 63b449cdc..ed35817e3 100644
--- a/src/mango/src/mango_cursor.erl
+++ b/src/mango/src/mango_cursor.erl
@@ -30,18 +30,10 @@
-include("mango_idx.hrl").
--ifdef(HAVE_DREYFUS).
-define(CURSOR_MODULES, [
mango_cursor_view,
- mango_cursor_text,
mango_cursor_special
]).
--else.
--define(CURSOR_MODULES, [
- mango_cursor_view,
- mango_cursor_special
-]).
--endif.
-define(SUPERVISOR, mango_cursor_sup).
diff --git a/src/mango/src/mango_cursor_special.erl b/src/mango/src/mango_cursor_special.erl
index df1f6d655..33a1f8c46 100644
--- a/src/mango/src/mango_cursor_special.erl
+++ b/src/mango/src/mango_cursor_special.erl
@@ -24,7 +24,7 @@
-include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
-include("mango_cursor.hrl").
diff --git a/src/mango/src/mango_cursor_text.erl b/src/mango/src/mango_cursor_text.erl
deleted file mode 100644
index 65811046e..000000000
--- a/src/mango/src/mango_cursor_text.erl
+++ /dev/null
@@ -1,334 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_cursor_text).
-
--ifdef(HAVE_DREYFUS).
-
--export([
- create/4,
- explain/1,
- execute/3
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("dreyfus/include/dreyfus.hrl").
--include("mango_cursor.hrl").
--include("mango.hrl").
-
-
--record(cacc, {
- selector,
- dbname,
- ddocid,
- idx_name,
- query_args,
- bookmark,
- limit,
- skip,
- user_fun,
- user_acc,
- fields,
- execution_stats
-}).
-
-
-create(Db, Indexes, Selector, Opts) ->
- Index = case Indexes of
- [Index0] ->
- Index0;
- _ ->
- ?MANGO_ERROR(multiple_text_indexes)
- end,
-
- Bookmark = unpack_bookmark(couch_db:name(Db), Opts),
-
- DreyfusLimit = get_dreyfus_limit(),
- Limit = erlang:min(DreyfusLimit, couch_util:get_value(limit, Opts, mango_opts:default_limit())),
- Skip = couch_util:get_value(skip, Opts, 0),
- Fields = couch_util:get_value(fields, Opts, all_fields),
-
- {ok, #cursor{
- db = Db,
- index = Index,
- ranges = null,
- selector = Selector,
- opts = Opts,
- limit = Limit,
- skip = Skip,
- fields = Fields,
- bookmark = Bookmark
- }}.
-
-
-explain(Cursor) ->
- #cursor{
- selector = Selector,
- opts = Opts
- } = Cursor,
- [
- {'query', mango_selector_text:convert(Selector)},
- {sort, sort_query(Opts, Selector)}
- ].
-
-
-execute(Cursor, UserFun, UserAcc) ->
- #cursor{
- db = Db,
- index = Idx,
- limit = Limit,
- skip = Skip,
- selector = Selector,
- opts = Opts,
- execution_stats = Stats,
- bookmark = Bookmark
- } = Cursor,
- Query = mango_selector_text:convert(Selector),
- QueryArgs = #index_query_args{
- q = Query,
- sort = sort_query(Opts, Selector),
- raw_bookmark = true
- },
- CAcc = #cacc{
- selector = Selector,
- dbname = couch_db:name(Db),
- ddocid = ddocid(Idx),
- idx_name = mango_idx:name(Idx),
- bookmark = Bookmark,
- limit = Limit,
- skip = Skip,
- query_args = QueryArgs,
- user_fun = UserFun,
- user_acc = UserAcc,
- fields = Cursor#cursor.fields,
- execution_stats = mango_execution_stats:log_start(Stats)
- },
- try
- case Query of
- <<>> ->
- throw({stop, CAcc});
- _ ->
- execute(CAcc)
- end
- catch
- throw:{stop, FinalCAcc} ->
- #cacc{
- bookmark = FinalBM,
- user_fun = UserFun,
- user_acc = LastUserAcc,
- execution_stats = Stats0
- } = FinalCAcc,
- JsonBM = dreyfus_bookmark:pack(FinalBM),
- Arg = {add_key, bookmark, JsonBM},
- {_Go, FinalUserAcc} = UserFun(Arg, LastUserAcc),
- FinalUserAcc0 = mango_execution_stats:maybe_add_stats(Opts, UserFun, Stats0, FinalUserAcc),
- FinalUserAcc1 = mango_cursor:maybe_add_warning(UserFun, Cursor, Stats0, FinalUserAcc0),
- {ok, FinalUserAcc1}
- end.
-
-
-execute(CAcc) ->
- case search_docs(CAcc) of
- {ok, Bookmark, []} ->
- % If we don't have any results from the
- % query it means the request has paged through
- % all possible results and the request is over.
- NewCAcc = CAcc#cacc{bookmark = Bookmark},
- throw({stop, NewCAcc});
- {ok, Bookmark, Hits} ->
- NewCAcc = CAcc#cacc{bookmark = Bookmark},
- HitDocs = get_json_docs(CAcc#cacc.dbname, Hits),
- {ok, FinalCAcc} = handle_hits(NewCAcc, HitDocs),
- execute(FinalCAcc)
- end.
-
-
-search_docs(CAcc) ->
- #cacc{
- dbname = DbName,
- ddocid = DDocId,
- idx_name = IdxName
- } = CAcc,
- QueryArgs = update_query_args(CAcc),
- case dreyfus_fabric_search:go(DbName, DDocId, IdxName, QueryArgs) of
- {ok, Bookmark, _, Hits, _, _} ->
- {ok, Bookmark, Hits};
- {error, Reason} ->
- ?MANGO_ERROR({text_search_error, {error, Reason}})
- end.
-
-
-handle_hits(CAcc, []) ->
- {ok, CAcc};
-
-handle_hits(CAcc0, [{Sort, Doc} | Rest]) ->
- CAcc1 = handle_hit(CAcc0, Sort, Doc),
- handle_hits(CAcc1, Rest).
-
-
-handle_hit(CAcc0, Sort, not_found) ->
- CAcc1 = update_bookmark(CAcc0, Sort),
- CAcc1;
-
-handle_hit(CAcc0, Sort, Doc) ->
- #cacc{
- limit = Limit,
- skip = Skip,
- execution_stats = Stats
- } = CAcc0,
- CAcc1 = update_bookmark(CAcc0, Sort),
- Stats1 = mango_execution_stats:incr_docs_examined(Stats),
- couch_stats:increment_counter([mango, docs_examined]),
- CAcc2 = CAcc1#cacc{execution_stats = Stats1},
- case mango_selector:match(CAcc2#cacc.selector, Doc) of
- true when Skip > 0 ->
- CAcc2#cacc{skip = Skip - 1};
- true when Limit == 0 ->
-            % We hit this case if the user specified a
- % zero limit. Notice that in this case we need
- % to return the bookmark from before this match
- throw({stop, CAcc0});
- true when Limit == 1 ->
- NewCAcc = apply_user_fun(CAcc2, Doc),
- throw({stop, NewCAcc});
- true when Limit > 1 ->
- NewCAcc = apply_user_fun(CAcc2, Doc),
- NewCAcc#cacc{limit = Limit - 1};
- false ->
- CAcc2
- end.
-
-
-apply_user_fun(CAcc, Doc) ->
- FinalDoc = mango_fields:extract(Doc, CAcc#cacc.fields),
- #cacc{
- user_fun = UserFun,
- user_acc = UserAcc,
- execution_stats = Stats
- } = CAcc,
- Stats0 = mango_execution_stats:incr_results_returned(Stats),
- case UserFun({row, FinalDoc}, UserAcc) of
- {ok, NewUserAcc} ->
- CAcc#cacc{user_acc = NewUserAcc, execution_stats = Stats0};
- {stop, NewUserAcc} ->
- throw({stop, CAcc#cacc{user_acc = NewUserAcc, execution_stats = Stats0}})
- end.
-
-
-%% Convert Query to Dreyfus sort specifications
-%% Convert <<"Field">>, <<"desc">> to <<"-Field">>
-%% and append to the dreyfus query
-sort_query(Opts, Selector) ->
- {sort, {Sort}} = lists:keyfind(sort, 1, Opts),
- SortList = lists:map(fun(SortField) ->
- {Dir, RawSortField} = case SortField of
- {Field, <<"asc">>} -> {asc, Field};
- {Field, <<"desc">>} -> {desc, Field};
- Field when is_binary(Field) -> {asc, Field}
- end,
- SField = mango_selector_text:append_sort_type(RawSortField, Selector),
- case Dir of
- asc ->
- SField;
- desc ->
- <<"-", SField/binary>>
- end
- end, Sort),
- case SortList of
- [] -> relevance;
- _ -> SortList
- end.
-
-
-get_bookmark(Opts) ->
- case lists:keyfind(bookmark, 1, Opts) of
- {_, BM} when is_list(BM), BM /= [] ->
- BM;
- _ ->
- nil
- end.
-
-
-update_bookmark(CAcc, Sortable) ->
- BM = CAcc#cacc.bookmark,
- QueryArgs = CAcc#cacc.query_args,
- Sort = QueryArgs#index_query_args.sort,
- NewBM = dreyfus_bookmark:update(Sort, BM, [Sortable]),
- CAcc#cacc{bookmark = NewBM}.
-
-
-pack_bookmark(Bookmark) ->
- case dreyfus_bookmark:pack(Bookmark) of
- null -> nil;
- Enc -> Enc
- end.
-
-
-unpack_bookmark(DbName, Opts) ->
- case lists:keyfind(bookmark, 1, Opts) of
- {_, nil} ->
- [];
- {_, Bin} ->
- try
- dreyfus_bookmark:unpack(DbName, Bin)
- catch _:_ ->
- ?MANGO_ERROR({invalid_bookmark, Bin})
- end
- end.
-
-
-ddocid(Idx) ->
- case mango_idx:ddoc(Idx) of
- <<"_design/", Rest/binary>> ->
- Rest;
- Else ->
- Else
- end.
-
-
-update_query_args(CAcc) ->
- #cacc{
- bookmark = Bookmark,
- query_args = QueryArgs
- } = CAcc,
- QueryArgs#index_query_args{
- bookmark = pack_bookmark(Bookmark),
- limit = get_limit(CAcc)
- }.
-
-
-get_limit(CAcc) ->
- erlang:min(get_dreyfus_limit(), CAcc#cacc.limit + CAcc#cacc.skip).
-
-
-get_dreyfus_limit() ->
- config:get_integer("dreyfus", "max_limit", 200).
-
-
-get_json_docs(DbName, Hits) ->
- Ids = lists:map(fun(#sortable{item = Item}) ->
- couch_util:get_value(<<"_id">>, Item#hit.fields)
- end, Hits),
- % TODO: respect R query parameter (same as json indexes)
- {ok, IdDocs} = dreyfus_fabric:get_json_docs(DbName, Ids),
- lists:map(fun(#sortable{item = Item} = Sort) ->
- Id = couch_util:get_value(<<"_id">>, Item#hit.fields),
- case lists:keyfind(Id, 1, IdDocs) of
- {Id, {doc, Doc}} ->
- {Sort, Doc};
- false ->
- {Sort, not_found}
- end
- end, Hits).
-
--endif.
diff --git a/src/mango/src/mango_cursor_view.erl b/src/mango/src/mango_cursor_view.erl
index 411f4af65..4a22e15fa 100644
--- a/src/mango/src/mango_cursor_view.erl
+++ b/src/mango/src/mango_cursor_view.erl
@@ -27,11 +27,10 @@
-include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
--include_lib("fabric/include/fabric.hrl").
-
+-include_lib("couch_views/include/couch_views.hrl").
-include("mango_cursor.hrl").
-include("mango_idx_view.hrl").
+-include_lib("kernel/include/logger.hrl").
create(Db, Indexes, Selector, Opts) ->
@@ -137,8 +136,7 @@ execute(#cursor{db = Db, index = Idx, execution_stats = Stats} = Cursor0, UserFu
Result = case mango_idx:def(Idx) of
all_docs ->
CB = fun ?MODULE:handle_all_docs_message/2,
- AllDocOpts = fabric2_util:all_docs_view_opts(Args)
- ++ [{restart_tx, true}],
+ AllDocOpts = fabric2_util:all_docs_view_opts(Args),
fabric2_db:fold_docs(Db, CB, Cursor, AllDocOpts);
_ ->
CB = fun ?MODULE:handle_message/2,
@@ -244,6 +242,7 @@ handle_message({row, Props}, Cursor) ->
},
{ok, Cursor1};
Error ->
+ ?LOG_ERROR(#{what => load_doc_failure, details => Error}),
couch_log:error("~s :: Error loading doc: ~p", [?MODULE, Error]),
{ok, Cursor}
end;
diff --git a/src/mango/src/mango_eval.erl b/src/mango/src/mango_eval.erl
index 59d784b49..7fd81df77 100644
--- a/src/mango/src/mango_eval.erl
+++ b/src/mango/src/mango_eval.erl
@@ -18,7 +18,10 @@
-export([
acquire_map_context/1,
release_map_context/1,
- map_docs/2
+ map_docs/2,
+ acquire_context/0,
+ release_context/1,
+ try_compile/4
]).
@@ -60,6 +63,18 @@ map_docs(Indexes, Docs) ->
end, Docs)}.
+acquire_context() ->
+ {ok, no_ctx}.
+
+
+release_context(_) ->
+ ok.
+
+
+try_compile(_Ctx, _FunType, _IndexName, IndexInfo) ->
+ mango_idx_view:validate_index_def(IndexInfo).
+
+
index_doc(Indexes, Doc) ->
lists:map(fun(Idx) ->
{IdxDef} = mango_idx:def(Idx),
diff --git a/src/mango/src/mango_httpd.erl b/src/mango/src/mango_httpd.erl
index 8d5a2123d..0d035dd99 100644
--- a/src/mango/src/mango_httpd.erl
+++ b/src/mango/src/mango_httpd.erl
@@ -36,10 +36,9 @@ handle_req(#httpd{} = Req, Db) ->
try
handle_req_int(Req, Db)
catch
- throw:{mango_error, Module, Reason} ->
+ throw:{mango_error, Module, Reason}:Stack ->
case mango_error:info(Module, Reason) of
{500, ErrorStr, ReasonStr} ->
- Stack = erlang:get_stacktrace(),
chttpd:send_error(Req, {ErrorStr, ReasonStr, Stack});
{Code, ErrorStr, ReasonStr} ->
chttpd:send_error(Req, Code, ErrorStr, ReasonStr)
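
This hunk swaps the deprecated erlang:get_stacktrace/0 for the class:Reason:Stacktrace pattern introduced in OTP 21, which binds the stacktrace directly in the catch clause. A self-contained sketch of the idiom (the module name is illustrative, not CouchDB code):

    -module(stacktrace_idiom_sketch).
    -export([run/1]).

    %% Bind the stacktrace in the catch pattern itself; the deprecated
    %% erlang:get_stacktrace/0 call is no longer needed.
    run(Fun) ->
        try
            Fun()
        catch
            throw:Reason:Stack ->
                {caught, Reason, Stack}
        end.
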
diff --git a/src/mango/src/mango_idx.erl b/src/mango/src/mango_idx.erl
index 37b6e03eb..e27f327edf 100644
--- a/src/mango/src/mango_idx.erl
+++ b/src/mango/src/mango_idx.erl
@@ -60,7 +60,7 @@ list(Db) ->
case proplists:get_value(<<"language">>, Props) == <<"query">> of
true ->
- {ok, Mrst} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
+ {ok, Mrst} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
IsInteractive = couch_views_ddoc:is_interactive(DDoc),
BuildState = couch_views_fdb:get_build_status(Db, Mrst),
@@ -263,12 +263,7 @@ cursor_mod(#idx{type = <<"json">>}) ->
cursor_mod(#idx{def = all_docs, type= <<"special">>}) ->
mango_cursor_special;
cursor_mod(#idx{type = <<"text">>}) ->
- case clouseau_rpc:connected() of
- true ->
- mango_cursor_text;
- false ->
- ?MANGO_ERROR({index_service_unavailable, <<"text">>})
- end.
+ ?MANGO_ERROR({index_service_unavailable, <<"text">>}).
idx_mod(#idx{type = <<"json">>}) ->
@@ -276,12 +271,7 @@ idx_mod(#idx{type = <<"json">>}) ->
idx_mod(#idx{type = <<"special">>}) ->
mango_idx_special;
idx_mod(#idx{type = <<"text">>}) ->
- case clouseau_rpc:connected() of
- true ->
- mango_idx_text;
- false ->
- ?MANGO_ERROR({index_service_unavailable, <<"text">>})
- end.
+ ?MANGO_ERROR({index_service_unavailable, <<"text">>}).
db_to_name(Name) when is_binary(Name) ->
@@ -318,8 +308,7 @@ get_idx_type(Opts) ->
is_text_service_available() ->
- erlang:function_exported(clouseau_rpc, connected, 0) andalso
- clouseau_rpc:connected().
+ false.
get_idx_ddoc(Idx, Opts) ->
diff --git a/src/mango/src/mango_idx_text.erl b/src/mango/src/mango_idx_text.erl
deleted file mode 100644
index 71eaf110a..000000000
--- a/src/mango/src/mango_idx_text.erl
+++ /dev/null
@@ -1,459 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_idx_text).
-
-
--export([
- validate_new/2,
- validate_fields/1,
- validate_index_def/1,
- add/2,
- remove/2,
- from_ddoc/1,
- to_json/1,
- columns/1,
- is_usable/3,
- get_default_field_options/1
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
--include("mango.hrl").
--include("mango_idx.hrl").
-
-
-validate_new(#idx{}=Idx, Db) ->
- {ok, Def} = do_validate(Idx#idx.def),
- maybe_reject_index_all_req(Def, Db),
- {ok, Idx#idx{def=Def}}.
-
-
-validate_index_def(IndexInfo) ->
- do_validate(IndexInfo).
-
-
-add(#doc{body={Props0}}=DDoc, Idx) ->
- Texts1 = case proplists:get_value(<<"indexes">>, Props0) of
- {Texts0} -> Texts0;
- _ -> []
- end,
- NewText = make_text(Idx),
- Texts2 = lists:keystore(element(1, NewText), 1, Texts1, NewText),
- Props1 = lists:keystore(<<"indexes">>, 1, Props0, {<<"indexes">>,
- {Texts2}}),
- {ok, DDoc#doc{body={Props1}}}.
-
-
-remove(#doc{body={Props0}}=DDoc, Idx) ->
- Texts1 = case proplists:get_value(<<"indexes">>, Props0) of
- {Texts0} ->
- Texts0;
- _ ->
- ?MANGO_ERROR({index_not_found, Idx#idx.name})
- end,
- Texts2 = lists:keydelete(Idx#idx.name, 1, Texts1),
- if Texts2 /= Texts1 -> ok; true ->
- ?MANGO_ERROR({index_not_found, Idx#idx.name})
- end,
- Props1 = case Texts2 of
- [] ->
- lists:keydelete(<<"indexes">>, 1, Props0);
- _ ->
- lists:keystore(<<"indexes">>, 1, Props0, {<<"indexes">>, {Texts2}})
- end,
- {ok, DDoc#doc{body={Props1}}}.
-
-
-from_ddoc({Props}) ->
- case lists:keyfind(<<"indexes">>, 1, Props) of
- {<<"indexes">>, {Texts}} when is_list(Texts) ->
- lists:flatmap(fun({Name, {VProps}}) ->
- case validate_ddoc(VProps) of
- invalid_ddoc ->
- [];
- Def ->
- I = #idx{
- type = <<"text">>,
- name = Name,
- def = Def
- },
- [I]
- end
- end, Texts);
- _ ->
- []
- end.
-
-
-to_json(Idx) ->
- {[
- {ddoc, Idx#idx.ddoc},
- {name, Idx#idx.name},
- {type, Idx#idx.type},
- {def, {def_to_json(Idx#idx.def)}}
- ]}.
-
-
-columns(Idx) ->
- {Props} = Idx#idx.def,
- {<<"fields">>, Fields} = lists:keyfind(<<"fields">>, 1, Props),
- case Fields of
- <<"all_fields">> ->
- all_fields;
- _ ->
- {DFProps} = couch_util:get_value(<<"default_field">>, Props, {[]}),
- Enabled = couch_util:get_value(<<"enabled">>, DFProps, true),
- Default = case Enabled of
- true -> [<<"$default">>];
- false -> []
- end,
- Default ++ lists:map(fun({FProps}) ->
- {_, Name} = lists:keyfind(<<"name">>, 1, FProps),
- {_, Type} = lists:keyfind(<<"type">>, 1, FProps),
- iolist_to_binary([Name, ":", Type])
- end, Fields)
- end.
-
-
-is_usable(_, Selector, _) when Selector =:= {[]} ->
- false;
-is_usable(Idx, Selector, _) ->
- case columns(Idx) of
- all_fields ->
- true;
- Cols ->
- Fields = indexable_fields(Selector),
- sets:is_subset(sets:from_list(Fields), sets:from_list(Cols))
- end.
-
-
-do_validate({Props}) ->
- {ok, Opts} = mango_opts:validate(Props, opts()),
- {ok, {Opts}};
-do_validate(Else) ->
- ?MANGO_ERROR({invalid_index_text, Else}).
-
-
-def_to_json({Props}) ->
- def_to_json(Props);
-def_to_json([]) ->
- [];
-def_to_json([{<<"fields">>, <<"all_fields">>} | Rest]) ->
- [{<<"fields">>, []} | def_to_json(Rest)];
-def_to_json([{fields, Fields} | Rest]) ->
- [{<<"fields">>, fields_to_json(Fields)} | def_to_json(Rest)];
-def_to_json([{<<"fields">>, Fields} | Rest]) ->
- [{<<"fields">>, fields_to_json(Fields)} | def_to_json(Rest)];
-% Don't include partial_filter_selector in the json conversion
-% if it's the default value
-def_to_json([{<<"partial_filter_selector">>, {[]}} | Rest]) ->
- def_to_json(Rest);
-def_to_json([{Key, Value} | Rest]) ->
- [{Key, Value} | def_to_json(Rest)].
-
-
-fields_to_json([]) ->
- [];
-fields_to_json([{[{<<"name">>, Name}, {<<"type">>, Type0}]} | Rest]) ->
- ok = validate_field_name(Name),
- Type = validate_field_type(Type0),
- [{[{Name, Type}]} | fields_to_json(Rest)];
-fields_to_json([{[{<<"type">>, Type0}, {<<"name">>, Name}]} | Rest]) ->
- ok = validate_field_name(Name),
- Type = validate_field_type(Type0),
- [{[{Name, Type}]} | fields_to_json(Rest)].
-
-
-%% In the future, we can possibly add more restrictive validation.
-%% For now, let's make sure the field name is not blank.
-validate_field_name(<<"">>) ->
- throw(invalid_field_name);
-validate_field_name(Else) when is_binary(Else)->
- ok;
-validate_field_name(_) ->
- throw(invalid_field_name).
-
-
-validate_field_type(<<"string">>) ->
- <<"string">>;
-validate_field_type(<<"number">>) ->
- <<"number">>;
-validate_field_type(<<"boolean">>) ->
- <<"boolean">>.
-
-
-validate_fields(<<"all_fields">>) ->
- {ok, all_fields};
-validate_fields(Fields) ->
- try fields_to_json(Fields) of
- _ ->
- mango_fields:new(Fields)
- catch error:function_clause ->
- ?MANGO_ERROR({invalid_index_fields_definition, Fields});
- throw:invalid_field_name ->
- ?MANGO_ERROR({invalid_index_fields_definition, Fields})
- end.
-
-
-validate_ddoc(VProps) ->
- try
- Def = proplists:get_value(<<"index">>, VProps),
- validate_index_def(Def),
- Def
- catch Error:Reason ->
- couch_log:error("Invalid Index Def ~p: Error. ~p, Reason: ~p",
- [VProps, Error, Reason]),
- invalid_ddoc
- end.
-
-
-opts() ->
- [
- {<<"default_analyzer">>, [
- {tag, default_analyzer},
- {optional, true},
- {default, <<"keyword">>}
- ]},
- {<<"default_field">>, [
- {tag, default_field},
- {optional, true},
- {default, {[]}}
- ]},
- {<<"partial_filter_selector">>, [
- {tag, partial_filter_selector},
- {optional, true},
- {default, {[]}},
- {validator, fun mango_opts:validate_selector/1}
- ]},
- {<<"selector">>, [
- {tag, selector},
- {optional, true},
- {default, {[]}},
- {validator, fun mango_opts:validate_selector/1}
- ]},
- {<<"fields">>, [
- {tag, fields},
- {optional, true},
- {default, []},
- {validator, fun ?MODULE:validate_fields/1}
- ]},
- {<<"index_array_lengths">>, [
- {tag, index_array_lengths},
- {optional, true},
- {default, true},
- {validator, fun mango_opts:is_boolean/1}
- ]}
- ].
-
-
-make_text(Idx) ->
- Text= {[
- {<<"index">>, Idx#idx.def},
- {<<"analyzer">>, construct_analyzer(Idx#idx.def)}
- ]},
- {Idx#idx.name, Text}.
-
-
-get_default_field_options(Props) ->
- Default = couch_util:get_value(default_field, Props, {[]}),
- case Default of
- Bool when is_boolean(Bool) ->
- {Bool, <<"standard">>};
- {[]} ->
- {true, <<"standard">>};
- {Opts}->
- Enabled = couch_util:get_value(<<"enabled">>, Opts, true),
- Analyzer = couch_util:get_value(<<"analyzer">>, Opts,
- <<"standard">>),
- {Enabled, Analyzer}
- end.
-
-
-construct_analyzer({Props}) ->
- DefaultAnalyzer = couch_util:get_value(default_analyzer, Props,
- <<"keyword">>),
- {DefaultField, DefaultFieldAnalyzer} = get_default_field_options(Props),
- DefaultAnalyzerDef = case DefaultField of
- true ->
- [{<<"$default">>, DefaultFieldAnalyzer}];
- _ ->
- []
- end,
- case DefaultAnalyzerDef of
- [] ->
- <<"keyword">>;
- _ ->
- {[
- {<<"name">>, <<"perfield">>},
- {<<"default">>, DefaultAnalyzer},
- {<<"fields">>, {DefaultAnalyzerDef}}
- ]}
- end.
-
-
-indexable_fields(Selector) ->
- TupleTree = mango_selector_text:convert([], Selector),
- indexable_fields([], TupleTree).
-
-
-indexable_fields(Fields, {op_and, Args}) when is_list(Args) ->
- lists:foldl(fun(Arg, Fields0) -> indexable_fields(Fields0, Arg) end,
- Fields, Args);
-
-%% For queries that use array element access or $in operations, two
-%% fields get generated by mango_selector_text:convert. At index
-%% definition time, only one field gets defined. In this situation, we
-%% remove the extra generated field so that the index can be used. For
-%% all other situations, we include the fields as normal.
-indexable_fields(Fields, {op_or, [{op_field, Field0},
- {op_field, {[Name | _], _}} = Field1]}) ->
- case lists:member(<<"[]">>, Name) of
- true ->
- indexable_fields(Fields, {op_field, Field0});
- false ->
- Fields1 = indexable_fields(Fields, {op_field, Field0}),
- indexable_fields(Fields1, Field1)
- end;
-indexable_fields(Fields, {op_or, Args}) when is_list(Args) ->
- lists:foldl(fun(Arg, Fields0) -> indexable_fields(Fields0, Arg) end,
- Fields, Args);
-
-indexable_fields(Fields, {op_not, {ExistsQuery, Arg}}) when is_tuple(Arg) ->
- Fields0 = indexable_fields(Fields, ExistsQuery),
- indexable_fields(Fields0, Arg);
-% forces "$exists" : false to use _all_docs
-indexable_fields(_, {op_not, {_, false}}) ->
- [];
-
-indexable_fields(Fields, {op_insert, Arg}) when is_binary(Arg) ->
- Fields;
-
-%% fieldname.[]:length is not a user defined field.
-indexable_fields(Fields, {op_field, {[_, <<":length">>], _}}) ->
- Fields;
-indexable_fields(Fields, {op_field, {Name, _}}) ->
- [iolist_to_binary(Name) | Fields];
-
-%% In this particular case, the lucene index is doing a field_exists query
-%% so it is looking at all sorts of combinations of field:* and field.*
-%% We don't add the field because we cannot pre-determine what field will exist.
-%% Hence we just return Fields and make it less restrictive.
-indexable_fields(Fields, {op_fieldname, {_, _}}) ->
- Fields;
-
-%% Similar idea to op_fieldname but with fieldname:null
-indexable_fields(Fields, {op_null, {_, _}}) ->
- Fields;
-
-indexable_fields(Fields, {op_default, _}) ->
- [<<"$default">> | Fields].
-
-
-maybe_reject_index_all_req({Def}, Db) ->
- DbName = couch_db:name(Db),
- #user_ctx{name = User} = couch_db:get_user_ctx(Db),
- Fields = couch_util:get_value(fields, Def),
- case {Fields, forbid_index_all()} of
- {all_fields, "true"} ->
- ?MANGO_ERROR(index_all_disabled);
- {all_fields, "warn"} ->
- couch_log:warning("User ~p is indexing all fields in db ~p",
- [User, DbName]);
- _ ->
- ok
- end.
-
-
-forbid_index_all() ->
- config:get("mango", "index_all_disabled", "false").
-
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-
-setup_all() ->
- Ctx = test_util:start_couch(),
- meck:expect(couch_log, warning, 2,
- fun(_,_) ->
- throw({test_error, logged_warning})
- end),
- Ctx.
-
-
-teardown_all(Ctx) ->
- meck:unload(),
- test_util:stop_couch(Ctx).
-
-
-setup() ->
-    % default index-all def that generates {fields, all_fields}
- Index = #idx{def={[]}},
- DbName = <<"testdb">>,
- UserCtx = #user_ctx{name = <<"u1">>},
- {ok, Db} = couch_db:clustered_db(DbName, UserCtx),
- {Index, Db}.
-
-
-teardown(_) ->
- ok.
-
-
-index_all_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun forbid_index_all/1,
- fun default_and_false_index_all/1,
- fun warn_index_all/1
- ]
- }
- }.
-
-
-forbid_index_all({Idx, Db}) ->
- ?_test(begin
- ok = config:set("mango", "index_all_disabled", "true", false),
- ?assertThrow({mango_error, ?MODULE, index_all_disabled},
- validate_new(Idx, Db)
- )
- end).
-
-
-default_and_false_index_all({Idx, Db}) ->
- ?_test(begin
- config:delete("mango", "index_all_disabled", false),
- {ok, #idx{def={Def}}} = validate_new(Idx, Db),
- Fields = couch_util:get_value(fields, Def),
- ?assertEqual(all_fields, Fields),
- ok = config:set("mango", "index_all_disabled", "false", false),
- {ok, #idx{def={Def2}}} = validate_new(Idx, Db),
- Fields2 = couch_util:get_value(fields, Def2),
- ?assertEqual(all_fields, Fields2)
- end).
-
-
-warn_index_all({Idx, Db}) ->
- ?_test(begin
- ok = config:set("mango", "index_all_disabled", "warn", false),
- ?assertThrow({test_error, logged_warning}, validate_new(Idx, Db))
- end).
-
-
--endif.
diff --git a/src/mango/src/mango_idx_view.erl b/src/mango/src/mango_idx_view.erl
index a73d82ae6..35b741a49 100644
--- a/src/mango/src/mango_idx_view.erl
+++ b/src/mango/src/mango_idx_view.erl
@@ -35,6 +35,7 @@
-include("mango.hrl").
-include("mango_idx.hrl").
-include("mango_idx_view.hrl").
+-include_lib("kernel/include/logger.hrl").
validate_new(#idx{}=Idx, _Db) ->
@@ -254,6 +255,12 @@ validate_ddoc(VProps) ->
Opts = lists:keydelete(<<"sort">>, 1, Opts0),
{Def, Opts}
catch Error:Reason ->
+ ?LOG_ERROR(#{
+ what => invalid_index_definition,
+ tag => Error,
+ details => Reason,
+ index => VProps
+ }),
couch_log:error("Invalid Index Def ~p. Error: ~p, Reason: ~p",
[VProps, Error, Reason]),
invalid_view
diff --git a/src/mango/src/mango_json_bookmark.erl b/src/mango/src/mango_json_bookmark.erl
index 83fd00f29..b60ecdb18 100644
--- a/src/mango/src/mango_json_bookmark.erl
+++ b/src/mango/src/mango_json_bookmark.erl
@@ -19,7 +19,7 @@
]).
--include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
-include("mango_cursor.hrl").
-include("mango.hrl").
diff --git a/src/mango/test/06-basic-text-test.py b/src/mango/test/06-basic-text-test.py
index a3fe383d6..1449ec82d 100644
--- a/src/mango/test/06-basic-text-test.py
+++ b/src/mango/test/06-basic-text-test.py
@@ -21,6 +21,10 @@ import hypothesis.strategies as st
@unittest.skipIf(mango.has_text_service(), "text service exists")
class TextIndexCheckTests(mango.DbPerClass):
+ @classmethod
+ def setUpClass(klass):
+ raise unittest.SkipTest("Re-enable once search is implemented")
+
def test_create_text_index(self):
body = json.dumps({"index": {}, "type": "text"})
resp = self.db.sess.post(self.db.path("_index"), data=body)
diff --git a/src/mango/test/17-multi-type-value-test.py b/src/mango/test/17-multi-type-value-test.py
index 5a8fcedef..194757095 100644
--- a/src/mango/test/17-multi-type-value-test.py
+++ b/src/mango/test/17-multi-type-value-test.py
@@ -53,7 +53,7 @@ class MultiValueFieldTests:
class MultiValueFieldJSONTests(mango.DbPerClass, MultiValueFieldTests):
def setUp(self):
self.db.recreate()
- self.db.create_index(["name"], wait_for_built_index=False)
+ self.db.create_index(["name"], wait_for_built_index=True)
self.db.create_index(["age", "name"], wait_for_built_index=True)
self.db.save_docs(copy.deepcopy(DOCS))
diff --git a/src/mango/test/mango.py b/src/mango/test/mango.py
index 05c4e65c4..2b41ca642 100644
--- a/src/mango/test/mango.py
+++ b/src/mango/test/mango.py
@@ -81,7 +81,7 @@ class Database(object):
def create(self, q=1, n=1):
r = self.sess.get(self.url)
if r.status_code == 404:
- r = self.sess.put(self.url, params={"q": q, "n": n})
+ r = self.sess.put(self.url, params={})
r.raise_for_status()
def delete(self):
diff --git a/src/mem3/LICENSE b/src/mem3/LICENSE
deleted file mode 100644
index f6cd2bc80..000000000
--- a/src/mem3/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/src/mem3/README.md b/src/mem3/README.md
deleted file mode 100644
index 8098f6979..000000000
--- a/src/mem3/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-## mem3
-
-Mem3 is the node membership application for clustered [CouchDB][1]. It has
-been used in CouchDB since version 2.0 and tracks two very important things
-for the cluster:
-
- 1. member nodes
- 2. node/shards mappings for each database
-
-Both the nodes and shards are tracked in node-local couch databases. Shards
-are heavily used, so an ETS cache is also maintained for low-latency lookups.
-The nodes and shards are synchronized via continuous CouchDB replication,
-which serves as 'gossip' in Dynamo parlance. The shards ETS cache is kept in
-sync based on membership and database event listeners.
-
-A very important point to make here is that CouchDB does not necessarily
-divide up each database into equal shards across the nodes of a cluster. For
-instance, in a 20-node cluster, you may need to create a small database with
-very few documents. For efficiency reasons, you may create your database with
-Q=4 and keep the default of N=3. This means you only have 12 shards total, so
-8 nodes will hold none of the data for this database. Given this feature, we
-even out shard use across the cluster by altering the 'start' node for the
-database's shards.
-
-Shards can be split using the `/_reshard` API endpoint. Refer to a separate
-[README](README_reshard.md) regarding the technical detail on how shard
-splitting works.
-
-### Getting Started
-
-Mem3 requires R13B03 or higher and can be built with [rebar][2], which comes
-bundled in the repository. Rebar needs to be able to find the `couch_db.hrl`
-header file; one way to accomplish this is to set ERL_LIBS to point to the
-apps subdirectory of a CouchDB checkout, e.g.
-
- ERL_LIBS="/usr/local/src/couchdb/apps" ./rebar compile
-
-### License
-[Apache 2.0][3]
-
-[1]: http://couchdb.apache.org
-[2]: http://github.com/rebar/rebar
-[3]: http://www.apache.org/licenses/LICENSE-2.0.html
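As a minimal sketch of the Q/N arithmetic described in the README above (the module and function names here are hypothetical, not part of mem3):

    -module(shard_math_example).
    -export([total_shards/2, empty_nodes/3]).

    %% Total shard files for a database: Q ranges times N copies.
    total_shards(Q, N) when is_integer(Q), is_integer(N) ->
        Q * N.

    %% Rough count of cluster nodes holding no data for this database,
    %% assuming each shard file lands on a distinct node (Q*N =< Nodes).
    empty_nodes(Nodes, Q, N) ->
        max(0, Nodes - total_shards(Q, N)).

    %% Example from the README: empty_nodes(20, 4, 3) =:= 8.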
diff --git a/src/mem3/README_reshard.md b/src/mem3/README_reshard.md
deleted file mode 100644
index 237371485..000000000
--- a/src/mem3/README_reshard.md
+++ /dev/null
@@ -1,93 +0,0 @@
-Developer Oriented Resharding Description
-=========================================
-
-This is a technical description of the resharding logic. The discussion focuses on job creation and life-cycle, data definitions, and the state transition mechanisms.
-
-
-Job Life-Cycle
---------------
-
-Job creation happens in the `mem3_reshard_httpd` API handler module. That module uses `mem3_reshard_http_util` to do some validation, and eventually calls `mem3_reshard:start_split_job/2` on one or more nodes in the cluster depending on where the new jobs should run.
-
-`mem3_reshard:start_split_job/2` is the main Erlang API entry point. After some more validation it creates a `#job{}` record and calls the `mem3_reshard` job manager. The manager will then add the job to its jobs ets table, save it to a `_local` document in the shards db, and, most importantly, start a new resharding process. That process will be supervised by the `mem3_reshard_job_sup` supervisor.
-
-Each job runs in a gen_server implemented in the `mem3_reshard_job` module. When splitting a shard, a job will go through a series of steps such as `initial_copy`, `build_indices`, `update_shard_map`, etc. Between each step it will report progress and checkpoint with the `mem3_reshard` manager. A checkpoint involves the `mem3_reshard` manager persisting that job's state to disk in a `_local` document in the `_dbs` db. The job then continues until it reaches the `completed` state or until it fails and ends up in the `failed` state.
-
-If a user stops shard splitting on the whole cluster, then all running jobs will stop. When shard splitting is resumed, they will try to recover from their last checkpoint.
-
-A job can also be individually stopped or resumed. If a job is individually stopped it will stay so even if the global shard splitting state is `running`. A user has to explicitly set that job to a `running` state for it to resume. If a node with running jobs is turned off, when it is rebooted running jobs will resume from their last checkpoint.
-
-
-Data Definitions
-----------------
-
-This section focuses on record definition and how data is transformed to and from various formats.
-
-Right below the `mem3_reshard:start_split_job/1` API level a job is converted to a `#job{}` record defined in the `mem3_reshard.hrl` header file. That record is then used throughout most of the resharding code. The job manager `mem3_reshard` stores it in its jobs ets table, and when a job process is spawned its single argument is also just a `#job{}` record. As a job process executes it will periodically report state back to the `mem3_reshard` manager as an updated `#job{}` record.
-
-Some interesting fields from the `#job{}` record:
-
- - `id` Uniquely identifies a job in a cluster. It is derived from the source shard name, node and a version (currently = 1).
- - `type` Currently the only type supported is `split` but `merge` or `rebalance` might be added in the future.
- - `job_state` The running state of the job. Indicates if the job is `running`, `stopped`, `completed` or `failed`.
- - `split_state` Once the job is running this indicates how far along it got in the splitting process.
- - `source` Source shard file. If/when merge is implemented this will be a list.
- - `target` List of target shard files. This is expected to be a list of 2 items currently.
- - `history` A time-line of state transitions represented as a list of tuples.
- - `pid` When job is running this will be set to the pid of the process.
-
-
-In the `mem3_reshard_job_store` module the `#job{}` record is translated to a json document so it can be persisted to disk. Translation functions to and from json in that module are also used by the HTTP API layer to return a job's state and other information to the user.
-
-Another important piece of data is the global resharding state. When a user disables resharding on a cluster, a call is made to the `mem3_reshard` manager on each node and each one stores that in a `#state{}` record. This record is defined in the `mem3_reshard.hrl` header file, and, just like the `#job{}` record, it can be translated to/from ejson in the `mem3_reshard_store` module and stored to and loaded from disk.
-
-
-State Transitions
------------------
-
-Resharding logic has 3 separate states to keep track of:
-
-1. Per-node resharding state. This state can be toggled between `running` and `stopped`. That toggle happens via the `mem3_reshard:start/0` and `mem3_reshard:stop/1` functions. This state is kept in the `#state{}` record of the `mem3_reshard` manager gen_server. This state is also persisted to the local shard map database as a `_local` document so that it is maintained through a node restart. When the state is `running` then all jobs that are not individually `stopped`, and have not failed or completed, will be `running`. When the state is `stopped` all the running jobs will be `stopped`.
-
-2. Job's running state held in the `#job{}` `job_state` field. This is the general running state of a resharding job. It can be `new`, `running`, `stopped`, `completed` or `failed`. This state is most relevant for the `mem3_reshard` manager. In other words, it is the `mem3_reshard` gen_server that starts the job, monitors it to see if it exits successfully on completion or with an error.
-
-3. Job's internal splitting state. This state tracks the steps taken during shard splitting by each job. This state is mostly relevant for the `mem3_reshard_job` module. This state is kept in `#job{}`'s `split_state` field. The progression of these states is linear going from one state to the next. That's reflected in the code as well, when one state is done, `mem3_reshard_job:get_next_state/1` is called which returns the next state in the list. The list is defined in the `SPLIT_STATES` macro. This simplistic transition is also one of the reasons why a gen_fsm wasn't considered for `mem3_reshard_job` logic.
-
-Another interesting aspect is how the `split_state` transitions happen in the `mem3_reshard_job` module. What follows is an examination of that.
-
-A job starts running in the `new` state or from a previously checkpointed state. In the latter case, the job goes through some recovery logic (see the `?STATE_RESTART` macro in `mem3_reshard_job`) where it tries to resume its work from where it left off. This means, for example, that if it was in the `initial_copy` state and was interrupted, it might have to reset the target files and copy everything again. After recovery, the state execution logic is driven by `run(#job{})`, which ends up calling the `?MODULE:State(#job{})` state-specific functions for each state.
-
-In `mem3_reshard_job:switch_to_next_state/2` the job's history is updated, any current `state_info` is cleared, and the job state is switched in the `#job{}` record. Then the new state is checkpointed in the `checkpoint/1` function. Checkpointing casts a message to the `mem3_reshard` manager. After that message is sent the job process sits and waits.
-
-In the meantime the `mem3_reshard` manager checkpoints the state, which means it updates its ETS table with the new `#job{}` record, persists the state with the `mem3_reshard_store` module, and then, finally, notifies the job process that checkpointing is done by calling `mem3_reshard_job:checkpoint_done/1`.
-
-The `mem3_reshard_job:checkpoint_done/1` call sends a `checkpoint_done` message to the job's process, at which point it starts executing that state.
-
-Most states in `mem3_reshard_job` try not to block the main job process and instead launch worker processes to perform long-running operations. It is usually just one worker process but there could be multiple as well. After that it waits for the workers to finish and inspects their exit signals (see the `wait_for_workers/1` function). When all the workers exit for a particular `split_state`, the job is switched to the next state with `switch_to_next_state/1` and the whole thing repeats until the `completed` state is reached, at which point the whole job exits normally.
-
-If the source is updated at a high rate and the cluster is under load, the resharding jobs may take longer to finish. The cluster would have to be running at the limit where both compaction and internal replication have difficulty catching up, as fundamentally the logic used for the initial bulk copy is similar to the compaction code, and the topoff states just reuse the internal replicator code. Eventually, when the load subsides, the jobs should catch up and finish.
-
-Individual Modules Description
-------------------------------
-
-These are mostly random notes about various modules involved in resharding. Most, but not all, are in the `mem3` application.
-
-* `mem3_reshard`: Main API entry point and the job manager.
-
-* `mem3_reshard_job` : Individual job logic.
-
-* `mem3_reshard_dbdoc` : Responsible for updating the shard doc in the `_dbs` database. Besides a bunch of utility functions, there is a gen_server spawned which is used to update shard documents in a cluster in such a way as to minimize the risk of conflicts. That is accomplished by having each shard updater call only one such updater for the whole cluster. This coordinator is picked by sorting the list of all the live mem3 nodes and taking the first one in the list.
-
-* `mem3_reshard_httpd` : API endpoint definitions.
-
-* `mem3_reshard_api` : Cluster API endpoint. This module is responsible for sending requests to all the nodes in a cluster and gathering results.
-
-* `mem3_reshard_index` : This is a helper module used by workers in the `build_indices` state.
-
-* `mem3_reshard_job_sup` : Simple one for one supervisor which keeps track of running jobs.
-
-* `mem3_reshard_store` : State persistence module. It knows how to save/restore `#job{}` and `#state{}` records to/from `_local` docs. It is also re-used for serializing `#job{}` into ejson by the HTTP API module.
-
-* `mem3_reshard_validate` : Validate that source exists, target ranges don't have gaps in them, etc.
-
-* `couch_db_split` : This module is not in the `mem3` app but it does all the heavy lifting during the initial data copy. Given a source db, some targets, and a function to decide which doc goes to which target, it will copy all data from the source to the targets. It's best to think of this module as a form of compactor. Unlike `couch_bt_engine_compactor` this one lives above the `couch_db_engine` API, and instead of copying data to one new file it copies it to 2 or more. Unsurprisingly, because of that it uses some of the lower level `couch_db_engine` API directly, including linking to a couch_db_updater, force-setting db update sequences, and other operations.
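As a minimal sketch of the linear `split_state` progression described above (the module name and the abbreviated state list are illustrative; the real list lives in the `SPLIT_STATES` macro and contains more steps):

    -module(split_state_example).
    -export([next_state/1]).

    %% Abbreviated stand-in for the SPLIT_STATES macro.
    -define(SPLIT_STATES, [initial_copy, build_indices, update_shard_map, completed]).

    %% Return the state that follows State, or 'completed' once the end
    %% of the progression is reached.
    next_state(State) ->
        case lists:dropwhile(fun(S) -> S =/= State end, ?SPLIT_STATES) of
            [_, Next | _] -> Next;
            _ -> completed
        end.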
diff --git a/src/mem3/include/mem3.hrl b/src/mem3/include/mem3.hrl
deleted file mode 100644
index d97b25469..000000000
--- a/src/mem3/include/mem3.hrl
+++ /dev/null
@@ -1,59 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-% The last element in the ring
--define(RING_END, 2 bsl 31 - 1).
-
-
-% type specification hacked to suppress dialyzer warning re: match spec
--record(shard, {
- name :: binary() | '_' | 'undefined',
- node :: node() | '_' | 'undefined',
- dbname :: binary() | 'undefined',
- range :: [non_neg_integer() | '$1' | '$2'] | '_' | 'undefined',
- ref :: reference() | '_' | 'undefined',
- opts :: list() | 'undefined'
-}).
-
-%% Do not reference outside of mem3.
--record(ordered_shard, {
- name :: binary() | '_',
- node :: node() | '_',
- dbname :: binary(),
- range :: [non_neg_integer() | '$1' | '$2'] | '_',
- ref :: reference() | 'undefined' | '_',
- order :: non_neg_integer() | 'undefined' | '_',
- opts :: list()
-}).
-
-%% types
--type join_type() :: init | join | replace | leave.
--type join_order() :: non_neg_integer().
--type options() :: list().
--type mem_node() :: {join_order(), node(), options()}.
--type mem_node_list() :: [mem_node()].
--type arg_options() :: {test, boolean()}.
--type args() :: [] | [arg_options()].
--type test() :: undefined | node().
--type epoch() :: float().
--type clock() :: {node(), epoch()}.
--type vector_clock() :: [clock()].
--type ping_node() :: node() | nil.
--type gossip_fun() :: call | cast.
-
--type part() :: #shard{}.
--type fullmap() :: [part()].
--type ref_part_map() :: {reference(), part()}.
--type tref() :: reference().
--type np() :: {node(), part()}.
--type beg_acc() :: [integer()].
diff --git a/src/mem3/priv/stats_descriptions.cfg b/src/mem3/priv/stats_descriptions.cfg
deleted file mode 100644
index 569d16ac3..000000000
--- a/src/mem3/priv/stats_descriptions.cfg
+++ /dev/null
@@ -1,12 +0,0 @@
-{[mem3, shard_cache, eviction], [
- {type, counter},
- {desc, <<"number of shard cache evictions">>}
-]}.
-{[mem3, shard_cache, hit], [
- {type, counter},
- {desc, <<"number of shard cache hits">>}
-]}.
-{[mem3, shard_cache, miss], [
- {type, counter},
- {desc, <<"number of shard cache misses">>}
-]}.
diff --git a/src/mem3/rebar.config.script b/src/mem3/rebar.config.script
deleted file mode 100644
index 8f2deb4ae..000000000
--- a/src/mem3/rebar.config.script
+++ /dev/null
@@ -1,22 +0,0 @@
-%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-%% use this file except in compliance with the License. You may obtain a copy of
-%% the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-%% License for the specific language governing permissions and limitations under
-%% the License.
-
-WithProper = code:lib_dir(proper) /= {error, bad_name}.
-
-if not WithProper -> CONFIG; true ->
- CurrOpts = case lists:keyfind(erl_opts, 1, CONFIG) of
- {erl_opts, Opts} -> Opts;
- false -> []
- end,
- NewOpts = [{d, 'WITH_PROPER'} | CurrOpts],
- lists:keystore(erl_opts, 1, CONFIG, {erl_opts, NewOpts})
-end.
diff --git a/src/mem3/src/mem3.app.src b/src/mem3/src/mem3.app.src
deleted file mode 100644
index 889ebf9a3..000000000
--- a/src/mem3/src/mem3.app.src
+++ /dev/null
@@ -1,40 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, mem3, [
- {description, "CouchDB Cluster Membership"},
- {vsn, git},
- {mod, {mem3_app, []}},
- {registered, [
- mem3_events,
- mem3_nodes,
- mem3_shards,
- mem3_sync,
- mem3_sync_nodes,
- mem3_reshard,
- mem3_sup
- ]},
- {applications, [
- kernel,
- stdlib,
- config,
- sasl,
- crypto,
- mochiweb,
- couch_epi,
- couch,
- rexi,
- couch_log,
- couch_event,
- couch_stats
- ]}
-]}.
diff --git a/src/mem3/src/mem3.erl b/src/mem3/src/mem3.erl
deleted file mode 100644
index 6f3a10df8..000000000
--- a/src/mem3/src/mem3.erl
+++ /dev/null
@@ -1,424 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3).
-
--export([start/0, stop/0, restart/0, nodes/0, node_info/2, shards/1, shards/2,
- choose_shards/2, n/1, n/2, dbname/1, ushards/1, ushards/2]).
--export([get_shard/3, local_shards/1, shard_suffix/1, fold_shards/2]).
--export([sync_security/0, sync_security/1]).
--export([compare_nodelists/0, compare_shards/1]).
--export([quorum/1, group_by_proximity/1]).
--export([live_shards/2]).
--export([belongs/2, owner/3]).
--export([get_placement/1]).
--export([ping/1, ping/2]).
--export([db_is_current/1]).
-
-%% For mem3 use only.
--export([name/1, node/1, range/1, engine/1]).
-
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(PING_TIMEOUT_IN_MS, 60000).
-
-start() ->
- application:start(mem3).
-
-stop() ->
- application:stop(mem3).
-
-restart() ->
- stop(),
- start().
-
-%% @doc Detailed report of cluster-wide membership state. Queries the state
-%% on all member nodes and builds a dictionary with unique states as the
-%% key and the nodes holding that state as the value. Also reports member
-%% nodes which fail to respond and nodes which are connected but are not
-%% cluster members. Useful for debugging.
--spec compare_nodelists() -> [{{cluster_nodes, [node()]} | bad_nodes
- | non_member_nodes, [node()]}].
-compare_nodelists() ->
- Nodes = mem3:nodes(),
- AllNodes = erlang:nodes([this, visible]),
- {Replies, BadNodes} = gen_server:multi_call(Nodes, mem3_nodes, get_nodelist),
- Dict = lists:foldl(fun({Node, Nodelist}, D) ->
- orddict:append({cluster_nodes, Nodelist}, Node, D)
- end, orddict:new(), Replies),
- [{non_member_nodes, AllNodes -- Nodes}, {bad_nodes, BadNodes} | Dict].
-
--spec compare_shards(DbName::iodata()) -> [{bad_nodes | [#shard{}], [node()]}].
-compare_shards(DbName) when is_list(DbName) ->
- compare_shards(list_to_binary(DbName));
-compare_shards(DbName) ->
- Nodes = mem3:nodes(),
- {Replies, BadNodes} = rpc:multicall(mem3, shards, [DbName]),
- GoodNodes = [N || N <- Nodes, not lists:member(N, BadNodes)],
- Dict = lists:foldl(fun({Shards, Node}, D) ->
- orddict:append(Shards, Node, D)
- end, orddict:new(), lists:zip(Replies, GoodNodes)),
- [{bad_nodes, BadNodes} | Dict].
-
--spec n(DbName::iodata()) -> integer().
-n(DbName) ->
- % Use _design to avoid issues with
- % partition validation
- n(DbName, <<"_design/foo">>).
-
-n(DbName, DocId) ->
- length(mem3:shards(DbName, DocId)).
-
--spec nodes() -> [node()].
-nodes() ->
- mem3_nodes:get_nodelist().
-
-node_info(Node, Key) ->
- mem3_nodes:get_node_info(Node, Key).
-
--spec shards(DbName::iodata()) -> [#shard{}].
-shards(DbName) ->
- shards_int(DbName, []).
-
-shards_int(DbName, Options) when is_list(DbName) ->
- shards_int(list_to_binary(DbName), Options);
-shards_int(DbName, Options) ->
- Ordered = lists:member(ordered, Options),
- ShardDbName =
- list_to_binary(config:get("mem3", "shards_db", "_dbs")),
- case DbName of
- ShardDbName when Ordered ->
- %% shard_db is treated as a single sharded db to support calls to db_info
- %% and view_all_docs
- [#ordered_shard{
- node = node(),
- name = ShardDbName,
- dbname = ShardDbName,
- range = [0, (2 bsl 31)-1],
- order = undefined,
- opts = []}];
- ShardDbName ->
- %% shard_db is treated as a single sharded db to support calls to db_info
- %% and view_all_docs
- [#shard{
- node = node(),
- name = ShardDbName,
- dbname = ShardDbName,
- range = [0, (2 bsl 31)-1],
- opts = []}];
- _ ->
- mem3_shards:for_db(DbName, Options)
- end.
-
--spec shards(DbName::iodata(), DocId::binary()) -> [#shard{}].
-shards(DbName, DocId) ->
- shards_int(DbName, DocId, []).
-
-shards_int(DbName, DocId, Options) when is_list(DbName) ->
- shards_int(list_to_binary(DbName), DocId, Options);
-shards_int(DbName, DocId, Options) when is_list(DocId) ->
- shards_int(DbName, list_to_binary(DocId), Options);
-shards_int(DbName, DocId, Options) ->
- mem3_shards:for_docid(DbName, DocId, Options).
-
-
--spec ushards(DbName::iodata()) -> [#shard{}].
-ushards(DbName) ->
- Nodes = [node()|erlang:nodes()],
- ZoneMap = zone_map(Nodes),
- Shards = ushards(DbName, live_shards(DbName, Nodes, [ordered]), ZoneMap),
- mem3_util:downcast(Shards).
-
--spec ushards(DbName::iodata(), DocId::binary()) -> [#shard{}].
-ushards(DbName, DocId) ->
- Shards = shards_int(DbName, DocId, [ordered]),
- Shard = hd(Shards),
- mem3_util:downcast([Shard]).
-
-ushards(DbName, Shards0, ZoneMap) ->
- {L,S,D} = group_by_proximity(Shards0, ZoneMap),
- % Prefer shards in the local zone over shards in a different zone,
- % but sort each zone separately to ensure a consistent choice between
- % nodes in the same zone.
- Shards = choose_ushards(DbName, L ++ S) ++ choose_ushards(DbName, D),
- OverlappedShards = lists:ukeysort(#shard.range, Shards),
- mem3_util:non_overlapping_shards(OverlappedShards).
-
-get_shard(DbName, Node, Range) ->
- mem3_shards:get(DbName, Node, Range).
-
-local_shards(DbName) ->
- mem3_shards:local(DbName).
-
-shard_suffix(DbName0) when is_binary(DbName0) ->
- Shard = hd(shards(DbName0)),
- <<"shards/", _:8/binary, "-", _:8/binary, "/", DbName/binary>> =
- Shard#shard.name,
- filename:extension(binary_to_list(DbName));
-shard_suffix(Db) ->
- shard_suffix(couch_db:name(Db)).
-
-fold_shards(Fun, Acc) ->
- mem3_shards:fold(Fun, Acc).
-
-sync_security() ->
- mem3_sync_security:go().
-
-sync_security(Db) ->
- mem3_sync_security:go(dbname(Db)).
-
--spec choose_shards(DbName::iodata(), Options::list()) -> [#shard{}].
-choose_shards(DbName, Options) when is_list(DbName) ->
- choose_shards(list_to_binary(DbName), Options);
-choose_shards(DbName, Options) ->
- try shards(DbName)
- catch error:E when E==database_does_not_exist; E==badarg ->
- Nodes = allowed_nodes(),
- case get_placement(Options) of
- undefined ->
- choose_shards(DbName, Nodes, Options);
- Placement ->
- lists:flatmap(fun({Zone, N}) ->
- NodesInZone = nodes_in_zone(Nodes, Zone),
- Options1 = lists:keymerge(1, [{n,N}], Options),
- choose_shards(DbName, NodesInZone, Options1)
- end, Placement)
- end
- end.
-
-choose_shards(DbName, Nodes, Options) ->
- NodeCount = length(Nodes),
- Suffix = couch_util:get_value(shard_suffix, Options, ""),
- N = mem3_util:n_val(couch_util:get_value(n, Options), NodeCount),
- if N =:= 0 -> erlang:error(no_nodes_in_zone);
- true -> ok
- end,
- Q = mem3_util:q_val(couch_util:get_value(q, Options,
- config:get("cluster", "q", "8"))),
- %% rotate to a random entry in the nodelist for even distribution
- RotatedNodes = rotate_rand(Nodes),
- mem3_util:create_partition_map(DbName, N, Q, RotatedNodes, Suffix).
-
-rotate_rand(Nodes) ->
- {A, B} = lists:split(couch_rand:uniform(length(Nodes)), Nodes),
- B ++ A.
-
-get_placement(Options) ->
- case couch_util:get_value(placement, Options) of
- undefined ->
- case config:get("cluster", "placement") of
- undefined ->
- undefined;
- PlacementStr ->
- decode_placement_string(PlacementStr)
- end;
- PlacementStr ->
- decode_placement_string(PlacementStr)
- end.
-
-decode_placement_string(PlacementStr) ->
- [begin
- [Zone, N] = string:tokens(Rule, ":"),
- {list_to_binary(Zone), list_to_integer(N)}
- end || Rule <- string:tokens(PlacementStr, ",")].
-
--spec dbname(#shard{} | iodata()) -> binary().
-dbname(#shard{dbname = DbName}) ->
- DbName;
-dbname(<<"shards/", _:8/binary, "-", _:8/binary, "/", DbName/binary>>) ->
- list_to_binary(filename:rootname(binary_to_list(DbName)));
-dbname(DbName) when is_list(DbName) ->
- dbname(list_to_binary(DbName));
-dbname(DbName) when is_binary(DbName) ->
- DbName;
-dbname(_) ->
- erlang:error(badarg).
-
-%% @doc Determine if DocId belongs in shard (identified by record or filename)
-belongs(#shard{}=Shard, DocId) when is_binary(DocId) ->
- [Begin, End] = range(Shard),
- belongs(Begin, End, Shard, DocId);
-belongs(<<"shards/", _/binary>> = ShardName, DocId) when is_binary(DocId) ->
- [Begin, End] = range(ShardName),
- belongs(Begin, End, ShardName, DocId);
-belongs(DbName, DocId) when is_binary(DbName), is_binary(DocId) ->
- true.
-
-belongs(Begin, End, Shard, DocId) ->
- HashKey = mem3_hash:calculate(Shard, DocId),
- Begin =< HashKey andalso HashKey =< End.
-
-range(#shard{range = Range}) ->
- Range;
-range(#ordered_shard{range = Range}) ->
- Range;
-range(<<"shards/", Start:8/binary, "-", End:8/binary, "/", _/binary>>) ->
- [httpd_util:hexlist_to_integer(binary_to_list(Start)),
- httpd_util:hexlist_to_integer(binary_to_list(End))].
-
-allowed_nodes() ->
- lists:filter(fun(Node) ->
- Decom = mem3:node_info(Node, <<"decom">>),
- (Decom =/= true) andalso (Decom =/= <<"true">>)
- end, mem3:nodes()).
-
-nodes_in_zone(Nodes, Zone) ->
- [Node || Node <- Nodes, Zone == mem3:node_info(Node, <<"zone">>)].
-
-live_shards(DbName, Nodes) ->
- live_shards(DbName, Nodes, []).
-
-live_shards(DbName, Nodes, Options) ->
- [S || S <- shards_int(DbName, Options), lists:member(mem3:node(S), Nodes)].
-
-zone_map(Nodes) ->
- [{Node, node_info(Node, <<"zone">>)} || Node <- Nodes].
-
-group_by_proximity(Shards) ->
- Nodes = [mem3:node(S) || S <- lists:ukeysort(#shard.node, Shards)],
- group_by_proximity(Shards, zone_map(Nodes)).
-
-group_by_proximity(Shards, ZoneMap) ->
- {Local, Remote} = lists:partition(fun(S) -> mem3:node(S) =:= node() end,
- Shards),
- LocalZone = proplists:get_value(node(), ZoneMap),
- Fun = fun(S) -> proplists:get_value(mem3:node(S), ZoneMap) =:= LocalZone end,
- {SameZone, DifferentZone} = lists:partition(Fun, Remote),
- {Local, SameZone, DifferentZone}.
-
-choose_ushards(DbName, Shards) ->
- Groups0 = group_by_range(Shards),
- Groups1 = [mem3_util:rotate_list({DbName, R}, order_shards(G))
- || {R, G} <- Groups0],
- [hd(G) || G <- Groups1].
-
-order_shards([#ordered_shard{}|_]=OrderedShards) ->
- lists:keysort(#ordered_shard.order, OrderedShards);
-order_shards(UnorderedShards) ->
- UnorderedShards.
-
-group_by_range(Shards) ->
- lists:foldl(fun(Shard, Dict) ->
- orddict:append(mem3:range(Shard), Shard, Dict) end, orddict:new(), Shards).
-
-% quorum functions
-
-quorum(DbName) when is_binary(DbName) ->
- n(DbName) div 2 + 1;
-quorum(Db) ->
- quorum(couch_db:name(Db)).
-
-
-node(#shard{node=Node}) ->
- Node;
-node(#ordered_shard{node=Node}) ->
- Node.
-
-name(#shard{name=Name}) ->
- Name;
-name(#ordered_shard{name=Name}) ->
- Name.
-
-% Direct calculation of node membership. This is the algorithm part. It
-% doesn't read the shard map, just picks owner based on a hash.
--spec owner(binary(), binary(), [node()]) -> node().
-owner(DbName, DocId, Nodes) ->
- hd(mem3_util:rotate_list({DbName, DocId}, lists:usort(Nodes))).
-
-engine(#shard{opts=Opts}) ->
- engine(Opts);
-engine(#ordered_shard{opts=Opts}) ->
- engine(Opts);
-engine(Opts) when is_list(Opts) ->
- case couch_util:get_value(engine, Opts) of
- Engine when is_binary(Engine) ->
- [{engine, Engine}];
- _ ->
- []
- end.
-
-%% Check whether a node is up or down
-%% side effect: set up a connection to Node if there is not yet one.
-
--spec ping(Node :: atom()) -> pong | pang.
-
-ping(Node) ->
- ping(Node, ?PING_TIMEOUT_IN_MS).
-
--spec ping(Node :: atom(), Timeout :: pos_integer()) -> pong | pang.
-
-ping(Node, Timeout) when is_atom(Node) ->
- %% The implementation of the function is copied from
- %% lib/kernel/src/net_adm.erl with addition of a Timeout
- case catch gen:call({net_kernel, Node},
- '$gen_call', {is_auth, node()}, Timeout) of
- {ok, yes} -> pong;
- _ ->
- erlang:disconnect_node(Node),
- pang
- end.
-
-
-db_is_current(#shard{name = Name}) ->
- db_is_current(Name);
-
-db_is_current(<<"shards/", _/binary>> = Name) ->
- try
- Shards = mem3:shards(mem3:dbname(Name)),
- lists:keyfind(Name, #shard.name, Shards) =/= false
- catch
- error:database_does_not_exist ->
- false
- end;
-
-db_is_current(Name) when is_binary(Name) ->
- % This accounts for local (non-sharded) dbs, and is mostly
- % for unit tests that either test or use mem3_rep logic
- couch_server:exists(Name).
-
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
--define(ALLOWED_NODE, 'node1@127.0.0.1').
-
-allowed_nodes_test_() ->
- {"allowed_nodes test", [{
- setup,
- fun () ->
- Props = [
- {?ALLOWED_NODE, []},
- {'node2@127.0.0.1', [{<<"decom">>,<<"true">>}]},
- {'node3@127.0.0.1', [{<<"decom">>,true}]}],
- ok = meck:expect(mem3_nodes, get_nodelist,
- fun() -> proplists:get_keys(Props) end),
- ok = meck:expect(mem3_nodes, get_node_info,
- fun(Node, Key) ->
- couch_util:get_value(Key, proplists:get_value(Node, Props))
- end)
- end,
- fun (_) -> meck:unload() end,
- [
- ?_assertMatch([?ALLOWED_NODE], allowed_nodes())
- ]
- }]}.
-
-rotate_rand_degenerate_test() ->
- ?assertEqual([1], rotate_rand([1])).
-
-rotate_rand_distribution_test() ->
- Cases = [rotate_rand([1, 2, 3]) || _ <- lists:seq(1, 100)],
- ?assertEqual(3, length(lists:usort(Cases))).
-
--endif.
diff --git a/src/mem3/src/mem3_app.erl b/src/mem3/src/mem3_app.erl
deleted file mode 100644
index 3ddfbe6fd..000000000
--- a/src/mem3/src/mem3_app.erl
+++ /dev/null
@@ -1,21 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_app).
--behaviour(application).
--export([start/2, stop/1]).
-
-start(_Type, []) ->
- mem3_sup:start_link().
-
-stop([]) ->
- ok.
diff --git a/src/mem3/src/mem3_cluster.erl b/src/mem3/src/mem3_cluster.erl
deleted file mode 100644
index 7e3d477cb..000000000
--- a/src/mem3/src/mem3_cluster.erl
+++ /dev/null
@@ -1,161 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% Maintain cluster stability information. A cluster is considered stable if
-% there were no changes to it during a given period of time.
-%
-% To be notified of cluster stability / instability the owner module must
-% implement the mem3_cluster behavior. When cluster membership changes, the
-% cluster_unstable behavior callback will be called. After that, if there are
-% no more changes to the cluster, the cluster_stable callback will be called.
-%
-% The period is passed in as a start argument but it can also be set
-% dynamically via the set_period/2 API call.
-%
-% In some cases it might be useful to have a shorter period during startup.
-% That can be configured via the StartPeriod argument. If the time since start
-% is less than a full period, then the StartPeriod is used as the period.
-
-
--module(mem3_cluster).
-
--behaviour(gen_server).
-
--export([
- start_link/4,
- set_period/2
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3
-]).
-
-
--callback cluster_stable(Context :: term()) -> NewContext :: term().
--callback cluster_unstable(Context :: term()) -> NewContext :: term().
-
-
--record(state, {
- mod :: atom(),
- ctx :: term(),
- start_time :: erlang:timestamp(),
- last_change :: erlang:timestamp(),
- period :: integer(),
- start_period :: integer(),
- timer :: reference()
-}).
-
-
--spec start_link(module(), term(), integer(), integer()) ->
- {ok, pid()} | ignore | {error, term()}.
-start_link(Module, Context, StartPeriod, Period)
- when is_atom(Module), is_integer(StartPeriod), is_integer(Period) ->
- gen_server:start_link(?MODULE, [Module, Context, StartPeriod, Period], []).
-
-
--spec set_period(pid(), integer()) -> ok.
-set_period(Server, Period) when is_pid(Server), is_integer(Period) ->
- gen_server:cast(Server, {set_period, Period}).
-
-
-% gen_server callbacks
-
-init([Module, Context, StartPeriod, Period]) ->
- net_kernel:monitor_nodes(true),
- {ok, #state{
- mod = Module,
- ctx = Context,
- start_time = os:timestamp(),
- last_change = os:timestamp(),
- period = Period,
- start_period = StartPeriod,
- timer = new_timer(StartPeriod)
- }}.
-
-
-terminate(_Reason, _State) ->
- ok.
-
-handle_call(_Msg, _From, State) ->
- {reply, ignored, State}.
-
-
-handle_cast({set_period, Period}, State) ->
- {noreply, State#state{period = Period}}.
-
-
-handle_info({nodeup, _Node}, State) ->
- {noreply, cluster_changed(State)};
-
-handle_info({nodedown, _Node}, State) ->
- {noreply, cluster_changed(State)};
-
-handle_info(stability_check, #state{mod = Mod, ctx = Ctx} = State) ->
- erlang:cancel_timer(State#state.timer),
- case now_diff_sec(State#state.last_change) > interval(State) of
- true ->
- {noreply, State#state{ctx = Mod:cluster_stable(Ctx)}};
- false ->
- Timer = new_timer(interval(State)),
- {noreply, State#state{timer = Timer}}
- end.
-
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-
-%% Internal functions
-
--spec cluster_changed(#state{}) -> #state{}.
-cluster_changed(#state{mod = Mod, ctx = Ctx} = State) ->
- State#state{
- last_change = os:timestamp(),
- timer = new_timer(interval(State)),
- ctx = Mod:cluster_unstable(Ctx)
- }.
-
-
--spec new_timer(non_neg_integer()) -> reference().
-new_timer(IntervalSec) ->
- erlang:send_after(IntervalSec * 1000, self(), stability_check).
-
-
-% For the first Period seconds after node boot we check cluster stability every
-% StartPeriod seconds. Once the initial Period seconds have passed we continue
-% to monitor once every Period seconds
--spec interval(#state{}) -> non_neg_integer().
-interval(#state{period = Period, start_period = StartPeriod,
- start_time = T0}) ->
- case now_diff_sec(T0) > Period of
- true ->
- % Normal operation
- Period;
- false ->
- % During startup
- StartPeriod
- end.
-
-
--spec now_diff_sec(erlang:timestamp()) -> non_neg_integer().
-now_diff_sec(Time) ->
- case timer:now_diff(os:timestamp(), Time) of
- USec when USec < 0 ->
- 0;
- USec when USec >= 0 ->
- USec / 1000000
- end.
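As a minimal sketch of an owner module implementing the two `mem3_cluster` callbacks shown above (the module name and the chosen periods are illustrative):

    -module(my_cluster_watcher).
    -behaviour(mem3_cluster).

    -export([start/0, cluster_stable/1, cluster_unstable/1]).

    %% Check every 5 seconds during startup, then every 60 seconds.
    start() ->
        mem3_cluster:start_link(?MODULE, #{}, 5, 60).

    %% Called once membership has stopped changing for a full period.
    cluster_stable(Context) ->
        couch_log:notice("cluster is stable", []),
        Context.

    %% Called whenever a nodeup or nodedown event is observed.
    cluster_unstable(Context) ->
        couch_log:notice("cluster membership changed", []),
        Context.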
diff --git a/src/mem3/src/mem3_epi.erl b/src/mem3/src/mem3_epi.erl
deleted file mode 100644
index 4bf2bf5d2..000000000
--- a/src/mem3/src/mem3_epi.erl
+++ /dev/null
@@ -1,51 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
--module(mem3_epi).
-
--behaviour(couch_epi_plugin).
-
--export([
- app/0,
- providers/0,
- services/0,
- data_subscriptions/0,
- data_providers/0,
- processes/0,
- notify/3
-]).
-
-app() ->
- mem3.
-
-providers() ->
- [
- {couch_db, mem3_plugin_couch_db},
- {chttpd_handlers, mem3_httpd_handlers}
- ].
-
-
-services() ->
- [].
-
-data_subscriptions() ->
- [].
-
-data_providers() ->
- [].
-
-processes() ->
- [].
-
-notify(_Key, _Old, _New) ->
- ok.
diff --git a/src/mem3/src/mem3_hash.erl b/src/mem3/src/mem3_hash.erl
deleted file mode 100644
index 665c61cb1..000000000
--- a/src/mem3/src/mem3_hash.erl
+++ /dev/null
@@ -1,73 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_hash).
-
--export([
- calculate/2,
-
- get_hash_fun/1,
-
- crc32/1
-]).
-
-
--include_lib("mem3/include/mem3.hrl").
-
-
-calculate(#shard{opts = Opts}, DocId) ->
- Props = couch_util:get_value(props, Opts, []),
- MFA = get_hash_fun_int(Props),
- calculate(MFA, DocId);
-
-calculate(#ordered_shard{opts = Opts}, DocId) ->
- Props = couch_util:get_value(props, Opts, []),
- MFA = get_hash_fun_int(Props),
- calculate(MFA, DocId);
-
-calculate(DbName, DocId) when is_binary(DbName) ->
- MFA = get_hash_fun(DbName),
- calculate(MFA, DocId);
-
-calculate({Mod, Fun, Args}, DocId) ->
- erlang:apply(Mod, Fun, [DocId | Args]).
-
-
-get_hash_fun(#shard{opts = Opts}) ->
- get_hash_fun_int(Opts);
-
-get_hash_fun(#ordered_shard{opts = Opts}) ->
- get_hash_fun_int(Opts);
-
-get_hash_fun(DbName0) when is_binary(DbName0) ->
- DbName = mem3:dbname(DbName0),
- try
- [#shard{opts=Opts} | _] = mem3_shards:for_db(DbName),
- get_hash_fun_int(couch_util:get_value(props, Opts, []))
- catch error:database_does_not_exist ->
- {?MODULE, crc32, []}
- end.
-
-
-crc32(Item) when is_binary(Item) ->
- erlang:crc32(Item);
-crc32(Item) ->
- erlang:crc32(term_to_binary(Item)).
-
-
-get_hash_fun_int(Opts) when is_list(Opts) ->
- case lists:keyfind(hash, 1, Opts) of
- {hash, [Mod, Fun, Args]} ->
- {Mod, Fun, Args};
- _ ->
- {?MODULE, crc32, []}
- end.
diff --git a/src/mem3/src/mem3_httpd.erl b/src/mem3/src/mem3_httpd.erl
deleted file mode 100644
index 3df7e1876..000000000
--- a/src/mem3/src/mem3_httpd.erl
+++ /dev/null
@@ -1,84 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_httpd).
-
--export([handle_membership_req/1, handle_shards_req/2,
- handle_sync_req/2]).
-
-%% includes
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
-handle_membership_req(#httpd{method='GET',
- path_parts=[<<"_membership">>]} = Req) ->
- ClusterNodes = try mem3:nodes()
- catch _:_ -> {ok,[]} end,
- couch_httpd:send_json(Req, {[
- {all_nodes, lists:sort([node()|nodes()])},
- {cluster_nodes, lists:sort(ClusterNodes)}
- ]});
-handle_membership_req(#httpd{path_parts=[<<"_membership">>]}=Req) ->
- chttpd:send_method_not_allowed(Req, "GET").
-
-handle_shards_req(#httpd{method='GET',
- path_parts=[_DbName, <<"_shards">>]} = Req, Db) ->
- DbName = mem3:dbname(couch_db:name(Db)),
- Shards = mem3:shards(DbName),
- JsonShards = json_shards(Shards, dict:new()),
- couch_httpd:send_json(Req, {[
- {shards, JsonShards}
- ]});
-handle_shards_req(#httpd{method='GET',
- path_parts=[_DbName, <<"_shards">>, DocId]} = Req, Db) ->
- DbName = mem3:dbname(couch_db:name(Db)),
- Shards = mem3:shards(DbName, DocId),
- {[{Shard, Dbs}]} = json_shards(Shards, dict:new()),
- couch_httpd:send_json(Req, {[
- {range, Shard},
- {nodes, Dbs}
- ]});
-handle_shards_req(#httpd{path_parts=[_DbName, <<"_shards">>]}=Req, _Db) ->
- chttpd:send_method_not_allowed(Req, "GET");
-handle_shards_req(#httpd{path_parts=[_DbName, <<"_shards">>, _DocId]}=Req, _Db) ->
- chttpd:send_method_not_allowed(Req, "GET").
-
-handle_sync_req(#httpd{method='POST',
- path_parts=[_DbName, <<"_sync_shards">>]} = Req, Db) ->
- DbName = mem3:dbname(couch_db:name(Db)),
- ShardList = [S#shard.name || S <- mem3:ushards(DbName)],
- [ sync_shard(S) || S <- ShardList ],
- chttpd:send_json(Req, 202, {[{ok, true}]});
-handle_sync_req(Req, _) ->
- chttpd:send_method_not_allowed(Req, "POST").
-
-
-%%
-%% internal
-%%
-
-json_shards([], AccIn) ->
- List = dict:to_list(AccIn),
- {lists:sort(List)};
-json_shards([#shard{node=Node, range=[B,E]} | Rest], AccIn) ->
- HexBeg = couch_util:to_hex(<<B:32/integer>>),
- HexEnd = couch_util:to_hex(<<E:32/integer>>),
- Range = list_to_binary(HexBeg ++ "-" ++ HexEnd),
- json_shards(Rest, dict:append(Range, Node, AccIn)).
-
-sync_shard(ShardName) ->
- Shards = mem3_shards:for_shard_range(ShardName),
- [rpc:call(S1#shard.node, mem3_sync, push, [S1, S2#shard.node]) ||
- S1 <- Shards, S2 <- Shards, S1 =/= S2],
- ok.
-
diff --git a/src/mem3/src/mem3_httpd_handlers.erl b/src/mem3/src/mem3_httpd_handlers.erl
deleted file mode 100644
index eeec1edf3..000000000
--- a/src/mem3/src/mem3_httpd_handlers.erl
+++ /dev/null
@@ -1,61 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_httpd_handlers).
-
--export([url_handler/1, db_handler/1, design_handler/1, handler_info/3]).
-
-url_handler(<<"_membership">>) -> fun mem3_httpd:handle_membership_req/1;
-url_handler(<<"_reshard">>) -> fun mem3_reshard_httpd:handle_reshard_req/1;
-url_handler(_) -> no_match.
-
-db_handler(<<"_shards">>) -> fun mem3_httpd:handle_shards_req/2;
-db_handler(<<"_sync_shards">>) -> fun mem3_httpd:handle_sync_req/2;
-db_handler(_) -> no_match.
-
-design_handler(_) -> no_match.
-
-handler_info('GET', [<<"_membership">>], _) ->
- {'cluster.membership.read', #{}};
-
-handler_info('GET', [<<"_reshard">>], _) ->
- {'reshard.summary.read', #{}};
-
-handler_info('GET', [<<"_reshard">>, <<"state">>], _) ->
- {'reshard.state.read', #{}};
-
-handler_info('PUT', [<<"_reshard">>, <<"state">>], _) ->
- {'reshard.state.write', #{}};
-
-handler_info('GET', [<<"_reshard">>, <<"jobs">>], _) ->
- {'reshard.jobs.read', #{}};
-
-handler_info('POST', [<<"_reshard">>, <<"jobs">>], _) ->
- {'reshard.jobs.create', #{}};
-
-handler_info('GET', [<<"_reshard">>, <<"jobs">>, JobId], _) ->
- {'reshard.job.read', #{'job.id' => JobId}};
-
-handler_info('DELETE', [<<"_reshard">>, <<"jobs">>, JobId], _) ->
- {'reshard.job.delete', #{'job.id' => JobId}};
-
-handler_info('GET', [DbName, <<"_shards">>], _) ->
- {'db.shards.read', #{'db.name' => DbName}};
-
-handler_info('GET', [DbName, <<"_shards">>, DocId], _) ->
- {'db.shards.read', #{'db.name' => DbName, 'doc.id' => DocId}};
-
-handler_info('POST', [DbName, <<"_sync_shards">>], _) ->
- {'db.shards.sync', #{'db.name' => DbName}};
-
-handler_info(_, _, _) ->
- no_match.
diff --git a/src/mem3/src/mem3_nodes.erl b/src/mem3/src/mem3_nodes.erl
deleted file mode 100644
index dd5be1a72..000000000
--- a/src/mem3/src/mem3_nodes.erl
+++ /dev/null
@@ -1,155 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_nodes).
--behaviour(gen_server).
--vsn(1).
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3]).
-
--export([start_link/0, get_nodelist/0, get_node_info/2]).
-
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--record(state, {changes_pid, update_seq}).
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-get_nodelist() ->
- try
- lists:sort([N || {N,_} <- ets:tab2list(?MODULE)])
- catch error:badarg ->
- gen_server:call(?MODULE, get_nodelist)
- end.
-
-get_node_info(Node, Key) ->
- try
- couch_util:get_value(Key, ets:lookup_element(?MODULE, Node, 2))
- catch error:badarg ->
- gen_server:call(?MODULE, {get_node_info, Node, Key})
- end.
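-% Note (added for clarity): the error:badarg clauses above cover the window
-% before init/1 has created the ?MODULE ets table (or after its owner died);
-% in that case the lookup falls back to a synchronous gen_server call.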
-
-init([]) ->
- ets:new(?MODULE, [named_table, {read_concurrency, true}]),
- UpdateSeq = initialize_nodelist(),
- {Pid, _} = spawn_monitor(fun() -> listen_for_changes(UpdateSeq) end),
- {ok, #state{changes_pid = Pid, update_seq = UpdateSeq}}.
-
-handle_call(get_nodelist, _From, State) ->
- {reply, lists:sort([N || {N,_} <- ets:tab2list(?MODULE)]), State};
-handle_call({get_node_info, Node, Key}, _From, State) ->
- Resp = try
- couch_util:get_value(Key, ets:lookup_element(?MODULE, Node, 2))
- catch error:badarg ->
- error
- end,
- {reply, Resp, State};
-handle_call({add_node, Node, NodeInfo}, _From, State) ->
- gen_event:notify(mem3_events, {add_node, Node}),
- ets:insert(?MODULE, {Node, NodeInfo}),
- {reply, ok, State};
-handle_call({remove_node, Node}, _From, State) ->
- gen_event:notify(mem3_events, {remove_node, Node}),
- ets:delete(?MODULE, Node),
- {reply, ok, State};
-handle_call(_Call, _From, State) ->
- {noreply, State}.
-
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info({'DOWN', _, _, Pid, Reason}, #state{changes_pid=Pid} = State) ->
- couch_log:notice("~p changes listener died ~p", [?MODULE, Reason]),
- StartSeq = State#state.update_seq,
- Seq = case Reason of {seq, EndSeq} -> EndSeq; _ -> StartSeq end,
- erlang:send_after(5000, self(), start_listener),
- {noreply, State#state{update_seq = Seq}};
-handle_info(start_listener, #state{update_seq = Seq} = State) ->
- {NewPid, _} = spawn_monitor(fun() -> listen_for_changes(Seq) end),
- {noreply, State#state{changes_pid=NewPid}};
-handle_info(_Info, State) ->
- {noreply, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, #state{}=State, _Extra) ->
- {ok, State}.
-
-%% internal functions
-
-initialize_nodelist() ->
- DbName = config:get("mem3", "nodes_db", "_nodes"),
- {ok, Db} = mem3_util:ensure_exists(DbName),
- {ok, _} = couch_db:fold_docs(Db, fun first_fold/2, Db, []),
- insert_if_missing(Db, [node() | mem3_seeds:get_seeds()]),
- Seq = couch_db:get_update_seq(Db),
- couch_db:close(Db),
- Seq.
-
-first_fold(#full_doc_info{id = <<"_design/", _/binary>>}, Acc) ->
- {ok, Acc};
-first_fold(#full_doc_info{deleted=true}, Acc) ->
- {ok, Acc};
-first_fold(#full_doc_info{id=Id}=DocInfo, Db) ->
- {ok, #doc{body={Props}}} = couch_db:open_doc(Db, DocInfo, [ejson_body]),
- ets:insert(?MODULE, {mem3_util:to_atom(Id), Props}),
- {ok, Db}.
-
-listen_for_changes(Since) ->
- DbName = config:get("mem3", "nodes_db", "_nodes"),
- {ok, Db} = mem3_util:ensure_exists(DbName),
- Args = #changes_args{
- feed = "continuous",
- since = Since,
- heartbeat = true,
- include_docs = true
- },
- ChangesFun = couch_changes:handle_db_changes(Args, nil, Db),
- ChangesFun(fun changes_callback/2).
-
-changes_callback(start, _) ->
- {ok, nil};
-changes_callback({stop, EndSeq}, _) ->
- exit({seq, EndSeq});
-changes_callback({change, {Change}, _}, _) ->
- Node = couch_util:get_value(<<"id">>, Change),
- case Node of <<"_design/", _/binary>> -> ok; _ ->
- case mem3_util:is_deleted(Change) of
- false ->
- {Props} = couch_util:get_value(doc, Change),
- gen_server:call(?MODULE, {add_node, mem3_util:to_atom(Node), Props});
- true ->
- gen_server:call(?MODULE, {remove_node, mem3_util:to_atom(Node)})
- end
- end,
- {ok, couch_util:get_value(<<"seq">>, Change)};
-changes_callback(timeout, _) ->
- {ok, nil}.
-
-insert_if_missing(Db, Nodes) ->
- Docs = lists:foldl(fun(Node, Acc) ->
- case ets:lookup(?MODULE, Node) of
- [_] ->
- Acc;
- [] ->
- ets:insert(?MODULE, {Node, []}),
- [#doc{id = couch_util:to_binary(Node)} | Acc]
- end
- end, [], Nodes),
- if Docs =/= [] ->
- {ok, _} = couch_db:update_docs(Db, Docs, []);
- true ->
- {ok, []}
- end.
diff --git a/src/mem3/src/mem3_plugin_couch_db.erl b/src/mem3/src/mem3_plugin_couch_db.erl
deleted file mode 100644
index 8cb5d7898..000000000
--- a/src/mem3/src/mem3_plugin_couch_db.erl
+++ /dev/null
@@ -1,21 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_plugin_couch_db).
-
--export([
- is_valid_purge_client/2
-]).
-
-
-is_valid_purge_client(DbName, Props) ->
- mem3_rep:verify_purge_checkpoint(DbName, Props).
diff --git a/src/mem3/src/mem3_rep.erl b/src/mem3/src/mem3_rep.erl
deleted file mode 100644
index 7fa0fc027..000000000
--- a/src/mem3/src/mem3_rep.erl
+++ /dev/null
@@ -1,998 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_rep).
-
-
--export([
- go/2,
- go/3,
- make_local_id/2,
- make_local_id/3,
- make_purge_id/2,
- verify_purge_checkpoint/2,
- find_source_seq/4,
- find_split_target_seq/4,
- local_id_hash/1
-]).
-
--export([
- changes_enumerator/2
-]).
-
-
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--record(acc, {
- batch_size,
- batch_count,
- seq = 0,
- revcount = 0,
- source,
- targets,
- filter,
- db,
- hashfun,
- incomplete_ranges
-}).
-
--record(tgt, {
- shard,
- seq = 0,
- infos = [],
- localid,
- purgeid,
- history = {[]},
- remaining = 0
-}).
-
-go(Source, Target) ->
- go(Source, Target, []).
-
-
-go(DbName, Node, Opts) when is_binary(DbName), is_atom(Node) ->
- go(#shard{name=DbName, node=node()}, #shard{name=DbName, node=Node}, Opts);
-
-go(#shard{} = Source, #shard{} = Target, Opts) ->
- case mem3:db_is_current(Source) of
- true ->
- go(Source, targets_map(Source, Target), Opts);
- false ->
- % Database could have been recreated
- {error, missing_source}
- end;
-
-go(#shard{} = Source, #{} = Targets0, Opts) when map_size(Targets0) > 0 ->
- Targets = maps:map(fun(_, T) -> #tgt{shard = T} end, Targets0),
- case couch_server:exists(Source#shard.name) of
- true ->
- sync_security(Source, Targets),
- BatchSize = case proplists:get_value(batch_size, Opts) of
- BS when is_integer(BS), BS > 0 -> BS;
- _ -> 100
- end,
- BatchCount = case proplists:get_value(batch_count, Opts) of
- all -> all;
- BC when is_integer(BC), BC > 0 -> BC;
- _ -> 1
- end,
- IncompleteRanges = config:get_boolean("mem3", "incomplete_ranges",
- false),
- Filter = proplists:get_value(filter, Opts),
- Acc = #acc{
- batch_size = BatchSize,
- batch_count = BatchCount,
- source = Source,
- targets = Targets,
- filter = Filter,
- incomplete_ranges = IncompleteRanges
- },
- go(Acc);
- false ->
- {error, missing_source}
- end.
-
-
-go(#acc{source=Source, batch_count=BC}=Acc) ->
- case couch_db:open(Source#shard.name, [?ADMIN_CTX]) of
- {ok, Db} ->
- Resp = try
- HashFun = mem3_hash:get_hash_fun(couch_db:name(Db)),
- repl(Acc#acc{db = Db, hashfun = HashFun})
- catch
- error:{error, missing_source} ->
- {error, missing_source};
- error:{not_found, no_db_file} ->
- {error, missing_target}
- after
- couch_db:close(Db)
- end,
- case Resp of
- {ok, P} when P > 0, BC == all ->
- go(Acc);
- {ok, P} when P > 0, BC > 1 ->
- go(Acc#acc{batch_count=BC-1});
- Else ->
- Else
- end;
- {not_found, no_db_file} ->
- {error, missing_source}
- end.
-
-
-make_local_id(Source, Target) ->
- make_local_id(Source, Target, undefined).
-
-
-make_local_id(#shard{node=SourceNode}, #shard{node=TargetNode}, Filter) ->
- make_local_id(SourceNode, TargetNode, Filter);
-
-make_local_id(SourceThing, TargetThing, F) when is_binary(F) ->
- S = local_id_hash(SourceThing),
- T = local_id_hash(TargetThing),
- <<"_local/shard-sync-", S/binary, "-", T/binary, F/binary>>;
-
-make_local_id(SourceThing, TargetThing, Filter) ->
- S = local_id_hash(SourceThing),
- T = local_id_hash(TargetThing),
- F = filter_hash(Filter),
- <<"_local/shard-sync-", S/binary, "-", T/binary, F/binary>>.
-
-
-filter_hash(Filter) when is_function(Filter) ->
- {new_uniq, Hash} = erlang:fun_info(Filter, new_uniq),
- B = couch_util:encodeBase64Url(Hash),
- <<"-", B/binary>>;
-
-filter_hash(_) ->
- <<>>.
-
-
-local_id_hash(Thing) ->
- couch_util:encodeBase64Url(couch_hash:md5_hash(term_to_binary(Thing))).
-
-
-make_purge_id(SourceUUID, TargetUUID) ->
- <<"_local/purge-mem3-", SourceUUID/binary, "-", TargetUUID/binary>>.
-
-
-verify_purge_checkpoint(DbName, Props) ->
- try
- Type = couch_util:get_value(<<"type">>, Props),
- if Type =/= <<"internal_replication">> -> false; true ->
- SourceBin = couch_util:get_value(<<"source">>, Props),
- TargetBin = couch_util:get_value(<<"target">>, Props),
- Range = couch_util:get_value(<<"range">>, Props),
-
- Source = binary_to_existing_atom(SourceBin, latin1),
- Target = binary_to_existing_atom(TargetBin, latin1),
-
- try
- Nodes = lists:foldl(fun(Shard, Acc) ->
- case Shard#shard.range == Range of
- true -> [Shard#shard.node | Acc];
- false -> Acc
- end
- end, [], mem3:shards(DbName)),
- lists:member(Source, Nodes) andalso lists:member(Target, Nodes)
- catch
- error:database_does_not_exist ->
- false
- end
- end
- catch _:_ ->
- false
- end.
-
-
-%% @doc Find and return the largest update_seq in SourceDb
-%% that the client has seen from TargetNode.
-%%
-%% When reasoning about this function it is very important to
-%% understand the direction of replication for this comparison.
-%% We're only interested in internal replications initiated
-%% by this node to the node being replaced. When doing a
-%% replacement the most important thing is that the client doesn't
-%% miss any updates. This means we can only fast-forward as far
-%% as they've seen updates on this node. We can detect that by
-%% looking for our push replication history and choosing the
-%% largest source_seq that has a target_seq =< TgtSeq.
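-%%
-%% Illustrative example (added for clarity): with the history from doc_/0 in
-%% the tests below, find_source_seq_int(doc_(), <<"foo">>, <<"bar">>,
-%% <<"bar_uuid">>, 84) returns 50, because the entry with source_seq 90 has
-%% target_seq 85 > 84, so 50 is the largest source_seq safe to fast-forward to.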
-find_source_seq(SrcDb, TgtNode, TgtUUIDPrefix, TgtSeq) ->
- case find_repl_doc(SrcDb, TgtUUIDPrefix) of
- {ok, TgtUUID, Doc} ->
- SrcNode = atom_to_binary(node(), utf8),
- find_source_seq_int(Doc, SrcNode, TgtNode, TgtUUID, TgtSeq);
- {not_found, _} ->
- couch_log:warning("~p find_source_seq repl doc not_found "
- "src_db: ~p, tgt_node: ~p, tgt_uuid_prefix: ~p, tgt_seq: ~p",
- [?MODULE, SrcDb, TgtNode, TgtUUIDPrefix, TgtSeq]),
- 0
- end.
-
-
-find_source_seq_int(#doc{body={Props}}, SrcNode0, TgtNode0, TgtUUID, TgtSeq) ->
- SrcNode = case is_atom(SrcNode0) of
- true -> atom_to_binary(SrcNode0, utf8);
- false -> SrcNode0
- end,
- TgtNode = case is_atom(TgtNode0) of
- true -> atom_to_binary(TgtNode0, utf8);
- false -> TgtNode0
- end,
- % This is split off purely for the ability to run unit tests
- % against this bit of code without requiring all sorts of mocks.
- {History} = couch_util:get_value(<<"history">>, Props, {[]}),
- SrcHistory = couch_util:get_value(SrcNode, History, []),
- UseableHistory = lists:filter(fun({Entry}) ->
- couch_util:get_value(<<"target_node">>, Entry) =:= TgtNode andalso
- couch_util:get_value(<<"target_uuid">>, Entry) =:= TgtUUID andalso
- couch_util:get_value(<<"target_seq">>, Entry) =< TgtSeq
- end, SrcHistory),
-
- % This relies on SrcHistory being sorted in descending order by
- % source sequence.
- case UseableHistory of
- [{Entry} | _] ->
- couch_util:get_value(<<"source_seq">>, Entry);
- [] ->
- couch_log:warning("~p find_source_seq_int nil useable history "
- "src_node: ~p, tgt_node: ~p, tgt_uuid: ~p, tgt_seq: ~p, "
- "src_history: ~p",
- [?MODULE, SrcNode, TgtNode, TgtUUID, TgtSeq, SrcHistory]),
- 0
- end.
-
-
-find_split_target_seq(TgtDb, SrcNode0, SrcUUIDPrefix, SrcSeq) ->
- SrcNode = case is_atom(SrcNode0) of
- true -> atom_to_binary(SrcNode0, utf8);
- false -> SrcNode0
- end,
- case find_split_target_seq_int(TgtDb, SrcNode, SrcUUIDPrefix) of
- {ok, [{BulkCopySeq, BulkCopySeq} | _]} when SrcSeq =< BulkCopySeq ->
- % Check if source sequence is at or below the initial bulk copy
- % checkpointed sequence. That sequence or anything lower than it
- % can be directly replaced with the same value for each target. For
- % extra safety we assert that the initial source and target
- % sequences are the same value
- SrcSeq;
- {ok, Seqs = [{_, _} | _]} ->
- % Pick the target sequence for the greatest source sequence that is
- % less than `SrcSeq`.
- case lists:takewhile(fun({Seq, _}) -> Seq < SrcSeq end, Seqs) of
- [] ->
- couch_log:warning("~p find_split_target_seq target seq not found "
- "tgt_db: ~p, src_uuid_prefix: ~p, src_seq: ~p",
- [?MODULE, couch_db:name(TgtDb), SrcUUIDPrefix, SrcSeq]),
- 0;
- [{_, _} | _] = Seqs1 ->
- {_, TSeq} = lists:last(Seqs1),
- TSeq
- end;
- {not_found, _} ->
- couch_log:warning("~p find_split_target_seq target seq not found "
- "tgt_db: ~p, src_uuid_prefix: ~p, src_seq: ~p",
- [?MODULE, couch_db:name(TgtDb), SrcUUIDPrefix, SrcSeq]),
- 0
- end.
-
-
-repl(#acc{db = Db0} = Acc0) ->
- erlang:put(io_priority, {internal_repl, couch_db:name(Db0)}),
- Acc1 = calculate_start_seq_multi(Acc0),
- try
- Acc3 = case config:get_boolean("mem3", "replicate_purges", false) of
- true ->
- Acc2 = pull_purges_multi(Acc1),
- push_purges_multi(Acc2);
- false ->
- Acc1
- end,
- push_changes(Acc3)
- catch
- throw:{finished, Count} ->
- {ok, Count}
- end.
-
-
-pull_purges_multi(#acc{source = Source} = Acc0) ->
- #acc{batch_size = Count, seq = UpdateSeq, targets = Targets0} = Acc0,
- with_src_db(Acc0, fun(Db) ->
- Targets = maps:map(fun(_, #tgt{} = T) ->
- pull_purges(Db, Count, Source, T)
- end, reset_remaining(Targets0)),
- Remaining = maps:fold(fun(_, #tgt{remaining = R}, Sum) ->
- Sum + R
- end, 0, Targets),
- if Remaining == 0 -> Acc0#acc{targets = Targets}; true ->
- PurgeSeq = couch_db:get_purge_seq(Db),
- OldestPurgeSeq = couch_db:get_oldest_purge_seq(Db),
- PurgesToPush = PurgeSeq - OldestPurgeSeq,
- Changes = couch_db:count_changes_since(Db, UpdateSeq),
- Pending = Remaining + PurgesToPush + Changes,
- throw({finished, Pending})
- end
- end).
-
-
-pull_purges(Db, Count, SrcShard, #tgt{} = Tgt0) ->
- #tgt{shard = TgtShard} = Tgt0,
- SrcUUID = couch_db:get_uuid(Db),
- #shard{node = TgtNode, name = TgtDbName} = TgtShard,
- {LocalPurgeId, Infos, ThroughSeq, Remaining} =
- mem3_rpc:load_purge_infos(TgtNode, TgtDbName, SrcUUID, Count),
- Tgt = Tgt0#tgt{purgeid = LocalPurgeId},
- if Infos == [] -> ok; true ->
- {ok, _} = couch_db:purge_docs(Db, Infos, [replicated_edits]),
- Body = purge_cp_body(SrcShard, TgtShard, ThroughSeq),
- mem3_rpc:save_purge_checkpoint(TgtNode, TgtDbName, LocalPurgeId, Body)
- end,
- Tgt#tgt{remaining = max(0, Remaining)}.
-
-
-push_purges_multi(#acc{source = SrcShard} = Acc) ->
- #acc{batch_size = BatchSize, seq = UpdateSeq, targets = Targets0} = Acc,
- with_src_db(Acc, fun(Db) ->
- Targets = maps:map(fun(_, #tgt{} = T) ->
- push_purges(Db, BatchSize, SrcShard, T)
- end, reset_remaining(Targets0)),
- Remaining = maps:fold(fun(_, #tgt{remaining = R}, Sum) ->
- Sum + R
- end, 0, Targets),
- if Remaining == 0 -> Acc#acc{targets = Targets}; true ->
- Changes = couch_db:count_changes_since(Db, UpdateSeq),
- throw({finished, Remaining + Changes})
- end
- end).
-
-
-push_purges(Db, BatchSize, SrcShard, Tgt) ->
- #tgt{shard = TgtShard, purgeid = LocalPurgeId} = Tgt,
- #shard{node = TgtNode, name = TgtDbName} = TgtShard,
- StartSeq = case couch_db:open_doc(Db, LocalPurgeId, []) of
- {ok, #doc{body = {Props}}} ->
- couch_util:get_value(<<"purge_seq">>, Props);
- {not_found, _} ->
- Oldest = couch_db:get_oldest_purge_seq(Db),
- erlang:max(0, Oldest - 1)
- end,
- FoldFun = fun({PSeq, UUID, Id, Revs}, {Count, Infos, _}) ->
- NewCount = Count + length(Revs),
- NewInfos = [{UUID, Id, Revs} | Infos],
- Status = if NewCount < BatchSize -> ok; true -> stop end,
- {Status, {NewCount, NewInfos, PSeq}}
- end,
- InitAcc = {0, [], StartSeq},
- {ok, {_, Infos, ThroughSeq}} =
- couch_db:fold_purge_infos(Db, StartSeq, FoldFun, InitAcc),
- if Infos == [] -> ok; true ->
- ok = purge_on_target(TgtNode, TgtDbName, Infos),
- Body = purge_cp_body(SrcShard, TgtShard, ThroughSeq),
- Doc = #doc{id = LocalPurgeId, body = Body},
- {ok, _} = couch_db:update_doc(Db, Doc, [])
- end,
- Tgt#tgt{remaining = max(0, couch_db:get_purge_seq(Db) - ThroughSeq)}.
-
-
-calculate_start_seq_multi(#acc{} = Acc) ->
- #acc{db = Db, targets = Targets0, filter = Filter} = Acc,
- FilterHash = filter_hash(Filter),
- Targets = maps:map(fun(_, #tgt{} = T) ->
- calculate_start_seq(Db, FilterHash, T)
- end, Targets0),
- % There will always be at least one target
- #tgt{seq = Seq0} = hd(maps:values(Targets)),
- Seq = maps:fold(fun(_, #tgt{seq = S}, M) -> min(S, M) end, Seq0, Targets),
- Acc#acc{seq = Seq, targets = Targets}.
-
-
-calculate_start_seq(Db, FilterHash, #tgt{shard = TgtShard} = Tgt) ->
- UUID = couch_db:get_uuid(Db),
- #shard{node = Node, name = Name} = TgtShard,
- {NewDocId, Doc} = mem3_rpc:load_checkpoint(Node, Name, node(), UUID,
- FilterHash),
- #doc{id=FoundId, body={TProps}} = Doc,
- Tgt1 = Tgt#tgt{localid = NewDocId},
- % NewDocId and FoundId may be different the first time
- % this code runs to save our newly named internal replication
- % checkpoints. We store NewDocId to use when saving checkpoints
- % but use FoundId to reuse the same docid that the target used.
- case couch_db:open_doc(Db, FoundId, [ejson_body]) of
- {ok, #doc{body = {SProps}}} ->
- SourceSeq = couch_util:get_value(<<"seq">>, SProps, 0),
- TargetSeq = couch_util:get_value(<<"seq">>, TProps, 0),
- % We resume from the lower update seq stored in the two
- % shard copies. We also need to be sure to use the
- % corresponding history. A difference here could result
- % from either a write failure on one of the nodes or if
- % either shard was truncated by an operator.
- case SourceSeq =< TargetSeq of
- true ->
- Seq = SourceSeq,
- History = couch_util:get_value(<<"history">>, SProps, {[]});
- false ->
- Seq = TargetSeq,
- History = couch_util:get_value(<<"history">>, TProps, {[]})
- end,
- Tgt1#tgt{seq = Seq, history = History};
- {not_found, _} ->
- compare_epochs(Db, Tgt1)
- end.
-
-
-push_changes(#acc{} = Acc0) ->
- #acc{
- db = Db0,
- seq = Seq
- } = Acc0,
-
- % Avoid needlessly rewriting the internal replication
- % checkpoint document if nothing is replicated.
- UpdateSeq = couch_db:get_update_seq(Db0),
- if Seq < UpdateSeq -> ok; true ->
- throw({finished, 0})
- end,
-
- with_src_db(Acc0, fun(Db) ->
- Acc1 = Acc0#acc{db = Db},
- Fun = fun ?MODULE:changes_enumerator/2,
- {ok, Acc2} = couch_db:fold_changes(Db, Seq, Fun, Acc1),
- {ok, #acc{seq = LastSeq}} = replicate_batch_multi(Acc2),
- {ok, couch_db:count_changes_since(Db, LastSeq)}
- end).
-
-
-compare_epochs(Db, #tgt{shard = TgtShard} = Tgt) ->
- #shard{node = Node, name = Name} = TgtShard,
- UUID = couch_db:get_uuid(Db),
- Epochs = couch_db:get_epochs(Db),
- Seq = mem3_rpc:find_common_seq(Node, Name, UUID, Epochs),
- Tgt#tgt{seq = Seq, history = {[]}}.
-
-
-changes_enumerator(#doc_info{id=DocId}, #acc{db=Db}=Acc) ->
- {ok, FDI} = couch_db:get_full_doc_info(Db, DocId),
- changes_enumerator(FDI, Acc);
-changes_enumerator(#full_doc_info{}=FDI, #acc{}=Acc0) ->
- #acc{
- revcount = C,
- targets = Targets0,
- hashfun = HashFun,
- incomplete_ranges = IncompleteRanges
- } = Acc0,
- #doc_info{high_seq=Seq, revs=Revs} = couch_doc:to_doc_info(FDI),
- {Count, Targets} = case filter_doc(Acc0#acc.filter, FDI) of
- keep ->
- NewTargets = changes_append_fdi(FDI, Targets0, HashFun,
- IncompleteRanges),
- {C + length(Revs), NewTargets};
- discard ->
- {C, Targets0}
- end,
- Acc1 = Acc0#acc{seq = Seq, revcount = Count, targets = Targets},
- Go = if Count < Acc1#acc.batch_size -> ok; true -> stop end,
- {Go, Acc1}.
-
-
-changes_append_fdi(#full_doc_info{id = Id} = FDI, Targets, HashFun,
- IncompleteRanges) ->
- case mem3_reshard_job:pickfun(Id, maps:keys(Targets), HashFun) of
- not_in_range when IncompleteRanges ->
- Targets;
- not_in_range when not IncompleteRanges ->
- ErrMsg = "~p : ~p not in any target ranges: ~p",
- TShards = [TS || #tgt{shard = TS} <- maps:values(Targets)],
- TNames = [TN || #shard{name = TN} <- TShards],
- couch_log:error(ErrMsg, [?MODULE, Id, TNames]),
- error({error, {Id, not_in_target_ranges}});
- Key ->
- maps:update_with(Key, fun(#tgt{infos = Infos} = T) ->
- T#tgt{infos = [FDI | Infos]}
- end, Targets)
- end.
-
-
-replicate_batch_multi(#acc{targets = Targets0, seq = Seq, db = Db} = Acc) ->
- Targets = maps:map(fun(_, #tgt{} = T) ->
- replicate_batch(T, Db, Seq)
- end, Targets0),
- {ok, Acc#acc{targets = Targets, revcount = 0}}.
-
-
-replicate_batch(#tgt{shard = TgtShard, infos = Infos} = Target, Db, Seq) ->
- #shard{node = Node, name = Name} = TgtShard,
- case find_missing_revs(Target) of
- [] ->
- ok;
- Missing ->
- lists:map(fun(Chunk) ->
- Docs = open_docs(Db, Infos, Chunk),
- ok = save_on_target(Node, Name, Docs)
- end, chunk_revs(Missing))
- end,
- update_locals(Target, Db, Seq),
- Target#tgt{infos = []}.
-
-
-find_missing_revs(#tgt{shard = TgtShard, infos = Infos}) ->
- #shard{node = Node, name = Name} = TgtShard,
- IdsRevs = lists:map(fun(FDI) ->
- #doc_info{id=Id, revs=RevInfos} = couch_doc:to_doc_info(FDI),
- {Id, [R || #rev_info{rev=R} <- RevInfos]}
- end, Infos),
- Missing = mem3_rpc:get_missing_revs(Node, Name, IdsRevs, [
- {io_priority, {internal_repl, Name}},
- ?ADMIN_CTX
- ]),
- lists:filter(fun
- ({_Id, [], _Ancestors}) -> false;
- ({_Id, _Revs, _Ancestors}) -> true
- end, Missing).
-
-
-chunk_revs(Revs) ->
- Limit = list_to_integer(config:get("mem3", "rev_chunk_size", "5000")),
- chunk_revs(Revs, Limit).
-
-chunk_revs(Revs, Limit) ->
- chunk_revs(Revs, {0, []}, [], Limit).
-
-chunk_revs([], {_Count, Chunk}, Chunks, _Limit) ->
- [Chunk|Chunks];
-chunk_revs([{Id, R, A}|Revs], {Count, Chunk}, Chunks, Limit) when length(R) =< Limit - Count ->
- chunk_revs(
- Revs,
- {Count + length(R), [{Id, R, A}|Chunk]},
- Chunks,
- Limit
- );
-chunk_revs([{Id, R, A}|Revs], {Count, Chunk}, Chunks, Limit) ->
- {This, Next} = lists:split(Limit - Count, R),
- chunk_revs(
- [{Id, Next, A}|Revs],
- {0, []},
- [[{Id, This, A}|Chunk]|Chunks],
- Limit
- ).
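-% Worked example (added for clarity): with Limit = 2,
-%   chunk_revs([{<<"docA">>, [R1, R2, R3], []}], 2)
-% yields two chunks, [[{<<"docA">>, [R3], []}], [{<<"docA">>, [R1, R2], []}]],
-% i.e. a document's revisions are split across chunks of at most Limit revs,
-% and the chunks are returned last-built first.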
-
-
-open_docs(Db, Infos, Missing) ->
- lists:flatmap(fun({Id, Revs, _}) ->
- FDI = lists:keyfind(Id, #full_doc_info.id, Infos),
- #full_doc_info{rev_tree=RevTree} = FDI,
- {FoundRevs, _} = couch_key_tree:get_key_leafs(RevTree, Revs),
- lists:map(fun({#leaf{deleted=IsDel, ptr=SummaryPtr}, FoundRevPath}) ->
- couch_db:make_doc(Db, Id, IsDel, SummaryPtr, FoundRevPath)
- end, FoundRevs)
- end, Missing).
-
-
-save_on_target(Node, Name, Docs) ->
- mem3_rpc:update_docs(Node, Name, Docs, [
- replicated_changes,
- full_commit,
- ?ADMIN_CTX,
- {io_priority, {internal_repl, Name}}
- ]),
- ok.
-
-
-purge_on_target(Node, Name, PurgeInfos) ->
- mem3_rpc:purge_docs(Node, Name, PurgeInfos, [
- replicated_changes,
- full_commit,
- ?ADMIN_CTX,
- {io_priority, {internal_repl, Name}}
- ]),
- ok.
-
-
-update_locals(Target, Db, Seq) ->
- #tgt{shard = TgtShard, localid = Id, history = History} = Target,
- #shard{node = Node, name = Name} = TgtShard,
- NewEntry = [
- {<<"source_node">>, atom_to_binary(node(), utf8)},
- {<<"source_uuid">>, couch_db:get_uuid(Db)},
- {<<"source_seq">>, Seq},
- {<<"timestamp">>, list_to_binary(mem3_util:iso8601_timestamp())}
- ],
- NewBody = mem3_rpc:save_checkpoint(Node, Name, Id, Seq, NewEntry, History),
- {ok, _} = couch_db:update_doc(Db, #doc{id = Id, body = NewBody}, []).
-
-
-purge_cp_body(#shard{} = Source, #shard{} = Target, PurgeSeq) ->
- {Mega, Secs, _} = os:timestamp(),
- NowSecs = Mega * 1000000 + Secs,
- {[
- {<<"type">>, <<"internal_replication">>},
- {<<"updated_on">>, NowSecs},
- {<<"purge_seq">>, PurgeSeq},
- {<<"source">>, atom_to_binary(Source#shard.node, latin1)},
- {<<"target">>, atom_to_binary(Target#shard.node, latin1)},
- {<<"range">>, Source#shard.range}
- ]}.
-
-
-find_repl_doc(SrcDb, TgtUUIDPrefix) ->
- SrcUUID = couch_db:get_uuid(SrcDb),
- S = local_id_hash(SrcUUID),
- DocIdPrefix = <<"_local/shard-sync-", S/binary, "-">>,
- FoldFun = fun(#doc{id = DocId, body = {BodyProps}} = Doc, _) ->
- TgtUUID = couch_util:get_value(<<"target_uuid">>, BodyProps, <<>>),
- case is_prefix(DocIdPrefix, DocId) of
- true ->
- case is_prefix(TgtUUIDPrefix, TgtUUID) of
- true ->
- {stop, {TgtUUID, Doc}};
- false ->
- {ok, not_found}
- end;
- _ ->
- {stop, not_found}
- end
- end,
- Options = [{start_key, DocIdPrefix}],
- case couch_db:fold_local_docs(SrcDb, FoldFun, not_found, Options) of
- {ok, {TgtUUID, Doc}} ->
- {ok, TgtUUID, Doc};
- {ok, not_found} ->
- {not_found, missing};
- Else ->
- couch_log:error("Error finding replication doc: ~w", [Else]),
- {not_found, missing}
- end.
-
-
-find_split_target_seq_int(TgtDb, Node, SrcUUIDPrefix) ->
- TgtUUID = couch_db:get_uuid(TgtDb),
- FoldFun = fun(#doc{body = {Props}}, _) ->
- DocTgtUUID = couch_util:get_value(<<"target_uuid">>, Props, <<>>),
- case TgtUUID == DocTgtUUID of
- true ->
- {History} = couch_util:get_value(<<"history">>, Props, {[]}),
- HProps = couch_util:get_value(Node, History, []),
- case get_target_seqs(HProps, TgtUUID, Node, SrcUUIDPrefix, []) of
- [] ->
- % No replication found from source to target
- {ok, not_found};
- [{_, _} | _] = SeqPairs ->
- % Found shared replicated history from source to target
- % Return sorted list by the earliest source sequence
- {stop, lists:sort(SeqPairs)}
- end;
- false ->
- {ok, not_found}
- end
- end,
- Options = [{start_key, <<"_local/shard-sync-">>}],
- case couch_db:fold_local_docs(TgtDb, FoldFun, not_found, Options) of
- {ok, Seqs} when is_list(Seqs) ->
- {ok, Seqs};
- {ok, not_found} ->
- {not_found, missing};
- Else ->
- couch_log:error("Error finding replication doc: ~w", [Else]),
- {not_found, missing}
- end.
-
-
-% Get the target sequence for each checkpoint recorded when the source
-% replicated to the target. The "target" is the current db, where the history
-% entry was read from, and the "source" is another, now possibly deleted,
-% database.
-get_target_seqs([], _TgtUUID, _Node, _SrcUUIDPrefix, Acc) ->
- lists:reverse(Acc);
-
-get_target_seqs([{Entry} | HProps], TgtUUID, Node, SrcUUIDPrefix, Acc) ->
- SameTgt = couch_util:get_value(<<"target_uuid">>, Entry) =:= TgtUUID,
- SameNode = couch_util:get_value(<<"target_node">>, Entry) =:= Node,
- SrcUUID = couch_util:get_value(<<"source_uuid">>, Entry),
- IsPrefix = is_prefix(SrcUUIDPrefix, SrcUUID),
- Acc1 = case SameTgt andalso SameNode andalso IsPrefix of
- true ->
- EntrySourceSeq = couch_util:get_value(<<"source_seq">>, Entry),
- EntryTargetSeq = couch_util:get_value(<<"target_seq">>, Entry),
- [{EntrySourceSeq, EntryTargetSeq} | Acc];
- false ->
- Acc
- end,
- get_target_seqs(HProps, TgtUUID, Node, SrcUUIDPrefix, Acc1).
-
-
-with_src_db(#acc{source = Source}, Fun) ->
- case couch_db:open(Source#shard.name, [?ADMIN_CTX]) of
- {ok, Db} ->
- try
- Fun(Db)
- after
- couch_db:close(Db)
- end;
- {not_found, _} ->
- error({error, missing_source})
- end.
-
-
-is_prefix(Prefix, Subject) ->
- binary:longest_common_prefix([Prefix, Subject]) == size(Prefix).
-
-
-filter_doc(Filter, FullDocInfo) when is_function(Filter) ->
- try Filter(FullDocInfo) of
- discard -> discard;
- _ -> keep
- catch _:_ ->
- keep
- end;
-filter_doc(_, _) ->
- keep.
-
-
-sync_security(#shard{} = Source, #{} = Targets) ->
- maps:map(fun(_, #tgt{shard = Target}) ->
- mem3_sync_security:maybe_sync(Source, Target)
- end, Targets).
-
-
-targets_map(#shard{name = <<"shards/", _/binary>> = SrcName} = Src,
- #shard{name = <<"shards/", _/binary>>, node = TgtNode} = Tgt) ->
- % Parse range from name in case the passed shard is built with a name only
- SrcRange = mem3:range(SrcName),
- Shards0 = mem3:shards(mem3:dbname(SrcName)),
- Shards1 = [S || S <- Shards0, not shard_eq(S, Src)],
- Shards2 = [S || S <- Shards1, check_overlap(SrcRange, TgtNode, S)],
- case [{R, S} || #shard{range = R} = S <- Shards2] of
- [] ->
- % If target map is empty, create a target map with just
- % that one target. This is to support tooling which may be
- % moving / copying shards using mem3_rep:go/2,3 before the
- % shards are present in the shard map
- #{mem3:range(SrcName) => Tgt};
- [_ | _] = TMapList ->
- maps:from_list(TMapList)
- end;
-
-
-targets_map(_Src, Tgt) ->
- #{[0, ?RING_END] => Tgt}.
-
-
-shard_eq(#shard{name = Name, node = Node}, #shard{name = Name, node = Node}) ->
- true;
-
-shard_eq(_, _) ->
- false.
-
-
-check_overlap(SrcRange, Node, #shard{node = Node, range = TgtRange}) ->
- mem3_util:range_overlap(SrcRange, TgtRange);
-
-check_overlap([_, _], _, #shard{}) ->
- false.
-
-
-reset_remaining(#{} = Targets) ->
- maps:map(fun(_, #tgt{} = T) ->
- T#tgt{remaining = 0}
- end, Targets).
-
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-
-find_source_seq_unknown_node_test() ->
- ?assertEqual(
- find_source_seq_int(doc_(), <<"foo">>, <<"bing">>, <<"bar_uuid">>, 10),
- 0
- ).
-
-
-find_source_seq_unknown_uuid_test() ->
- ?assertEqual(
- find_source_seq_int(doc_(), <<"foo">>, <<"bar">>, <<"teapot">>, 10),
- 0
- ).
-
-
-find_source_seq_ok_test() ->
- ?assertEqual(
- find_source_seq_int(doc_(), <<"foo">>, <<"bar">>, <<"bar_uuid">>, 100),
- 100
- ).
-
-
-find_source_seq_old_ok_test() ->
- ?assertEqual(
- find_source_seq_int(doc_(), <<"foo">>, <<"bar">>, <<"bar_uuid">>, 84),
- 50
- ).
-
-
-find_source_seq_different_node_test() ->
- ?assertEqual(
- find_source_seq_int(doc_(), <<"foo2">>, <<"bar">>, <<"bar_uuid">>, 92),
- 31
- ).
-
-
--define(SNODE, <<"source_node">>).
--define(SUUID, <<"source_uuid">>).
--define(SSEQ, <<"source_seq">>).
--define(TNODE, <<"target_node">>).
--define(TUUID, <<"target_uuid">>).
--define(TSEQ, <<"target_seq">>).
-
-doc_() ->
- Foo_Bar = [
- {[
- {?SNODE, <<"foo">>}, {?SUUID, <<"foo_uuid">>}, {?SSEQ, 100},
- {?TNODE, <<"bar">>}, {?TUUID, <<"bar_uuid">>}, {?TSEQ, 100}
- ]},
- {[
- {?SNODE, <<"foo">>}, {?SUUID, <<"foo_uuid">>}, {?SSEQ, 90},
- {?TNODE, <<"bar">>}, {?TUUID, <<"bar_uuid">>}, {?TSEQ, 85}
- ]},
- {[
- {?SNODE, <<"foo">>}, {?SUUID, <<"foo_uuid">>}, {?SSEQ, 50},
- {?TNODE, <<"bar">>}, {?TUUID, <<"bar_uuid">>}, {?TSEQ, 51}
- ]},
- {[
- {?SNODE, <<"foo">>}, {?SUUID, <<"foo_uuid">>}, {?SSEQ, 40},
- {?TNODE, <<"bar">>}, {?TUUID, <<"bar_uuid">>}, {?TSEQ, 45}
- ]},
- {[
- {?SNODE, <<"foo">>}, {?SUUID, <<"foo_uuid">>}, {?SSEQ, 2},
- {?TNODE, <<"bar">>}, {?TUUID, <<"bar_uuid">>}, {?TSEQ, 2}
- ]}
- ],
- Foo2_Bar = [
- {[
- {?SNODE, <<"foo2">>}, {?SUUID, <<"foo_uuid">>}, {?SSEQ, 100},
- {?TNODE, <<"bar">>}, {?TUUID, <<"bar_uuid">>}, {?TSEQ, 100}
- ]},
- {[
- {?SNODE, <<"foo2">>}, {?SUUID, <<"foo_uuid">>}, {?SSEQ, 92},
- {?TNODE, <<"bar">>}, {?TUUID, <<"bar_uuid">>}, {?TSEQ, 93}
- ]},
- {[
- {?SNODE, <<"foo2">>}, {?SUUID, <<"foo_uuid">>}, {?SSEQ, 31},
- {?TNODE, <<"bar">>}, {?TUUID, <<"bar_uuid">>}, {?TSEQ, 30}
- ]}
- ],
- History = {[
- {<<"foo">>, Foo_Bar},
- {<<"foo2">>, Foo2_Bar}
- ]},
- #doc{
- body={[{<<"history">>, History}]}
- }.
-
-
-targets_map_test_() ->
- {
- setup,
- fun() -> meck:new(mem3, [passthrough]) end,
- fun(_) -> meck:unload() end,
- [
- target_not_a_shard(),
- source_contained_in_target(),
- multiple_targets(),
- uneven_overlap(),
- target_not_in_shard_map()
- ]
- }.
-
-
-target_not_a_shard() ->
- ?_assertEqual(#{[0, ?RING_END] => <<"t">>}, targets_map(<<"s">>, <<"t">>)).
-
-
-source_contained_in_target() ->
- ?_test(begin
- R07 = [16#00000000, 16#7fffffff],
- R8f = [16#80000000, 16#ffffffff],
- R0f = [16#00000000, 16#ffffffff],
-
- Shards = [
- #shard{node = 'n1', range = R07},
- #shard{node = 'n1', range = R8f},
- #shard{node = 'n2', range = R07},
- #shard{node = 'n2', range = R8f},
- #shard{node = 'n3', range = R0f}
- ],
- meck:expect(mem3, shards, 1, Shards),
-
- SrcName1 = <<"shards/00000000-7fffffff/d.1551893552">>,
- TgtName1 = <<"shards/00000000-7fffffff/d.1551893552">>,
-
- Src1 = #shard{name = SrcName1, node = 'n1'},
- Tgt1 = #shard{name = TgtName1, node = 'n2'},
- Map1 = targets_map(Src1, Tgt1),
- ?assertEqual(1, map_size(Map1)),
- ?assertMatch(#{R07 := #shard{node = 'n2'}}, Map1),
-
- Tgt2 = #shard{name = TgtName1, node = 'n3'},
- Map2 = targets_map(Src1, Tgt2),
- ?assertEqual(1, map_size(Map2)),
- ?assertMatch(#{R0f := #shard{node = 'n3'}}, Map2)
- end).
-
-
-multiple_targets() ->
- ?_test(begin
- R07 = [16#00000000, 16#7fffffff],
- R8f = [16#80000000, 16#ffffffff],
- R0f = [16#00000000, 16#ffffffff],
-
- Shards = [
- #shard{node = 'n1', range = R07},
- #shard{node = 'n1', range = R8f},
- #shard{node = 'n2', range = R0f}
- ],
- meck:expect(mem3, shards, 1, Shards),
-
- SrcName = <<"shards/00000000-ffffffff/d.1551893552">>,
- TgtName = <<"shards/00000000-7fffffff/d.1551893552">>,
-
- Src = #shard{name = SrcName, node = 'n2'},
- Tgt = #shard{name = TgtName, node = 'n1'},
- Map = targets_map(Src, Tgt),
- ?assertEqual(2, map_size(Map)),
- ?assertMatch(#{R07 := #shard{node = 'n1'}}, Map),
- ?assertMatch(#{R8f := #shard{node = 'n1'}}, Map)
- end).
-
-
-uneven_overlap() ->
- ?_test(begin
- R04 = [16#00000000, 16#4fffffff],
- R26 = [16#20000000, 16#6fffffff],
- R58 = [16#50000000, 16#8fffffff],
- R9f = [16#90000000, 16#ffffffff],
- Shards = [
- #shard{node = 'n1', range = R04},
- #shard{node = 'n1', range = R58},
- #shard{node = 'n1', range = R9f},
- #shard{node = 'n2', range = R26}
- ],
-
- meck:expect(mem3, shards, 1, Shards),
-
- SrcName = <<"shards/20000000-6fffffff/d.1551893552">>,
- TgtName = <<"shards/20000000-6fffffff/d.1551893552">>,
-
- Src = #shard{name = SrcName, node = 'n2'},
- Tgt = #shard{name = TgtName, node = 'n1'},
- Map = targets_map(Src, Tgt),
- ?assertEqual(2, map_size(Map)),
- ?assertMatch(#{R04 := #shard{node = 'n1'}}, Map),
- ?assertMatch(#{R58 := #shard{node = 'n1'}}, Map)
- end).
-
-
-target_not_in_shard_map() ->
- ?_test(begin
- R0f = [16#00000000, 16#ffffffff],
- Name = <<"shards/00000000-ffffffff/d.1551893552">>,
- Shards = [
- #shard{name = Name, node = 'n1', range = R0f},
- #shard{name = Name, node = 'n2', range = R0f}
- ],
- meck:expect(mem3, shards, 1, Shards),
- Src = #shard{name = Name, node = 'n1'},
- Tgt = #shard{name = Name, node = 'n3'},
- Map = targets_map(Src, Tgt),
- ?assertEqual(1, map_size(Map)),
- ?assertMatch(#{R0f := #shard{name = Name, node = 'n3'}}, Map)
- end).
-
--endif.
diff --git a/src/mem3/src/mem3_reshard.erl b/src/mem3/src/mem3_reshard.erl
deleted file mode 100644
index 234670c34..000000000
--- a/src/mem3/src/mem3_reshard.erl
+++ /dev/null
@@ -1,913 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard).
-
-
--behaviour(gen_server).
-
-
--export([
- start_link/0,
-
- start/0,
- stop/1,
-
- start_split_job/1,
- stop_job/2,
- resume_job/1,
- remove_job/1,
-
- get_state/0,
- jobs/0,
- job/1,
- is_disabled/0,
-
- report/2,
- checkpoint/2,
-
- now_sec/0,
- update_history/4,
- shard_from_name/1,
- reset_state/0
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3
-]).
-
-
--include("mem3_reshard.hrl").
-
-
--define(JOB_ID_VERSION, 1).
--define(JOB_STATE_VERSION, 1).
--define(DEFAULT_MAX_JOBS, 48).
--define(DEFAULT_MAX_HISTORY, 20).
--define(JOB_PREFIX, <<"reshard-job-">>).
--define(STATE_PREFIX, <<"reshard-state-">>).
-
-
-%% Public API
-
--spec start_link() -> {ok, pid()} | ignore | {error, term()}.
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-
--spec start() -> ok | {error, any()}.
-start() ->
- case is_disabled() of
- true -> {error, resharding_disabled};
- false -> gen_server:call(?MODULE, start, infinity)
- end.
-
-
--spec stop(binary()) -> ok | {error, any()}.
-stop(Reason) ->
- case is_disabled() of
- true -> {error, resharding_disabled};
- false -> gen_server:call(?MODULE, {stop, Reason}, infinity)
- end.
-
-
--spec start_split_job(#shard{} | binary()) -> {ok, binary()} | {error, term()}.
-start_split_job(#shard{} = Shard) ->
- start_split_job(Shard, 2);
-
-start_split_job(ShardName) when is_binary(ShardName) ->
- start_split_job(shard_from_name(ShardName), 2).
-
-
--spec start_split_job(#shard{}, split()) -> {ok, binary()} | {error, any()}.
-start_split_job(#shard{} = Source, Split) ->
- case is_disabled() of
- true -> {error, resharding_disabled};
- false -> validate_and_start_job(Source, Split)
- end.
-
-
--spec stop_job(binary(), binary()) -> ok | {error, any()}.
-stop_job(JobId, Reason) when is_binary(JobId), is_binary(Reason) ->
- case is_disabled() of
- true -> {error, resharding_disabled};
- false -> gen_server:call(?MODULE, {stop_job, JobId, Reason}, infinity)
- end.
-
-
--spec resume_job(binary()) -> ok | {error, any()}.
-resume_job(JobId) when is_binary(JobId) ->
- case is_disabled() of
- true -> {error, resharding_disabled};
- false -> gen_server:call(?MODULE, {resume_job, JobId}, infinity)
- end.
-
-
--spec remove_job(binary()) -> ok | {error, any()}.
-remove_job(JobId) when is_binary(JobId) ->
- case is_disabled() of
- true -> {error, resharding_disabled};
- false -> gen_server:call(?MODULE, {remove_job, JobId}, infinity)
- end.
-
-
--spec get_state() -> {[_ | _]}.
-get_state() ->
- gen_server:call(?MODULE, get_state, infinity).
-
-
--spec jobs() -> [[tuple()]].
-jobs() ->
- ets:foldl(fun(Job, Acc) ->
- Opts = [iso8601],
- Props = mem3_reshard_store:job_to_ejson_props(Job, Opts),
- [{Props} | Acc]
- end, [], ?MODULE).
-
-
--spec job(job_id()) -> {ok, {[_ | _]}} | {error, not_found}.
-job(JobId) ->
- case job_by_id(JobId) of
- #job{} = Job ->
- Opts = [iso8601],
- Props = mem3_reshard_store:job_to_ejson_props(Job, Opts),
- {ok, {Props}};
- not_found ->
- {error, not_found}
- end.
-
-
-% Return true if resharding is disabled in the application-level settings
--spec is_disabled() -> boolean().
-is_disabled() ->
- case application:get_env(mem3, reshard_disabled) of
- {ok, "true"} -> true;
- {ok, true} -> true;
- _ -> false
- end.
-
-
-% State reporting callbacks. Used by mem3_reshard_job module.
--spec report(pid(), #job{}) -> ok.
-report(Server, #job{} = Job) when is_pid(Server) ->
- gen_server:cast(Server, {report, Job}).
-
-
--spec checkpoint(pid(), #job{}) -> ok.
-checkpoint(Server, #job{} = Job) ->
- couch_log:notice("~p checkpointing ~p ~p", [?MODULE, Server, jobfmt(Job)]),
- gen_server:cast(Server, {checkpoint, Job}).
-
-
-% Utility functions used from other mem3_reshard modules
-
--spec now_sec() -> non_neg_integer().
-now_sec() ->
- {Mega, Sec, _Micro} = os:timestamp(),
- Mega * 1000000 + Sec.
-
-
--spec update_history(atom(), binary() | null, time_sec(), list()) -> list().
-update_history(State, State, Ts, History) ->
- % State is the same as detail. Make the detail null to avoid duplication
- update_history(State, null, Ts, History);
-
-update_history(State, Detail, Ts, History) ->
- % Reverse, so we can process the last event as the head using
- % head matches; then, after appending and trimming, reverse again.
- Rev = lists:reverse(History),
- UpdatedRev = update_history_rev(State, Detail, Ts, Rev),
- TrimmedRev = lists:sublist(UpdatedRev, max_history()),
- lists:reverse(TrimmedRev).
-
-
--spec shard_from_name(binary()) -> #shard{}.
-shard_from_name(<<"shards/", _:8/binary, "-", _:8/binary, "/",
- Rest/binary>> = Shard) ->
- Range = mem3:range(Shard),
- [DbName, Suffix] = binary:split(Rest, <<".">>),
- build_shard(Range, DbName, Suffix).
-
-
-% For debugging only
-
--spec reset_state() -> ok.
-reset_state() ->
- gen_server:call(?MODULE, reset_state, infinity).
-
-
-% Gen server functions
-
-init(_) ->
- couch_log:notice("~p start init()", [?MODULE]),
- EtsOpts = [named_table, {keypos, #job.id}, {read_concurrency, true}],
- ?MODULE = ets:new(?MODULE, EtsOpts),
- ManagerPid = self(),
- State = #state{
- state = running,
- state_info = [],
- update_time = now_sec(),
- node = node(),
- db_monitor = spawn_link(fun() -> db_monitor(ManagerPid) end)
- },
- State1 = mem3_reshard_store:init(State, ?JOB_PREFIX, state_id()),
- State2 = mem3_reshard_store:load_state(State1, running),
- State3 = maybe_disable(State2),
- gen_server:cast(self(), reload_jobs),
- {ok, State3}.
-
-
-terminate(Reason, State) ->
- couch_log:notice("~p terminate ~p ~p", [?MODULE, Reason, statefmt(State)]),
- catch unlink(State#state.db_monitor),
- catch exit(State#state.db_monitor, kill),
- lists:foreach(fun(Job) -> kill_job_int(Job) end, running_jobs()).
-
-
-handle_call(start, _From, #state{state = stopped} = State) ->
- State1 = State#state{
- state = running,
- update_time = now_sec(),
- state_info = info_delete(reason, State#state.state_info)
- },
- ok = mem3_reshard_store:store_state(State1),
- State2 = maybe_disable(State1),
- State3 = reload_jobs(State2),
- {reply, ok, State3};
-
-handle_call(start, _From, State) ->
- {reply, ok, State};
-
-handle_call({stop, Reason}, _From, #state{state = running} = State) ->
- State1 = State#state{
- state = stopped,
- update_time = now_sec(),
- state_info = info_update(reason, Reason, State#state.state_info)
- },
- ok = mem3_reshard_store:store_state(State1),
- lists:foreach(fun(Job) -> temporarily_stop_job(Job) end, running_jobs()),
- {reply, ok, State1};
-
-handle_call({stop, _}, _From, State) ->
- {reply, ok, State};
-
-handle_call({start_job, #job{id = Id, source = Source} = Job}, _From, State) ->
- couch_log:notice("~p start_job call ~p", [?MODULE, jobfmt(Job)]),
- Total = ets:info(?MODULE, size),
- SourceOk = mem3_reshard_validate:source(Source),
- case {job_by_id(Id), Total + 1 =< get_max_jobs(), SourceOk} of
- {not_found, true, ok} ->
- handle_start_job(Job, State);
- {#job{}, _, _} ->
- {reply, {error, job_already_exists}, State};
- {_, false, _} ->
- {reply, {error, max_jobs_exceeded}, State};
- {_, _, {error, _} = SourceError} ->
- {reply, SourceError, State}
- end;
-
-handle_call({resume_job, _}, _From, #state{state = stopped} = State) ->
- case couch_util:get_value(reason, State#state.state_info) of
- undefined ->
- {reply, {error, stopped}, State};
- Reason ->
- {reply, {error, {stopped, Reason}}, State}
- end;
-
-handle_call({resume_job, Id}, _From, State) ->
- couch_log:notice("~p resume_job call ~p", [?MODULE, Id]),
- case job_by_id(Id) of
- #job{job_state = stopped} = Job ->
- case start_job_int(Job, State) of
- ok ->
- {reply, ok, State};
- {error, Error} ->
- {reply, {error, Error}, State}
- end;
- #job{} ->
- {reply, ok, State};
- not_found ->
- {reply, {error, not_found}, State}
- end;
-
-handle_call({stop_job, Id, Reason}, _From, State) ->
- couch_log:notice("~p stop_job Id:~p Reason:~p", [?MODULE, Id, Reason]),
- case job_by_id(Id) of
- #job{job_state = JSt} = Job when JSt =:= running orelse JSt =:= new
- orelse JSt =:= stopped ->
- ok = stop_job_int(Job, stopped, Reason, State),
- {reply, ok, State};
- #job{} ->
- {reply, ok, State};
- not_found ->
- {reply, {error, not_found}, State}
- end;
-
-handle_call({remove_job, Id}, _From, State) ->
- {reply, remove_job_int(Id, State), State};
-
-handle_call(get_state, _From, #state{state = GlobalState} = State) ->
- StateProps = mem3_reshard_store:state_to_ejson_props(State),
- Stats0 = #{running => 0, completed => 0, failed => 0, stopped => 0},
- StateStats = ets:foldl(fun(#job{job_state = JS}, Acc) ->
- % When jobs are disabled globally their state is not checkpointed as
- % "stopped", but it stays as "running". But when returning the state we
- % don't want to mislead by indicating that there are "N running jobs"
- % when the global state is "stopped".
- JS1 = case GlobalState =:= stopped andalso JS =:= running of
- true -> stopped;
- false -> JS
- end,
- Acc#{JS1 => maps:get(JS1, Acc, 0) + 1}
- end, Stats0, ?MODULE),
- Total = ets:info(?MODULE, size),
- StateStats1 = maps:to_list(StateStats) ++ [{total, Total}],
- Result = {lists:sort(StateProps ++ StateStats1)},
- {reply, Result, State};
-
-handle_call(reset_state, _From, State) ->
- {reply, ok, reset_state(State)};
-
-handle_call(Call, From, State) ->
- couch_log:error("~p unknown call ~p from: ~p", [?MODULE, Call, From]),
- {noreply, State}.
-
-
-handle_cast({db_deleted, DbName}, State) ->
- % Remove only completed jobs. Other running states would `fail` but
- % job result would stick around so users can inspect them.
- JobIds = jobs_by_db_and_state(DbName, completed),
- [remove_job_int(JobId, State) || JobId <- JobIds],
- {noreply, State};
-
-handle_cast({report, Job}, State) ->
- report_int(Job),
- {noreply, State};
-
-handle_cast({checkpoint, Job}, State) ->
- {noreply, checkpoint_int(Job, State)};
-
-handle_cast(reload_jobs, State) ->
- couch_log:notice("~p starting reloading jobs", [?MODULE]),
- State1 = reload_jobs(State),
- couch_log:notice("~p finished reloading jobs", [?MODULE]),
- {noreply, State1};
-
-handle_cast(Cast, State) ->
- couch_log:error("~p unexpected cast ~p", [?MODULE, Cast]),
- {noreply, State}.
-
-
-handle_info({'DOWN', _Ref, process, Pid, Info}, State) ->
- case job_by_pid(Pid) of
- {ok, Job} ->
- couch_log:notice("~p job ~s exit ~p", [?MODULE, Job#job.id, Info]),
- ok = handle_job_exit(Job, Info, State);
- {error, not_found} ->
- couch_log:error("~p job not found: ~p ~p", [?MODULE, Pid, Info])
- end,
- {noreply, State};
-
-handle_info(Info, State) ->
- couch_log:error("~p unexpected info ~p", [?MODULE, Info]),
- {noreply, State}.
-
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-
-%% Private API
-
-validate_and_start_job(#shard{} = Source, Split) ->
- case mem3_reshard_validate:start_args(Source, Split) of
- ok ->
- Target = target_shards(Source, Split),
- case mem3_reshard_validate:targets(Source, Target) of
- ok ->
- TStamp = now_sec(),
- Job = #job{
- type = split,
- job_state = new,
- split_state = new,
- start_time = TStamp,
- update_time = TStamp,
- node = node(),
- source = Source,
- target = Target
- },
- Job1 = Job#job{id = job_id(Job)},
- Job2 = update_job_history(Job1),
- gen_server:call(?MODULE, {start_job, Job2}, infinity);
- {error, Error} ->
- {error, Error}
- end;
- {error, Error} ->
- {error, Error}
- end.
-
-
-handle_start_job(#job{} = Job, #state{state = running} = State) ->
- case start_job_int(Job, State) of
- ok ->
- {reply, {ok, Job#job.id}, State};
- {error, Error} ->
- {reply, {error, Error}, State}
- end;
-
-handle_start_job(#job{} = Job, #state{state = stopped} = State) ->
- ok = mem3_reshard_store:store_job(State, Job),
- % Since resharding is stopped on this node, the job is temporarily marked
- % as stopped in the ets table so as not to return a "running" result which
- % would look odd.
- temporarily_stop_job(Job),
- {reply, {ok, Job#job.id}, State}.
-
-
-% Insert the job into the ets table as a temporarily stopped job. This happens
-% when a job is reloaded or added while node-wide resharding is stopped.
--spec temporarily_stop_job(#job{}) -> #job{}.
-temporarily_stop_job(Job) ->
- Job1 = kill_job_int(Job),
- OldInfo = Job1#job.state_info,
- Reason = <<"Shard splitting disabled">>,
- Job2 = Job1#job{
- job_state = stopped,
- update_time = now_sec(),
- start_time = 0,
- state_info = info_update(reason, Reason, OldInfo),
- pid = undefined,
- ref = undefined
- },
- Job3 = update_job_history(Job2),
- true = ets:insert(?MODULE, Job3),
- Job3.
-
-
--spec reload_jobs(#state{}) -> #state{}.
-reload_jobs(State) ->
- Jobs = mem3_reshard_store:get_jobs(State),
- lists:foldl(fun reload_job/2, State, Jobs).
-
-
-% This is the case when the main application is stopped but a job that was
-% checkpointed in the running state is reloaded. Set that state to stopped to
-% avoid the API results looking odd.
--spec reload_job(#job{}, #state{}) -> #state{}.
-reload_job(#job{job_state = JS} = Job, #state{state = stopped} = State)
- when JS =:= running orelse JS =:= new ->
- temporarily_stop_job(Job),
- State;
-
-% This is the case when a job process should be spawned
-reload_job(#job{job_state = JS} = Job, #state{state = running} = State)
- when JS =:= running orelse JS =:= new ->
- case start_job_int(Job, State) of
- ok ->
- State;
- {error, Error} ->
- Msg = "~p could not resume ~s error: ~p",
- couch_log:error(Msg, [?MODULE, jobfmt(Job), Error]),
- State
- end;
-
-% If job is disabled individually (stopped by the user), is completed or failed
-% then simply load it into the ets table
-reload_job(#job{job_state = JS} = Job, #state{} = State)
- when JS =:= failed orelse JS =:= completed orelse JS =:= stopped ->
- true = ets:insert(?MODULE, Job),
- State.
-
-
--spec get_max_jobs() -> integer().
-get_max_jobs() ->
- config:get_integer("reshard", "max_jobs", ?DEFAULT_MAX_JOBS).
-
-
--spec start_job_int(#job{}, #state{}) -> ok | {error, term()}.
-start_job_int(Job, State) ->
- case spawn_job(Job) of
- {ok, #job{} = Job1} ->
- Job2 = update_job_history(Job1),
- ok = mem3_reshard_store:store_job(State, Job2),
- true = ets:insert(?MODULE, Job2),
- ok;
- {error, Error} ->
- {error, Error}
- end.
-
-
--spec spawn_job(#job{}) -> {ok, pid()} | {error, term()}.
-spawn_job(#job{} = Job0) ->
- Job = Job0#job{
- job_state = running,
- start_time = 0,
- update_time = now_sec(),
- state_info = info_delete(reason, Job0#job.state_info),
- manager = self(),
- workers = [],
- retries = 0
- },
- case mem3_reshard_job_sup:start_child(Job) of
- {ok, Pid} ->
- Ref = monitor(process, Pid),
- {ok, Job#job{pid = Pid, ref = Ref}};
- {error, Reason} ->
- {error, Reason}
- end.
-
-
--spec stop_job_int(#job{}, job_state(), term(), #state{}) -> ok.
-stop_job_int(#job{} = Job, JobState, Reason, State) ->
- couch_log:info("~p stop_job_int ~p newstate: ~p reason:~p", [?MODULE,
- jobfmt(Job), JobState, Reason]),
- Job1 = kill_job_int(Job),
- Job2 = Job1#job{
- job_state = JobState,
- update_time = now_sec(),
- state_info = [{reason, Reason}]
- },
- ok = mem3_reshard_store:store_job(State, Job2),
- true = ets:insert(?MODULE, Job2),
- couch_log:info("~p stop_job_int stopped ~p", [?MODULE, jobfmt(Job2)]),
- ok.
-
-
--spec kill_job_int(#job{}) -> #job{}.
-kill_job_int(#job{pid = undefined} = Job) ->
- Job;
-
-kill_job_int(#job{pid = Pid, ref = Ref} = Job) ->
- couch_log:info("~p kill_job_int ~p", [?MODULE, jobfmt(Job)]),
- demonitor(Ref, [flush]),
- case erlang:is_process_alive(Pid) of
- true ->
- ok = mem3_reshard_job_sup:terminate_child(Pid);
- false ->
- ok
- end,
- Job1 = Job#job{pid = undefined, ref = undefined},
- true = ets:insert(?MODULE, Job1),
- Job1.
-
-
--spec handle_job_exit(#job{}, term(), #state{}) -> ok.
-handle_job_exit(#job{split_state = completed} = Job, normal, State) ->
- couch_log:notice("~p completed job ~s exited", [?MODULE, Job#job.id]),
- Job1 = Job#job{
- pid = undefined,
- ref = undefined,
- job_state = completed,
- update_time = now_sec(),
- state_info = []
- },
- Job2 = update_job_history(Job1),
- ok = mem3_reshard_store:store_job(State, Job2),
- true = ets:insert(?MODULE, Job2),
- ok;
-
-handle_job_exit(#job{job_state = running} = Job, normal, _State) ->
- couch_log:notice("~p running job ~s stopped", [?MODULE, Job#job.id]),
- OldInfo = Job#job.state_info,
- Job1 = Job#job{
- pid = undefined,
- ref = undefined,
- job_state = stopped,
- update_time = now_sec(),
- state_info = info_update(reason, <<"Job stopped">>, OldInfo)
- },
- true = ets:insert(?MODULE, update_job_history(Job1)),
- ok;
-
-handle_job_exit(#job{job_state = running} = Job, shutdown, _State) ->
- couch_log:notice("~p job ~s shutdown", [?MODULE, Job#job.id]),
- OldInfo = Job#job.state_info,
- Job1 = Job#job{
- pid = undefined,
- ref = undefined,
- job_state = stopped,
- update_time = now_sec(),
- state_info = info_update(reason, <<"Job shutdown">>, OldInfo)
- },
- true = ets:insert(?MODULE, update_job_history(Job1)),
- ok;
-
-handle_job_exit(#job{job_state = running} = Job, {shutdown, Msg}, _State) ->
- couch_log:notice("~p job ~s shutdown ~p", [?MODULE, Job#job.id, Msg]),
- OldInfo = Job#job.state_info,
- Job1 = Job#job{
- pid = undefined,
- ref = undefined,
- job_state = stopped,
- update_time = now_sec(),
- state_info = info_update(reason, <<"Job shutdown">>, OldInfo)
- },
- true = ets:insert(?MODULE, update_job_history(Job1)),
- ok;
-
-handle_job_exit(#job{} = Job, Error, State) ->
- couch_log:notice("~p job ~s failed ~p", [?MODULE, Job#job.id, Error]),
- OldInfo = Job#job.state_info,
- Job1 = Job#job{
- pid = undefined,
- ref = undefined,
- job_state = failed,
- update_time = now_sec(),
- state_info = info_update(reason, Error, OldInfo)
- },
- Job2 = update_job_history(Job1),
- ok = mem3_reshard_store:store_job(State, Job2),
- true = ets:insert(?MODULE, Job2),
- ok.
-
-
--spec job_by_id(job_id()) -> #job{} | not_found.
-job_by_id(Id) ->
- case ets:lookup(?MODULE, Id) of
- [] ->
- not_found;
- [#job{} = Job] ->
- Job
- end.
-
-
--spec job_by_pid(pid()) -> {ok, #job{}} | {error, not_found}.
-job_by_pid(Pid) when is_pid(Pid) ->
- case ets:match_object(?MODULE, #job{pid=Pid, _='_'}) of
- [] ->
- {error, not_found};
- [#job{} = Job] ->
- {ok, Job}
- end.
-
-
--spec state_id() -> binary().
-state_id() ->
- Ver = iolist_to_binary(io_lib:format("~3..0B", [?JOB_STATE_VERSION])),
- <<?STATE_PREFIX/binary, Ver/binary>>.
-
-
--spec job_id(#job{}) -> binary().
-job_id(#job{source = #shard{name = SourceName}}) ->
- HashInput = [SourceName, atom_to_binary(node(), utf8)],
- IdHashList = couch_util:to_hex(crypto:hash(sha256, HashInput)),
- IdHash = iolist_to_binary(IdHashList),
- Prefix = iolist_to_binary(io_lib:format("~3..0B", [?JOB_ID_VERSION])),
- <<Prefix/binary, "-", IdHash/binary>>.
-
-
--spec target_shards(#shard{}, split()) -> [#shard{}].
-target_shards(#shard{name = Name, range = [B, E], dbname = DbName}, Split) when
- is_integer(Split), Split >= 2, (E - B + 1) >= Split ->
- Ranges = target_ranges([B, E], Split),
- <<"shards/", _:8/binary, "-", _:8/binary, "/", DbAndSuffix/binary>> = Name,
- [DbName, Suffix] = binary:split(DbAndSuffix, <<".">>),
- [build_shard(R, DbName, Suffix) || R <- Ranges].
-
-
--spec target_ranges([range_pos()], split()) -> [[range_pos()]].
-target_ranges([Begin, End], Split) when (End - Begin + 1) >= Split,
-    Split >= 2 ->
- Len = End - Begin + 1, % + 1 since intervals are inclusive
- NewLen = Len div Split,
- Rem = Len rem Split,
- Ranges = [[I, I + NewLen - 1] || I <- lists:seq(Begin, End - Rem, NewLen)],
-    % Adjust the last end to always match the original end so that the full
-    % range is always covered. When the remainder is non-zero this makes the
-    % last range larger. Improve the algorithm later to re-distribute the
-    % remainder equally amongst the chunks.
- {BeforeLast, [[BeginLast, _]]} = lists:split(Split - 1, Ranges),
- BeforeLast ++ [[BeginLast, End]].
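A quick worked example of the arithmetic above may help (not part of the
deleted module; values computed by hand from the code, using the conventional
full 32-bit hash range as input). Splitting the inclusive range in two yields
two equal halves; any non-zero remainder is absorbed by the last range:

    %% Begin = 0, End = 16#ffffffff, Split = 2
    %% Len = 4294967296, NewLen = 2147483648, Rem = 0
    target_ranges([0, 16#ffffffff], 2).
    %% => [[0, 16#7fffffff], [16#80000000, 16#ffffffff]]
    %%    i.e. [[0,2147483647],[2147483648,4294967295]]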
-
-
--spec build_shard([non_neg_integer()], binary(), binary()) -> #shard{}.
-build_shard(Range, DbName, Suffix) ->
- Shard = #shard{dbname = DbName, range = Range, node = node()},
- mem3_util:name_shard(Shard, <<".", Suffix/binary>>).
-
-
--spec running_jobs() -> [#job{}].
-running_jobs() ->
- Pat = #job{job_state = running, _ = '_'},
- ets:match_object(?MODULE, Pat).
-
-
--spec info_update(atom(), any(), [tuple()]) -> [tuple()].
-info_update(Key, Val, StateInfo) ->
- lists:keystore(Key, 1, StateInfo, {Key, Val}).
-
-
--spec info_delete(atom(), [tuple()]) -> [tuple()].
-info_delete(Key, StateInfo) ->
- lists:keydelete(Key, 1, StateInfo).
-
-
--spec checkpoint_int(#job{}, #state{}) -> #state{}.
-checkpoint_int(#job{} = Job, State) ->
- couch_log:debug("~p checkpoint ~s", [?MODULE, jobfmt(Job)]),
- case report_int(Job) of
- ok ->
- ok = mem3_reshard_store:store_job(State, Job),
- ok = mem3_reshard_job:checkpoint_done(Job),
- State;
- not_found ->
- couch_log:error("~p checkpoint couldn't find ~p", [?MODULE, Job]),
- State
- end.
-
-
--spec report_int(#job{}) -> ok | not_found.
-report_int(Job) ->
- case ets:lookup(?MODULE, Job#job.id) of
- [#job{ref = Ref, pid = CurPid}] ->
- case Job#job.pid =:= CurPid of
- true ->
- couch_log:debug("~p reported ~s", [?MODULE, jobfmt(Job)]),
-                    % Carry over the reference from ets as the #job{} coming
-                    % from the job process won't have its own monitor ref.
- true = ets:insert(?MODULE, Job#job{ref = Ref}),
- ok;
- false ->
- LogMsg = "~p ignoring old job report ~p curr pid:~p",
- couch_log:warning(LogMsg, [?MODULE, jobfmt(Job), CurPid]),
- not_found
- end;
- _ ->
- couch_log:error("~p reporting : couldn't find ~p", [?MODULE, Job]),
- not_found
- end.
-
-
--spec remove_job_int(job_id(), #state{}) -> ok | {error, not_found}.
-remove_job_int(Id, State) ->
- couch_log:notice("~p call remove_job Id:~p", [?MODULE, Id]),
- case job_by_id(Id) of
- #job{} = Job ->
- kill_job_int(Job),
- ok = mem3_reshard_store:delete_job(State, Id),
- ets:delete(?MODULE, Job#job.id),
- ok;
- not_found ->
- {error, not_found}
- end.
-
-
-% This function is for testing and debugging only
--spec reset_state(#state{}) -> #state{}.
-reset_state(#state{} = State) ->
- couch_log:warning("~p resetting state", [?MODULE]),
- ok = mem3_reshard_store:delete_state(State),
- couch_log:warning("~p killing all running jobs", [?MODULE]),
- [kill_job_int(Job) || Job <- running_jobs()],
- ets:delete_all_objects(?MODULE),
- couch_log:warning("~p resetting all job states", [?MODULE]),
- Jobs = mem3_reshard_store:get_jobs(State),
- lists:foldl(fun(#job{id = Id}, StateAcc) ->
- couch_log:warning("~p resetting job state ~p", [?MODULE, Id]),
- ok = mem3_reshard_store:delete_job(StateAcc, Id),
- StateAcc
- end, State, Jobs),
- couch_log:warning("~p resetting state done", [?MODULE]),
- State#state{
- state = running,
- state_info = [],
- update_time = now_sec()
- }.
-
-
--spec update_job_history(#job{}) -> #job{}.
-update_job_history(#job{job_state = St, update_time = Ts} = Job) ->
- Hist = Job#job.history,
- Reason = case couch_util:get_value(reason, Job#job.state_info) of
- undefined -> null;
- Val -> couch_util:to_binary(Val)
- end,
- Job#job{history = update_history(St, Reason, Ts, Hist)}.
-
-
-update_history_rev(State, null, Ts, [{_, State, Detail} | Rest]) ->
-    % The new detail is null and the state is unchanged; keep the old detail
-    % and just refresh the timestamp, no new entry is added
- [{Ts, State, Detail} | Rest];
-
-update_history_rev(State, Detail, Ts, [{_, State, Detail} | Rest]) ->
- % State and detail were same as last event, just update the timestamp
- [{Ts, State, Detail} | Rest];
-
-update_history_rev(State, Detail, Ts, History) ->
- [{Ts, State, Detail} | History].
-
-
--spec max_history() -> non_neg_integer().
-max_history() ->
- config:get_integer("reshard", "max_history", ?DEFAULT_MAX_HISTORY).
-
-
--spec maybe_disable(#state{}) -> #state{}.
-maybe_disable(#state{} = State) ->
- case is_disabled() of
- true ->
- Reason = <<"Resharding disabled by application level config">>,
- SInfo = State#state.state_info,
- State#state{
- state = stopped,
- state_info = info_update(reason, Reason, SInfo)
- };
- false ->
- State
- end.
-
-
--spec jobs_by_db_and_state(binary(), split_state() | '_') -> [job_id()].
-jobs_by_db_and_state(Db, State) ->
- DbName = mem3:dbname(Db),
- Pat = #job{
- id = '$1',
-        source = #shard{dbname = DbName, _ = '_'},
- job_state = State,
- _ = '_'
- },
- [JobId || [JobId] <- ets:match(?MODULE, Pat)].
-
-
--spec db_exists(binary()) -> boolean().
-db_exists(Name) ->
- try
- mem3:shards(mem3:dbname(Name)),
- true
- catch
- error:database_does_not_exist ->
- false
- end.
-
-
--spec db_monitor(pid()) -> no_return().
-db_monitor(Server) ->
- couch_log:notice("~p db monitor ~p starting", [?MODULE, self()]),
- EvtRef = erlang:monitor(process, couch_event_server),
- couch_event:register_all(self()),
- db_monitor_loop(Server, EvtRef).
-
-
--spec db_monitor_loop(pid(), reference()) -> no_return().
-db_monitor_loop(Server, EvtRef) ->
- receive
- {'$couch_event', DbName, deleted} ->
- case db_exists(DbName) of
- true ->
- % Could be source shard being deleted during splitting
- ok;
- false ->
- case length(jobs_by_db_and_state(DbName, '_')) > 0 of
- true ->
- % Notify only if there are jobs with that db
- gen_server:cast(Server, {db_deleted, DbName});
- false ->
- ok
- end
- end,
- db_monitor_loop(Server, EvtRef);
- {'$couch_event', _, _} ->
- db_monitor_loop(Server, EvtRef);
- {'DOWN', EvtRef, _, _, Info} ->
- couch_log:error("~p db monitor listener died ~p", [?MODULE, Info]),
- exit({db_monitor_died, Info});
- Msg ->
- couch_log:error("~p db monitor unexpected msg ~p", [?MODULE, Msg]),
- db_monitor_loop(Server, EvtRef)
- end.
-
-
--spec statefmt(#state{} | term()) -> string().
-statefmt(#state{state = StateName}) ->
- Total = ets:info(?MODULE, size),
- Active = mem3_reshard_job_sup:count_children(),
- Msg = "#state{~s total:~B active:~B}",
- Fmt = io_lib:format(Msg, [StateName, Total, Active]),
- lists:flatten(Fmt);
-
-statefmt(State) ->
- Fmt = io_lib:format("<Unknown split state:~p>", [State]),
- lists:flatten(Fmt).
-
-
--spec jobfmt(#job{}) -> string().
-jobfmt(#job{} = Job) ->
- mem3_reshard_job:jobfmt(Job).
diff --git a/src/mem3/src/mem3_reshard.hrl b/src/mem3/src/mem3_reshard.hrl
deleted file mode 100644
index ad76aeadf..000000000
--- a/src/mem3/src/mem3_reshard.hrl
+++ /dev/null
@@ -1,74 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--include_lib("mem3/include/mem3.hrl").
-
-
--type range_pos() :: non_neg_integer().
--type split() :: pos_integer().
--type job_id() :: binary() | undefined.
--type job_type() :: split.
--type time_sec() :: non_neg_integer().
-
--type shard_split_main_state() ::
- running |
- stopped.
-
--type job_state() ::
- new |
- running |
- stopped |
- failed |
- completed.
-
--type split_state() ::
- new |
- initial_copy |
- topoff1 |
- build_indices |
- topoff2 |
- copy_local_docs |
- update_shardmap |
- wait_source_close |
- topoff3 |
- source_delete |
- completed.
-
-
--record(job, {
- id :: job_id() | '$1' | '_',
- type :: job_type(),
- job_state :: job_state(),
- split_state :: split_state(),
- state_info = [] :: [{atom(), any()}],
- source :: #shard{},
- target :: [#shard{}],
- history = [] :: [{atom(), time_sec()}],
- start_time = 0 :: non_neg_integer(),
- update_time = 0 :: non_neg_integer(),
- node :: node(),
- pid :: undefined | pid() | '$1' | '_',
- ref :: undefined | reference() | '_',
- manager :: undefined | pid(),
- workers = [] :: [pid()],
- retries = 0 :: non_neg_integer()
-}).
-
--record(state, {
- state :: shard_split_main_state(),
- state_info :: [],
- update_time :: non_neg_integer(),
- job_prefix :: binary(),
- state_id :: binary(),
- node :: node(),
- db_monitor :: pid()
-}).
diff --git a/src/mem3/src/mem3_reshard_api.erl b/src/mem3/src/mem3_reshard_api.erl
deleted file mode 100644
index 0d3377db7..000000000
--- a/src/mem3/src/mem3_reshard_api.erl
+++ /dev/null
@@ -1,217 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard_api).
-
--export([
- create_jobs/5,
- get_jobs/0,
- get_job/1,
- get_summary/0,
- resume_job/1,
- stop_job/2,
- start_shard_splitting/0,
- stop_shard_splitting/1,
- get_shard_splitting_state/0
-]).
-
-
-create_jobs(Node, Shard, Db, Range, split) ->
- lists:map(fun(S) ->
- N = mem3:node(S),
- Name = mem3:name(S),
- case rpc:call(N, mem3_reshard, start_split_job, [Name]) of
- {badrpc, Error} ->
- {error, Error, N, Name};
- {ok, JobId} ->
- {ok, JobId, N, Name};
- {error, Error} ->
- {error, Error, N, Name}
- end
- end, pick_shards(Node, Shard, Db, Range)).
-
-
-get_jobs() ->
- Nodes = mem3_util:live_nodes(),
- {Replies, _Bad} = rpc:multicall(Nodes, mem3_reshard, jobs, []),
- lists:flatten(Replies).
-
-
-get_job(JobId) ->
- Nodes = mem3_util:live_nodes(),
- {Replies, _Bad} = rpc:multicall(Nodes, mem3_reshard, job, [JobId]),
- case [JobInfo || {ok, JobInfo} <- Replies] of
- [JobInfo | _] ->
- {ok, JobInfo};
- [] ->
- {error, not_found}
- end.
-
-
-get_summary() ->
- Nodes = mem3_util:live_nodes(),
- {Replies, _Bad} = rpc:multicall(Nodes, mem3_reshard, get_state, []),
- Stats0 = #{running => 0, total => 0, completed => 0, failed => 0,
- stopped => 0},
- StatsF = lists:foldl(fun({Res}, Stats) ->
- maps:map(fun(Stat, OldVal) ->
- OldVal + couch_util:get_value(Stat, Res, 0)
- end, Stats)
- end, Stats0, Replies),
- {State, Reason} = state_and_reason(Replies),
- StateReasonProps = [{state, State}, {state_reason, Reason}],
- {StateReasonProps ++ lists:sort(maps:to_list(StatsF))}.
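The summary above is an EJSON object: the cluster-wide state and reason first,
then the per-state job counters summed across all live nodes. A sketch of the
shape, with illustrative counts:

    %% {[{state, running}, {state_reason, null},
    %%   {completed, 2}, {failed, 0}, {running, 1}, {stopped, 0}, {total, 3}]}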
-
-
-resume_job(JobId) ->
- Nodes = mem3_util:live_nodes(),
- {Replies, _Bad} = rpc:multicall(Nodes, mem3_reshard, resume_job,
- [JobId]),
- WithoutNotFound = [R || R <- Replies, R =/= {error, not_found}],
- case lists:usort(WithoutNotFound) of
- [ok] ->
- ok;
- [{error, Error} | _] ->
- {error, {[{error, couch_util:to_binary(Error)}]}};
- [] ->
- {error, not_found}
- end.
-
-
-stop_job(JobId, Reason) ->
- Nodes = mem3_util:live_nodes(),
- {Replies, _Bad} = rpc:multicall(Nodes, mem3_reshard, stop_job,
- [JobId, Reason]),
- WithoutNotFound = [R || R <- Replies, R =/= {error, not_found}],
- case lists:usort(WithoutNotFound) of
- [ok] ->
- ok;
- [{error, Error} | _] ->
- {error, {[{error, couch_util:to_binary(Error)}]}};
- [] ->
- {error, not_found}
- end.
-
-
-start_shard_splitting() ->
- {Replies, _Bad} = rpc:multicall(mem3_reshard, start, []),
- case lists:usort(lists:flatten(Replies)) of
- [ok] ->
- {ok, {[{ok, true}]}};
- [Error | _] ->
- {error, {[{error, couch_util:to_binary(Error)}]}}
- end.
-
-
-stop_shard_splitting(Reason) ->
- {Replies, _Bad} = rpc:multicall(mem3_reshard, stop, [Reason]),
- case lists:usort(lists:flatten(Replies)) of
- [ok] ->
- {ok, {[{ok, true}]}};
- [Error | _] ->
- {error, {[{error, couch_util:to_binary(Error)}]}}
- end.
-
-
-get_shard_splitting_state() ->
- Nodes = mem3_util:live_nodes(),
- {Replies, _Bad} = rpc:multicall(Nodes, mem3_reshard, get_state, []),
- state_and_reason(Replies).
-
-
-state_and_reason(StateReplies) ->
- AccF = lists:foldl(fun({ResProps}, Acc) ->
- Reason = get_reason(ResProps),
- case couch_util:get_value(state, ResProps) of
- <<"running">> -> orddict:append(running, Reason, Acc);
- <<"stopped">> -> orddict:append(stopped, Reason, Acc);
- undefined -> Acc
- end
- end, orddict:from_list([{running, []}, {stopped, []}]), StateReplies),
- Running = orddict:fetch(running, AccF),
- case length(Running) > 0 of
- true ->
- Reason = pick_reason(Running),
- {running, Reason};
- false ->
- Reason = pick_reason(orddict:fetch(stopped, AccF)),
- {stopped, Reason}
- end.
-
-
-pick_reason(Reasons) ->
- Reasons1 = lists:usort(Reasons),
- Reasons2 = [R || R <- Reasons1, R =/= undefined],
- case Reasons2 of
- [] -> null;
- [R1 | _] -> R1
- end.
-
-
-get_reason(StateProps) when is_list(StateProps) ->
- case couch_util:get_value(state_info, StateProps) of
- [] -> undefined;
- undefined -> undefined;
- {SInfoProps} -> couch_util:get_value(reason, SInfoProps)
- end.
-
-
-pick_shards(undefined, undefined, Db, undefined) when is_binary(Db) ->
- check_node_required(),
- check_range_required(),
- mem3:shards(Db);
-
-pick_shards(Node, undefined, Db, undefined) when is_atom(Node),
- is_binary(Db) ->
- check_range_required(),
- [S || S <- mem3:shards(Db), mem3:node(S) == Node];
-
-pick_shards(undefined, undefined, Db, [_B, _E] = Range) when is_binary(Db) ->
- check_node_required(),
- [S || S <- mem3:shards(Db), mem3:range(S) == Range];
-
-pick_shards(Node, undefined, Db, [_B, _E] = Range) when is_atom(Node),
- is_binary(Db) ->
- [S || S <- mem3:shards(Db), mem3:node(S) == Node, mem3:range(S) == Range];
-
-pick_shards(undefined, Shard, undefined, undefined) when is_binary(Shard) ->
- check_node_required(),
- Db = mem3:dbname(Shard),
- [S || S <- mem3:shards(Db), mem3:name(S) == Shard];
-
-pick_shards(Node, Shard, undefined, undefined) when is_atom(Node),
- is_binary(Shard) ->
- Db = mem3:dbname(Shard),
- [S || S <- mem3:shards(Db), mem3:name(S) == Shard, mem3:node(S) == Node];
-
-pick_shards(_, undefined, undefined, _) ->
- throw({bad_request, <<"Must specify at least `db` or `shard`">>});
-
-pick_shards(_, Shard, Db, _) when is_binary(Shard), is_binary(Db) ->
- throw({bad_request, <<"`db` and `shard` are mutually exclusive">>}).
-
-
-check_node_required() ->
- case config:get_boolean("reshard", "require_node_param", false) of
- true ->
-            throw({bad_request, <<"`node` parameter is required">>});
- false ->
- ok
- end.
-
-check_range_required() ->
- case config:get_boolean("reshard", "require_range_param", false) of
- true ->
-            throw({bad_request, <<"`range` parameter is required">>});
- false ->
- ok
- end.
diff --git a/src/mem3/src/mem3_reshard_dbdoc.erl b/src/mem3/src/mem3_reshard_dbdoc.erl
deleted file mode 100644
index 4a0a35c1f..000000000
--- a/src/mem3/src/mem3_reshard_dbdoc.erl
+++ /dev/null
@@ -1,274 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard_dbdoc).
-
--behaviour(gen_server).
-
--export([
- update_shard_map/1,
-
- start_link/0,
-
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
--include("mem3_reshard.hrl").
-
-
--spec update_shard_map(#job{}) -> ok | no_return().
-update_shard_map(#job{source = Source, target = Target} = Job) ->
- Node = hd(mem3_util:live_nodes()),
- JobStr = mem3_reshard_job:jobfmt(Job),
- LogMsg1 = "~p : ~p calling update_shard_map node:~p",
- couch_log:notice(LogMsg1, [?MODULE, JobStr, Node]),
- ServerRef = {?MODULE, Node},
- CallArg = {update_shard_map, Source, Target},
- TimeoutMSec = shard_update_timeout_msec(),
- try
- case gen_server:call(ServerRef, CallArg, TimeoutMSec) of
- {ok, _} -> ok;
- {error, CallError} -> throw({error, CallError})
- end
- catch
- _:Err ->
- exit(Err)
- end,
- LogMsg2 = "~p : ~p update_shard_map on node:~p returned",
- couch_log:notice(LogMsg2, [?MODULE, JobStr, Node]),
- UntilSec = mem3_reshard:now_sec() + (TimeoutMSec div 1000),
- case wait_source_removed(Source, 5, UntilSec) of
- true -> ok;
- false -> exit(shard_update_did_not_propagate)
- end.
-
-
--spec start_link() -> {ok, pid()} | ignore | {error, term()}.
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-
-init(_) ->
- couch_log:notice("~p start init()", [?MODULE]),
- {ok, nil}.
-
-
-terminate(_Reason, _State) ->
- ok.
-
-
-handle_call({update_shard_map, Source, Target}, _From, State) ->
- Res = try
- update_shard_map(Source, Target)
- catch
- throw:{error, Error} ->
- {error, Error}
- end,
- {reply, Res, State};
-
-handle_call(Call, From, State) ->
- couch_log:error("~p unknown call ~p from: ~p", [?MODULE, Call, From]),
- {noreply, State}.
-
-
-handle_cast(Cast, State) ->
- couch_log:error("~p unexpected cast ~p", [?MODULE, Cast]),
- {noreply, State}.
-
-
-handle_info(Info, State) ->
- couch_log:error("~p unexpected info ~p", [?MODULE, Info]),
- {noreply, State}.
-
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-
-% Private
-
-update_shard_map(Source, Target) ->
- ok = validate_coordinator(),
- ok = replicate_from_all_nodes(shard_update_timeout_msec()),
- DocId = mem3:dbname(Source#shard.name),
- OldDoc = case mem3_util:open_db_doc(DocId) of
- {ok, #doc{deleted = true}} ->
- throw({error, missing_source});
- {ok, #doc{} = Doc} ->
- Doc;
- {not_found, deleted} ->
- throw({error, missing_source});
- OpenErr ->
- throw({error, {shard_doc_open_error, OpenErr}})
- end,
- #doc{body = OldBody} = OldDoc,
- NewBody = update_shard_props(OldBody, Source, Target),
- {ok, _} = write_shard_doc(OldDoc, NewBody),
- ok = replicate_to_all_nodes(shard_update_timeout_msec()),
- {ok, NewBody}.
-
-
-validate_coordinator() ->
- case hd(mem3_util:live_nodes()) =:= node() of
- true -> ok;
- false -> throw({error, coordinator_changed})
- end.
-
-
-replicate_from_all_nodes(TimeoutMSec) ->
- case mem3_util:replicate_dbs_from_all_nodes(TimeoutMSec) of
- ok -> ok;
- Error -> throw({error, Error})
- end.
-
-
-replicate_to_all_nodes(TimeoutMSec) ->
- case mem3_util:replicate_dbs_to_all_nodes(TimeoutMSec) of
- ok -> ok;
- Error -> throw({error, Error})
- end.
-
-
-write_shard_doc(#doc{id = Id} = Doc, Body) ->
- UpdatedDoc = Doc#doc{body = Body},
- couch_util:with_db(mem3_sync:shards_db(), fun(Db) ->
- try
- {ok, _} = couch_db:update_doc(Db, UpdatedDoc, [])
- catch
- conflict ->
- throw({error, {conflict, Id, Doc#doc.body, UpdatedDoc}})
- end
- end).
-
-
-update_shard_props({Props0}, #shard{} = Source, [#shard{} | _] = Targets) ->
- {ByNode0} = couch_util:get_value(<<"by_node">>, Props0, {[]}),
- ByNodeKV = {<<"by_node">>, {update_by_node(ByNode0, Source, Targets)}},
- Props1 = lists:keyreplace(<<"by_node">>, 1, Props0, ByNodeKV),
-
- {ByRange0} = couch_util:get_value(<<"by_range">>, Props1, {[]}),
- ByRangeKV = {<<"by_range">>, {update_by_range(ByRange0, Source, Targets)}},
- Props2 = lists:keyreplace(<<"by_range">>, 1, Props1, ByRangeKV),
-
- Changelog = couch_util:get_value(<<"changelog">>, Props2, []),
- {Node, Range} = {node_key(Source), range_key(Source)},
- TRanges = [range_key(T) || T <- Targets],
- ChangelogEntry = [[<<"split">>, Range, TRanges, Node]],
- ChangelogKV = {<<"changelog">>, Changelog ++ ChangelogEntry},
- Props3 = lists:keyreplace(<<"changelog">>, 1, Props2, ChangelogKV),
-
- {Props3}.
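To make the three rewrites above concrete, here is a sketch of the shard map
document body for a hypothetical one-node database split into two ranges (the
node name and ranges are made up; the key formats come from node_key/1 and
range_key/1 below):

    %% Before:
    %%   by_node:   {"node1@127.0.0.1": ["00000000-ffffffff"]}
    %%   by_range:  {"00000000-ffffffff": ["node1@127.0.0.1"]}
    %% After update_shard_props(Body, Source, [T1, T2]):
    %%   by_node:   {"node1@127.0.0.1": ["00000000-7fffffff", "80000000-ffffffff"]}
    %%   by_range:  {"00000000-7fffffff": ["node1@127.0.0.1"],
    %%               "80000000-ffffffff": ["node1@127.0.0.1"]}
    %%   changelog: ... ++ [["split", "00000000-ffffffff",
    %%                       ["00000000-7fffffff", "80000000-ffffffff"],
    %%                       "node1@127.0.0.1"]]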
-
-
-update_by_node(ByNode, #shard{} = Source, [#shard{} | _] = Targets) ->
- {NodeKey, SKey} = {node_key(Source), range_key(Source)},
- {_, Ranges} = lists:keyfind(NodeKey, 1, ByNode),
- Ranges1 = Ranges -- [SKey],
- Ranges2 = Ranges1 ++ [range_key(T) || T <- Targets],
- lists:keyreplace(NodeKey, 1, ByNode, {NodeKey, lists:sort(Ranges2)}).
-
-
-update_by_range(ByRange, Source, Targets) ->
- ByRange1 = remove_node_from_source(ByRange, Source),
- lists:foldl(fun add_node_to_target_foldl/2, ByRange1, Targets).
-
-
-remove_node_from_source(ByRange, Source) ->
- {NodeKey, SKey} = {node_key(Source), range_key(Source)},
- {_, SourceNodes} = lists:keyfind(SKey, 1, ByRange),
- % Double check that source had node to begin with
- case lists:member(NodeKey, SourceNodes) of
- true ->
- ok;
- false ->
- throw({source_shard_missing_node, NodeKey, SourceNodes})
- end,
- SourceNodes1 = SourceNodes -- [NodeKey],
- case SourceNodes1 of
- [] ->
- % If last node deleted, remove entry
- lists:keydelete(SKey, 1, ByRange);
- _ ->
- lists:keyreplace(SKey, 1, ByRange, {SKey, SourceNodes1})
- end.
-
-
-add_node_to_target_foldl(#shard{} = Target, ByRange) ->
- {NodeKey, TKey} = {node_key(Target), range_key(Target)},
- case lists:keyfind(TKey, 1, ByRange) of
- {_, Nodes} ->
- % Double check that target does not have node already
- case lists:member(NodeKey, Nodes) of
- false ->
- ok;
- true ->
- throw({target_shard_already_has_node, NodeKey, Nodes})
- end,
- Nodes1 = lists:sort([NodeKey | Nodes]),
- lists:keyreplace(TKey, 1, ByRange, {TKey, Nodes1});
- false ->
- % fabric_db_create:make_document/3 says they should be sorted
- lists:sort([{TKey, [NodeKey]} | ByRange])
- end.
-
-
-node_key(#shard{node = Node}) ->
- couch_util:to_binary(Node).
-
-
-range_key(#shard{range = [B, E]}) ->
- BHex = couch_util:to_hex(<<B:32/integer>>),
- EHex = couch_util:to_hex(<<E:32/integer>>),
- list_to_binary([BHex, "-", EHex]).
-
-
-shard_update_timeout_msec() ->
-    config:get_integer("reshard", "shard_update_timeout_msec", 300000).
-
-
-wait_source_removed(#shard{name = Name} = Source, SleepSec, UntilSec) ->
- case check_source_removed(Source) of
- true ->
- true;
- false ->
- case mem3_reshard:now_sec() < UntilSec of
- true ->
- LogMsg = "~p : Waiting for shard ~p removal confirmation",
- couch_log:notice(LogMsg, [?MODULE, Name]),
- timer:sleep(SleepSec * 1000),
- wait_source_removed(Source, SleepSec, UntilSec);
- false ->
- false
- end
- end.
-
-
-check_source_removed(#shard{name = Name}) ->
- DbName = mem3:dbname(Name),
- Live = mem3_util:live_nodes(),
- ShardNodes = [N || #shard{node = N} <- mem3:shards(DbName)],
- Nodes = lists:usort([N || N <- ShardNodes, lists:member(N, Live)]),
- {Responses, _} = rpc:multicall(Nodes, mem3, shards, [DbName]),
- Shards = lists:usort(lists:flatten(Responses)),
-    SourcePresent = [S || #shard{name = S, node = N} <- Shards, S =:= Name,
-        N =:= node()],
- case SourcePresent of
- [] -> true;
- [_ | _] -> false
- end.
diff --git a/src/mem3/src/mem3_reshard_httpd.erl b/src/mem3/src/mem3_reshard_httpd.erl
deleted file mode 100644
index 3d0f77f39..000000000
--- a/src/mem3/src/mem3_reshard_httpd.erl
+++ /dev/null
@@ -1,317 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard_httpd).
-
--export([
- handle_reshard_req/1
-]).
-
--import(couch_httpd, [
- send_json/2,
- send_json/3,
- send_method_not_allowed/2
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
-
-
--define(JOBS, <<"jobs">>).
--define(STATE, <<"state">>).
--define(S_RUNNING, <<"running">>).
--define(S_STOPPED, <<"stopped">>).
-
-
-% GET /_reshard
-handle_reshard_req(#httpd{method='GET', path_parts=[_]} = Req) ->
- reject_if_disabled(),
- State = mem3_reshard_api:get_summary(),
- send_json(Req, State);
-
-handle_reshard_req(#httpd{path_parts=[_]} = Req) ->
- send_method_not_allowed(Req, "GET,HEAD");
-
-% GET /_reshard/state
-handle_reshard_req(#httpd{method='GET',
- path_parts=[_, ?STATE]} = Req) ->
- reject_if_disabled(),
- {State, Reason} = mem3_reshard_api:get_shard_splitting_state(),
- send_json(Req, {[{state, State}, {reason, Reason}]});
-
-% PUT /_reshard/state
-handle_reshard_req(#httpd{method='PUT',
- path_parts=[_, ?STATE]} = Req) ->
- reject_if_disabled(),
- couch_httpd:validate_ctype(Req, "application/json"),
- {Props} = couch_httpd:json_body_obj(Req),
- State = couch_util:get_value(<<"state">>, Props),
- Reason = couch_util:get_value(<<"reason">>, Props),
- case {State, Reason} of
- {undefined, _} ->
- throw({bad_request, <<"Expected a `state` field">>});
- {?S_RUNNING, _} ->
- case mem3_reshard_api:start_shard_splitting() of
- {ok, JsonResult} ->
- send_json(Req, 200, JsonResult);
- {error, JsonError} ->
- send_json(Req, 500, JsonError)
- end;
- {?S_STOPPED, Reason} ->
- Reason1 = case Reason =:= undefined of
- false -> Reason;
- true -> <<"Cluster-wide resharding stopped by the user">>
- end,
- case mem3_reshard_api:stop_shard_splitting(Reason1) of
- {ok, JsonResult} ->
- send_json(Req, 200, JsonResult);
- {error, JsonError} ->
- send_json(Req, 500, JsonError)
- end;
- {_, _} ->
- throw({bad_request, <<"State field not `running` or `stopped`">>})
- end;
-
-handle_reshard_req(#httpd{path_parts=[_, ?STATE]} = Req) ->
- send_method_not_allowed(Req, "GET,HEAD,PUT");
-
-handle_reshard_req(#httpd{path_parts=[_, ?STATE | _]} = _Req) ->
- throw(not_found);
-
-% GET /_reshard/jobs
-handle_reshard_req(#httpd{method='GET', path_parts=[_, ?JOBS]}=Req) ->
- reject_if_disabled(),
- Jobs = mem3_reshard_api:get_jobs(),
- Total = length(Jobs),
- send_json(Req, {[{total_rows, Total}, {offset, 0}, {jobs, Jobs}]});
-
-% POST /_reshard/jobs {"node": "...", "shard": "..."}
-handle_reshard_req(#httpd{method = 'POST',
- path_parts=[_, ?JOBS]} = Req) ->
- reject_if_disabled(),
- couch_httpd:validate_ctype(Req, "application/json"),
- {Props} = couch_httpd:json_body_obj(Req),
- Node = validate_node(couch_util:get_value(<<"node">>, Props)),
- Shard = validate_shard(couch_util:get_value(<<"shard">>, Props)),
- Db = validate_db(couch_util:get_value(<<"db">>, Props)),
- Range = validate_range(couch_util:get_value(<<"range">>, Props)),
- Type = validate_type(couch_util:get_value(<<"type">>, Props)),
- Res = mem3_reshard_api:create_jobs(Node, Shard, Db, Range, Type),
- case Res of
- [] -> throw(not_found);
- _ -> ok
- end,
- Oks = length([R || {ok, _, _, _} = R <- Res]),
- Code = case {Oks, length(Res)} of
- {Oks, Oks} -> 201;
- {Oks, _} when Oks > 0 -> 202;
- {0, _} -> 500
- end,
- EJson = lists:map(fun
- ({ok, Id, N, S}) ->
- {[{ok, true}, {id, Id}, {node, N}, {shard, S}]};
- ({error, E, N, S}) ->
- {[{error, couch_util:to_binary(E)}, {node, N}, {shard, S}]}
- end, Res),
- send_json(Req, Code, EJson);
-
-handle_reshard_req(#httpd{path_parts=[_, ?JOBS]} = Req) ->
- send_method_not_allowed(Req, "GET,HEAD,POST");
-
-handle_reshard_req(#httpd{path_parts=[_, _]}) ->
- throw(not_found);
-
-% GET /_reshard/jobs/$jobid
-handle_reshard_req(#httpd{method='GET',
- path_parts=[_, ?JOBS, JobId]}=Req) ->
- reject_if_disabled(),
- case mem3_reshard_api:get_job(JobId) of
- {ok, JobInfo} ->
- send_json(Req, JobInfo);
- {error, not_found} ->
- throw(not_found)
- end;
-
-% DELETE /_reshard/jobs/$jobid
-handle_reshard_req(#httpd{method='DELETE',
- path_parts=[_, ?JOBS, JobId]}=Req) ->
- reject_if_disabled(),
- case mem3_reshard_api:get_job(JobId) of
- {ok, {Props}} ->
- NodeBin = couch_util:get_value(node, Props),
- Node = binary_to_atom(NodeBin, utf8),
- case rpc:call(Node, mem3_reshard, remove_job, [JobId]) of
- ok ->
- send_json(Req, 200, {[{ok, true}]});
- {error, not_found} ->
- throw(not_found)
- end;
- {error, not_found} ->
- throw(not_found)
- end;
-
-handle_reshard_req(#httpd{path_parts=[_, ?JOBS, _]} = Req) ->
- send_method_not_allowed(Req, "GET,HEAD,DELETE");
-
-% GET /_reshard/jobs/$jobid/state
-handle_reshard_req(#httpd{method='GET',
- path_parts=[_, ?JOBS, JobId, ?STATE]} = Req) ->
- reject_if_disabled(),
- case mem3_reshard_api:get_job(JobId) of
- {ok, {Props}} ->
- JobState = couch_util:get_value(job_state, Props),
- {SIProps} = couch_util:get_value(state_info, Props),
- Reason = case couch_util:get_value(reason, SIProps) of
- undefined -> null;
- Val -> couch_util:to_binary(Val)
- end,
- send_json(Req, 200, {[{state, JobState}, {reason, Reason}]});
- {error, not_found} ->
- throw(not_found)
- end;
-
-% PUT /_reshard/jobs/$jobid/state
-handle_reshard_req(#httpd{method='PUT',
- path_parts=[_, ?JOBS, JobId, ?STATE]} = Req) ->
- reject_if_disabled(),
- couch_httpd:validate_ctype(Req, "application/json"),
- {Props} = couch_httpd:json_body_obj(Req),
- State = couch_util:get_value(<<"state">>, Props),
- Reason = couch_util:get_value(<<"reason">>, Props),
- case {State, Reason} of
- {undefined, _} ->
- throw({bad_request, <<"Expected a `state` field">>});
- {?S_RUNNING, _} ->
- case mem3_reshard_api:resume_job(JobId) of
- ok ->
- send_json(Req, 200, {[{ok, true}]});
- {error, not_found} ->
- throw(not_found);
- {error, JsonError} ->
- send_json(Req, 500, JsonError)
- end;
- {?S_STOPPED, Reason} ->
- Reason1 = case Reason =:= undefined of
- false -> Reason;
- true -> <<"Stopped by user">>
- end,
- case mem3_reshard_api:stop_job(JobId, Reason1) of
- ok ->
- send_json(Req, 200, {[{ok, true}]});
- {error, not_found} ->
- throw(not_found);
- {error, JsonError} ->
- send_json(Req, 500, JsonError)
- end;
- {_, _} ->
- throw({bad_request, <<"State field not `running` or `stopped`">>})
- end;
-
-handle_reshard_req(#httpd{path_parts=[_, ?JOBS, _, ?STATE]} = Req) ->
- send_method_not_allowed(Req, "GET,HEAD,PUT").
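For orientation, the fields accepted by the POST /_reshard/jobs handler above
map directly onto the validators that follow; the body below is only an
illustrative sketch (database, node and range values are hypothetical):

    %% POST /_reshard/jobs with a JSON body such as:
    %%   {"type": "split",
    %%    "db": "mydb",                  % or "shard": "shards/...-.../mydb.1565"
    %%    "node": "node1@127.0.0.1",     % optional: restrict to one node
    %%    "range": "80000000-ffffffff"}  % optional: restrict to one range
    %% Response: 201 if every job was created, 202 on partial success, and
    %% 500 if none could be created.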
-
-
-reject_if_disabled() ->
- case mem3_reshard:is_disabled() of
- true -> throw(not_implemented);
- false -> ok
- end.
-
-
-validate_type(<<"split">>) ->
- split;
-
-validate_type(_Type) ->
-    throw({bad_request, <<"Job `type` must be `split`">>}).
-
-
-validate_node(undefined) ->
- undefined;
-
-validate_node(Node0) when is_binary(Node0) ->
- Nodes = mem3_util:live_nodes(),
- try binary_to_existing_atom(Node0, utf8) of
- N1 ->
- case lists:member(N1, Nodes) of
- true -> N1;
- false -> throw({bad_request, <<"Not connected to `node`">>})
- end
- catch
- error:badarg ->
- throw({bad_request, <<"`node` is not a valid node name">>})
- end;
-
-validate_node(_Node) ->
- throw({bad_request, <<"Invalid `node`">>}).
-
-
-validate_shard(undefined) ->
- undefined;
-
-validate_shard(Shard) when is_binary(Shard) ->
- case Shard of
- <<"shards/", _:8/binary, "-", _:8/binary, "/", _/binary>> ->
- Shard;
- _ ->
- throw({bad_request, <<"`shard` is invalid">>})
- end;
-
-validate_shard(_Shard) ->
- throw({bad_request, <<"Invalid `shard`">>}).
-
-
-validate_db(undefined) ->
- undefined;
-
-validate_db(DbName) when is_binary(DbName) ->
- try mem3:shards(DbName) of
- [_ | _] -> DbName;
-        _ -> throw({bad_request, <<"No shards in `db`">>})
- catch
- _:_ ->
- throw({bad_request, <<"Invalid `db`">>})
- end;
-
-validate_db(_DbName) ->
- throw({bad_request, <<"Invalid `db`">>}).
-
-
-validate_range(undefined) ->
- undefined;
-
-validate_range(<<BBin:8/binary, "-", EBin:8/binary>>) ->
- {B, E} = try
- {
- httpd_util:hexlist_to_integer(binary_to_list(BBin)),
- httpd_util:hexlist_to_integer(binary_to_list(EBin))
- }
- catch
- _:_ ->
- invalid_range()
- end,
- if
- B < 0 -> invalid_range();
- E < 0 -> invalid_range();
- B > (2 bsl 31) - 1 -> invalid_range();
- E > (2 bsl 31) - 1 -> invalid_range();
- B >= E -> invalid_range();
- true -> ok
- end,
- % Use a list format here to make it look the same as #shard's range
- [B, E];
-
-validate_range(_Range) ->
- invalid_range().
-
-
-invalid_range() ->
- throw({bad_request, <<"Invalid `range`">>}).
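A sketch of the range format parsed above (validate_range/1 is not exported;
results are worked out by hand from the code). The two hex halves are the
inclusive begin and end of a shard range, and begin must be strictly less than
end:

    validate_range(<<"00000000-7fffffff">>).
    %% => [0, 2147483647]
    validate_range(<<"7fffffff-00000000">>).
    %% => throws {bad_request, <<"Invalid `range`">>} since B >= E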
diff --git a/src/mem3/src/mem3_reshard_index.erl b/src/mem3/src/mem3_reshard_index.erl
deleted file mode 100644
index d4cb7caa1..000000000
--- a/src/mem3/src/mem3_reshard_index.erl
+++ /dev/null
@@ -1,164 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard_index).
-
-
--export([
- design_docs/1,
- target_indices/2,
- spawn_builders/1
-]).
-
-
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
-%% Public API
-
-design_docs(DbName) ->
- try
- case fabric_design_docs(mem3:dbname(DbName)) of
- {error, {maintenance_mode, _, _Node}} ->
- {ok, []};
- {ok, DDocs} ->
- JsonDocs = [couch_doc:from_json_obj(DDoc) || DDoc <- DDocs],
- {ok, JsonDocs};
- Else ->
- Else
- end
- catch error:database_does_not_exist ->
- {ok, []}
- end.
-
-
-target_indices(Docs, Targets) ->
- Indices = [[indices(N, D) || D <- Docs] || #shard{name = N} <- Targets],
- lists:flatten(Indices).
-
-
-spawn_builders(Indices) ->
- Results = [build_index(Index) || Index <- Indices],
- Oks = [{ok, Pid} || {ok, Pid} <- Results, is_pid(Pid)],
- case Results -- Oks of
- [] ->
- {ok, [Pid || {ok, Pid} <- Results]};
- Error ->
-            % Do an all-or-nothing pattern: if some indices could not be
-            % spawned, kill the spawned ones and return the error.
- ErrMsg = "~p failed to spawn index builders: ~p ~p",
- couch_log:error(ErrMsg, [?MODULE, Error, Indices]),
- lists:foreach(fun({ok, Pid}) ->
- catch unlink(Pid),
- catch exit(Pid, kill)
- end, Oks),
- {error, Error}
- end.
-
-
-%% Private API
-
-fabric_design_docs(DbName) ->
- case couch_util:with_proc(fabric, design_docs, [DbName], infinity) of
- {ok, Resp} -> Resp;
- {error, Error} -> Error
- end.
-
-
-indices(DbName, Doc) ->
- mrview_indices(DbName, Doc)
- ++ [dreyfus_indices(DbName, Doc) || has_app(dreyfus)]
- ++ [hastings_indices(DbName, Doc) || has_app(hastings)].
-
-
-mrview_indices(DbName, Doc) ->
- try
- {ok, MRSt} = couch_mrview_util:ddoc_to_mrst(DbName, Doc),
- Views = couch_mrview_index:get(views, MRSt),
- case Views =/= [] of
- true ->
- [{mrview, DbName, MRSt}];
- false ->
- []
- end
- catch
- Tag:Err ->
- Msg = "~p couldn't get mrview index ~p ~p ~p:~p",
- couch_log:error(Msg, [?MODULE, DbName, Doc, Tag, Err]),
- []
- end.
-
-
-dreyfus_indices(DbName, Doc) ->
- try
- Indices = dreyfus_index:design_doc_to_indexes(Doc),
- [{dreyfus, DbName, Index} || Index <- Indices]
- catch
- Tag:Err ->
- Msg = "~p couldn't get dreyfus indices ~p ~p ~p:~p",
- couch_log:error(Msg, [?MODULE, DbName, Doc, Tag, Err]),
- []
- end.
-
-
-hastings_indices(DbName, Doc) ->
- try
- Indices = hastings_index:design_doc_to_indexes(Doc),
- [{hastings, DbName, Index} || Index <- Indices]
- catch
- Tag:Err ->
-            Msg = "~p couldn't get hastings indices ~p ~p ~p:~p",
- couch_log:error(Msg, [?MODULE, DbName, Doc, Tag, Err]),
- []
- end.
-
-
-build_index({mrview, DbName, MRSt}) ->
- case couch_index_server:get_index(couch_mrview_index, MRSt) of
- {ok, Pid} ->
- Args = [Pid, get_update_seq(DbName)],
- WPid = spawn_link(couch_index, get_state, Args),
- {ok, WPid};
- Error ->
- Error
- end;
-
-build_index({dreyfus, DbName, Index}) ->
- case dreyfus_index_manager:get_index(DbName, Index) of
- {ok, Pid} ->
- Args = [Pid, get_update_seq(DbName)],
- WPid = spawn_link(dreyfus_index, await, Args),
- {ok, WPid};
- Error ->
- Error
- end;
-
-build_index({hastings, DbName, Index}) ->
- case hastings_index_manager:get_index(DbName, Index) of
- {ok, Pid} ->
- Args = [Pid, get_update_seq(DbName)],
- WPid = spawn_link(hastings_index, await, Args),
- {ok, WPid};
- Error ->
- Error
- end.
-
-
-has_app(App) ->
- code:lib_dir(App) /= {error, bad_name}.
-
-
-get_update_seq(DbName) ->
- couch_util:with_db(DbName, fun(Db) ->
- couch_db:get_update_seq(Db)
- end).
diff --git a/src/mem3/src/mem3_reshard_job.erl b/src/mem3/src/mem3_reshard_job.erl
deleted file mode 100644
index aedca21bb..000000000
--- a/src/mem3/src/mem3_reshard_job.erl
+++ /dev/null
@@ -1,716 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard_job).
-
-
--export([
- start_link/1,
-
- checkpoint_done/1,
- jobfmt/1,
- pickfun/3
-]).
-
--export([
- init/1,
-
- initial_copy/1,
- initial_copy_impl/1,
-
- topoff/1,
- topoff_impl/1,
-
- build_indices/1,
-
- copy_local_docs/1,
- copy_local_docs_impl/1,
-
- update_shardmap/1,
-
- wait_source_close/1,
- wait_source_close_impl/1,
-
- source_delete/1,
- source_delete_impl/1,
-
- completed/1
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
--include("mem3_reshard.hrl").
-
-
-% Batch size for internal replication topoffs
--define(INTERNAL_REP_BATCH_SIZE, 2000).
-
-% The list of possible job states. The order of this
-% list is important as a job will progress linearly
-% through it. However, when starting a job we may
-% have to resume from an earlier state as listed
-% below in STATE_RESTART.
--define(SPLIT_STATES, [
- new,
- initial_copy,
- topoff1,
- build_indices,
- topoff2,
- copy_local_docs,
- update_shardmap,
- wait_source_close,
- topoff3,
- source_delete,
- completed
-]).
-
-
-% When a job starts it may be resuming from a partially
-% completed state. These state pairs list the state
-% we have to restart from for each possible state.
--define(STATE_RESTART, #{
- new => initial_copy,
- initial_copy => initial_copy,
- topoff1 => topoff1,
- build_indices => topoff1,
- topoff2 => topoff1,
- copy_local_docs => topoff1,
- update_shardmap => update_shardmap,
- wait_source_close => wait_source_close,
- topoff3 => wait_source_close,
- source_delete => wait_source_close,
- completed => completed
-}).
-
-
-% If a worker fails during any of these
-% states we need to clean up the targets
--define(CLEAN_TARGET_STATES, [
- initial_copy,
- topoff1,
- build_indices,
- topoff2,
- copy_local_docs
-]).
-
-
-start_link(#job{} = Job) ->
- proc_lib:start_link(?MODULE, init, [Job]).
-
-
-% This is called by the main process after it has checkpointed the progress
-% of the job. After the new state is checkpointed, we signal the job to start
-% executing that state.
-checkpoint_done(#job{pid = Pid} = Job) ->
- couch_log:notice(" ~p : checkpoint done for ~p", [?MODULE, jobfmt(Job)]),
- Pid ! checkpoint_done,
- ok.
-
-
-% Formatting function, used for logging mostly
-jobfmt(#job{} = Job) ->
- #job{
- id = Id,
- source = #shard{name = Source},
- target = Target,
- split_state = State,
- job_state = JobState,
- pid = Pid
- } = Job,
- TargetCount = length(Target),
- Msg = "#job{~s ~s /~B job_state:~s split_state:~s pid:~p}",
- Fmt = io_lib:format(Msg, [Id, Source, TargetCount, JobState, State, Pid]),
- lists:flatten(Fmt).
-
-
-% This is the function which picks between various targets. It is used here as
-% well as in the mem3_rep internal replicator and in the couch_db_split bulk
-% copy logic. Given a document id, a list of ranges and a hash function, it
-% picks the matching range or returns the not_in_range atom.
-pickfun(DocId, [[B, E] | _] = Ranges, {_M, _F, _A} = HashFun) when
- is_integer(B), is_integer(E), B =< E ->
- HashKey = mem3_hash:calculate(HashFun, DocId),
- Pred = fun([Begin, End]) ->
- Begin =< HashKey andalso HashKey =< End
- end,
- case lists:filter(Pred, Ranges) of
- [] -> not_in_range;
- [Key] -> Key
- end.
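A small usage sketch for pickfun/3 (the hash MFA shown is only a stand-in for
illustration; callers pass whatever hash function the database was created
with):

    Ranges = [[0, 16#7fffffff], [16#80000000, 16#ffffffff]],
    HashFun = {mem3_hash, crc32, []},   % hypothetical MFA
    pickfun(<<"mydoc">>, Ranges, HashFun).
    %% => one of the two ranges, e.g. [0, 2147483647], or not_in_range if
    %%    the hash falls outside every supplied range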
-
-
-init(#job{} = Job0) ->
- process_flag(trap_exit, true),
- Job1 = set_start_state(Job0#job{
- pid = self(),
- start_time = mem3_reshard:now_sec(),
- workers = [],
- retries = 0
- }),
- Job2 = update_split_history(Job1),
- proc_lib:init_ack({ok, self()}),
- couch_log:notice("~p starting job ~s", [?MODULE, jobfmt(Job2)]),
- ok = checkpoint(Job2),
- run(Job2).
-
-
-run(#job{split_state = CurrState} = Job) ->
- StateFun = case CurrState of
- topoff1 -> topoff;
- topoff2 -> topoff;
- topoff3 -> topoff;
- _ -> CurrState
- end,
- NewJob = try
- Job1 = ?MODULE:StateFun(Job),
- Job2 = wait_for_workers(Job1),
- Job3 = switch_to_next_state(Job2),
- ok = checkpoint(Job3),
- Job3
- catch
- throw:{retry, RetryJob} ->
- RetryJob
- end,
- run(NewJob).
-
-
-set_start_state(#job{split_state = State} = Job) ->
- case maps:get(State, ?STATE_RESTART, undefined) of
- undefined ->
- Fmt1 = "~p recover : unknown state ~s",
- couch_log:error(Fmt1, [?MODULE, jobfmt(Job)]),
- erlang:error({invalid_split_job_recover_state, Job});
-        StartState ->
- Job#job{split_state = StartState}
- end.
-
-
-get_next_state(#job{split_state = State}) ->
- get_next_state(State, ?SPLIT_STATES).
-
-
-get_next_state(completed, _) ->
- completed;
-
-get_next_state(CurrState, [CurrState, NextState | _]) ->
- NextState;
-
-get_next_state(CurrState, [_ | Rest]) ->
- get_next_state(CurrState, Rest).
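In other words, the job walks the ?SPLIT_STATES list one element at a time;
for example (read off the list above):

    %% get_next_state(topoff1, ?SPLIT_STATES)       -> build_indices
    %% get_next_state(source_delete, ?SPLIT_STATES) -> completed
    %% get_next_state(completed, _)                 -> completed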
-
-
-switch_to_next_state(#job{} = Job0) ->
- Info0 = Job0#job.state_info,
- Info1 = info_delete(error, Info0),
- Info2 = info_delete(reason, Info1),
- Job1 = Job0#job{
- split_state = get_next_state(Job0),
- update_time = mem3_reshard:now_sec(),
- retries = 0,
- state_info = Info2,
- workers = []
- },
- Job2 = update_split_history(Job1),
- check_state(Job2).
-
-
-checkpoint(Job) ->
-    % Ask the main process to checkpoint. When it has finished it will notify
-    % us by calling checkpoint_done/1. The reason not to call the main process
-    % via a gen_server:call is that the main process could be in the middle of
-    % terminating the job, in which case the call would deadlock (after it has
-    % sent us a shutdown message) and only give up after the whole supervisor
-    % termination timeout had elapsed.
- ok = mem3_reshard:checkpoint(Job#job.manager, Job),
- Parent = parent(),
- receive
- {'EXIT', Parent, Reason} ->
- handle_exit(Job, Reason);
- checkpoint_done ->
- ok;
- Other ->
- handle_unknown_msg(Job, "checkpoint", Other)
- end.
-
-
-wait_for_workers(#job{workers = []} = Job) ->
- Job;
-
-wait_for_workers(#job{workers = Workers} = Job) ->
- Parent = parent(),
- receive
- {'EXIT', Parent, Reason} ->
- handle_exit(Job, Reason);
- {'EXIT', Pid, Reason} ->
- case lists:member(Pid, Workers) of
- true ->
- NewJob = handle_worker_exit(Job, Pid, Reason),
- wait_for_workers(NewJob);
- false ->
- handle_unknown_msg(Job, "wait_for_workers", {Pid, Reason})
- end;
- Other ->
- handle_unknown_msg(Job, "wait_for_workers", Other)
- end.
-
-
-handle_worker_exit(#job{workers = Workers} = Job, Pid, normal) ->
- Job#job{workers = Workers -- [Pid]};
-
-handle_worker_exit(#job{} = Job, _Pid, {error, missing_source}) ->
- Msg1 = "~p stopping worker due to source missing ~p",
- couch_log:error(Msg1, [?MODULE, jobfmt(Job)]),
- kill_workers(Job),
- case lists:member(Job#job.split_state, ?CLEAN_TARGET_STATES) of
- true ->
- Msg2 = "~p cleaning target after db was deleted ~p",
- couch_log:error(Msg2, [?MODULE, jobfmt(Job)]),
- reset_target(Job),
- exit({error, missing_source});
- false ->
- exit({error, missing_source})
- end;
-
-handle_worker_exit(#job{} = Job, _Pid, {error, missing_target}) ->
- Msg = "~p stopping worker due to target db missing ~p",
- couch_log:error(Msg, [?MODULE, jobfmt(Job)]),
- kill_workers(Job),
- exit({error, missing_target});
-
-handle_worker_exit(#job{} = Job0, _Pid, Reason) ->
- couch_log:error("~p worker error ~p ~p", [?MODULE, jobfmt(Job0), Reason]),
- kill_workers(Job0),
- Job1 = Job0#job{workers = []},
- case Job1#job.retries =< max_retries() of
- true ->
- retry_state(Job1, Reason);
- false ->
- exit(Reason)
- end.
-
-
-% Cleanup and exit when we receive an 'EXIT' message from our parent. In case
-% the shard map is being updated, try to wait some time for it to finish.
-handle_exit(#job{split_state = update_shardmap, workers = [WPid]} = Job,
- Reason) ->
- Timeout = update_shard_map_timeout_sec(),
- Msg1 = "~p job exit ~s ~p while shard map is updating, waiting ~p sec",
- couch_log:warning(Msg1, [?MODULE, jobfmt(Job), Reason, Timeout]),
- receive
- {'EXIT', WPid, normal} ->
- Msg2 = "~p ~s shard map finished updating successfully, exiting",
- couch_log:notice(Msg2, [?MODULE, jobfmt(Job)]),
- exit(Reason);
- {'EXIT', WPid, Error} ->
- Msg3 = "~p ~s shard map update failed with error ~p",
- couch_log:error(Msg3, [?MODULE, jobfmt(Job), Error]),
- exit(Reason)
-    after Timeout * 1000 ->
- Msg4 = "~p ~s shard map update timeout exceeded ~p sec",
- couch_log:error(Msg4, [?MODULE, jobfmt(Job), Timeout]),
- kill_workers(Job),
- exit(Reason)
- end;
-
-handle_exit(#job{} = Job, Reason) ->
- kill_workers(Job),
- exit(Reason).
-
-
-retry_state(#job{retries = Retries, state_info = Info} = Job0, Error) ->
- Job1 = Job0#job{
- retries = Retries + 1,
- state_info = info_update(error, Error, Info)
- },
- couch_log:notice("~p retrying ~p ~p", [?MODULE, jobfmt(Job1), Retries]),
- Job2 = report(Job1),
- Timeout = retry_interval_sec(),
- Parent = parent(),
- receive
- {'EXIT', Parent, Reason} ->
- handle_exit(Job2, Reason);
- Other ->
- handle_unknown_msg(Job2, "retry_state", Other)
- after Timeout * 1000 ->
- ok
- end,
- throw({retry, Job2}).
-
-
-report(#job{manager = ManagerPid} = Job) ->
- Job1 = Job#job{update_time = mem3_reshard:now_sec()},
- ok = mem3_reshard:report(ManagerPid, Job1),
- Job1.
-
-
-kill_workers(#job{workers = Workers}) ->
- lists:foreach(fun(Worker) ->
- unlink(Worker),
- exit(Worker, kill)
- end, Workers),
- flush_worker_messages().
-
-
-flush_worker_messages() ->
- Parent = parent(),
- receive
- {'EXIT', Pid, _} when Pid =/= Parent ->
- flush_worker_messages()
- after 0 ->
- ok
- end.
-
-
-parent() ->
- case get('$ancestors') of
- [Pid | _] when is_pid(Pid) -> Pid;
- [Name | _] when is_atom(Name) -> whereis(Name);
- _ -> undefined
- end.
-
-
-handle_unknown_msg(Job, When, RMsg) ->
- LogMsg = "~p ~s received an unknown message ~p when in ~s",
- couch_log:error(LogMsg, [?MODULE, jobfmt(Job), RMsg, When]),
- erlang:error({invalid_split_job_message, Job#job.id, When, RMsg}).
-
-
-initial_copy(#job{} = Job) ->
- Pid = spawn_link(?MODULE, initial_copy_impl, [Job]),
- report(Job#job{workers = [Pid]}).
-
-
-initial_copy_impl(#job{source = Source, target = Targets0} = Job) ->
- #shard{name = SourceName} = Source,
- Targets = [{R, N} || #shard{range = R, name = N} <- Targets0],
- TMap = maps:from_list(Targets),
- LogMsg1 = "~p initial_copy started ~s",
- LogArgs1 = [?MODULE, shardsstr(Source, Targets0)],
- couch_log:notice(LogMsg1, LogArgs1),
- reset_target(Job),
- case couch_db_split:split(SourceName, TMap, fun pickfun/3) of
- {ok, Seq} ->
- LogMsg2 = "~p initial_copy of ~s finished @ seq:~p",
- LogArgs2 = [?MODULE, shardsstr(Source, Targets0), Seq],
- couch_log:notice(LogMsg2, LogArgs2),
- create_artificial_mem3_rep_checkpoints(Job, Seq);
- {error, Error} ->
-            LogMsg3 = "~p initial_copy of ~s failed ~p",
-            LogArgs3 = [?MODULE, shardsstr(Source, Targets0), Error],
-            couch_log:error(LogMsg3, LogArgs3),
- exit({error, Error})
- end.
-
-
-topoff(#job{} = Job) ->
- Pid = spawn_link(?MODULE, topoff_impl, [Job]),
- report(Job#job{workers = [Pid]}).
-
-
-topoff_impl(#job{source = #shard{} = Source, target = Targets}) ->
- couch_log:notice("~p topoff ~p", [?MODULE, shardsstr(Source, Targets)]),
- check_source_exists(Source, topoff),
- check_targets_exist(Targets, topoff),
- TMap = maps:from_list([{R, T} || #shard{range = R} = T <- Targets]),
- Opts = [{batch_size, ?INTERNAL_REP_BATCH_SIZE}, {batch_count, all}],
- case mem3_rep:go(Source, TMap, Opts) of
- {ok, Count} ->
- Args = [?MODULE, shardsstr(Source, Targets), Count],
- couch_log:notice("~p topoff done ~s, count: ~p", Args),
- ok;
- {error, Error} ->
- Args = [?MODULE, shardsstr(Source, Targets), Error],
- couch_log:error("~p topoff failed ~s, error: ~p", Args),
- exit({error, Error})
- end.
-
-
-build_indices(#job{} = Job) ->
- #job{
- source = #shard{name = SourceName} = Source,
- target = Targets,
- retries = Retries,
- state_info = Info
- } = Job,
- check_source_exists(Source, build_indices),
- {ok, DDocs} = mem3_reshard_index:design_docs(SourceName),
- Indices = mem3_reshard_index:target_indices(DDocs, Targets),
- case mem3_reshard_index:spawn_builders(Indices) of
- {ok, []} ->
- % Skip the log spam if this is a no-op
- Job#job{workers = []};
- {ok, Pids} ->
- report(Job#job{workers = Pids});
- {error, Error} ->
- case Job#job.retries =< max_retries() of
- true ->
- build_indices(Job#job{
- retries = Retries + 1,
- state_info = info_update(error, Error, Info)
- });
- false ->
- exit(Error)
- end
- end.
-
-
-copy_local_docs(#job{split_state = copy_local_docs} = Job) ->
- Pid = spawn_link(?MODULE, copy_local_docs_impl, [Job]),
- report(Job#job{workers = [Pid]}).
-
-
-copy_local_docs_impl(#job{source = Source, target = Targets0}) ->
- #shard{name = SourceName} = Source,
- Targets = [{R, N} || #shard{range = R, name = N} <- Targets0],
- TMap = maps:from_list(Targets),
- LogArg1 = [?MODULE, shardsstr(Source, Targets)],
- couch_log:notice("~p copy local docs start ~s", LogArg1),
- case couch_db_split:copy_local_docs(SourceName, TMap, fun pickfun/3) of
- ok ->
- couch_log:notice("~p copy local docs finished for ~s", LogArg1),
- ok;
- {error, Error} ->
- LogArg2 = [?MODULE, shardsstr(Source, Targets), Error],
- couch_log:error("~p copy local docs failed for ~s ~p", LogArg2),
- exit({error, Error})
- end.
-
-
-update_shardmap(#job{} = Job) ->
- Pid = spawn_link(mem3_reshard_dbdoc, update_shard_map, [Job]),
- report(Job#job{workers = [Pid]}).
-
-
-wait_source_close(#job{source = #shard{name = Name}} = Job) ->
- couch_event:notify(Name, deleted),
- Pid = spawn_link(?MODULE, wait_source_close_impl, [Job]),
- report(Job#job{workers = [Pid]}).
-
-
-wait_source_close_impl(#job{source = #shard{name = Name}, target = Targets}) ->
- Timeout = config:get_integer("reshard", "source_close_timeout_sec", 600),
- check_targets_exist(Targets, wait_source_close),
- case couch_db:open_int(Name, [?ADMIN_CTX]) of
- {ok, Db} ->
- Now = mem3_reshard:now_sec(),
- case wait_source_close(Db, 1, Now + Timeout) of
- true ->
- ok;
- false ->
- exit({error, source_db_close_timeout, Name, Timeout})
- end;
- {not_found, _} ->
- couch_log:warning("~p source already deleted ~p", [?MODULE, Name]),
- ok
- end.
-
-
-wait_source_close(Db, SleepSec, UntilSec) ->
- case couch_db:monitored_by(Db) -- [self()] of
- [] ->
- true;
- [_ | _] ->
- Now = mem3_reshard:now_sec(),
- case Now < UntilSec of
- true ->
- LogMsg = "~p : Waiting for source shard ~p to be closed",
- couch_log:notice(LogMsg, [?MODULE, couch_db:name(Db)]),
- timer:sleep(SleepSec * 1000),
- wait_source_close(Db, SleepSec, UntilSec);
- false ->
- false
- end
- end.
-
-
-source_delete(#job{} = Job) ->
- Pid = spawn_link(?MODULE, source_delete_impl, [Job]),
- report(Job#job{workers = [Pid]}).
-
-
-source_delete_impl(#job{source = #shard{name = Name}, target = Targets}) ->
- check_targets_exist(Targets, source_delete),
- case config:get_boolean("mem3_reshard", "delete_source", true) of
- true ->
- case couch_server:delete(Name, [?ADMIN_CTX]) of
- ok ->
- couch_log:notice("~p : deleted source shard ~p",
- [?MODULE, Name]);
- not_found ->
- couch_log:warning("~p : source was already deleted ~p",
- [?MODULE, Name])
- end;
- false ->
-            % Emit the deleted event even when not actually deleting the
-            % files. This is the second such event; the first one was emitted
-            % before wait_source_close. They should be idempotent. This one
-            % just matches the event couch_server would have emitted had the
-            % config option not been set.
- couch_event:notify(Name, deleted),
- LogMsg = "~p : according to configuration not deleting source ~p",
- couch_log:warning(LogMsg, [?MODULE, Name])
- end,
- TNames = [TName || #shard{name = TName} <- Targets],
- lists:foreach(fun(TName) -> couch_event:notify(TName, updated) end, TNames).
-
-
-completed(#job{} = Job) ->
- couch_log:notice("~p : ~p completed, exit normal", [?MODULE, jobfmt(Job)]),
- exit(normal).
-
-
-% This is belt and suspenders, really. Called periodically to validate that
-% the state is one of the expected states.
--spec check_state(#job{}) -> #job{} | no_return().
-check_state(#job{split_state = State} = Job) ->
- case lists:member(State, ?SPLIT_STATES) of
- true ->
- Job;
- false ->
- erlang:error({invalid_shard_split_state, State, Job})
- end.
-
-
-create_artificial_mem3_rep_checkpoints(#job{} = Job, Seq) ->
- #job{source = Source = #shard{name = SourceName}, target = Targets} = Job,
- check_source_exists(Source, initial_copy),
- TNames = [TN || #shard{name = TN} <- Targets],
- Timestamp = list_to_binary(mem3_util:iso8601_timestamp()),
- couch_util:with_db(SourceName, fun(SDb) ->
- [couch_util:with_db(TName, fun(TDb) ->
- Doc = mem3_rep_checkpoint_doc(SDb, TDb, Timestamp, Seq),
- {ok, _} = couch_db:update_doc(SDb, Doc, []),
- {ok, _} = couch_db:update_doc(TDb, Doc, []),
- ok
- end) || TName <- TNames]
- end),
- ok.
-
-
-mem3_rep_checkpoint_doc(SourceDb, TargetDb, Timestamp, Seq) ->
- Node = atom_to_binary(node(), utf8),
- SourceUUID = couch_db:get_uuid(SourceDb),
- TargetUUID = couch_db:get_uuid(TargetDb),
- History = {[
- {<<"source_node">>, Node},
- {<<"source_uuid">>, SourceUUID},
- {<<"source_seq">>, Seq},
- {<<"timestamp">>, Timestamp},
- {<<"target_node">>, Node},
- {<<"target_uuid">>, TargetUUID},
- {<<"target_seq">>, Seq}
- ]},
- Body = {[
- {<<"seq">>, Seq},
- {<<"target_uuid">>, TargetUUID},
- {<<"history">>, {[{Node, [History]}]}}
- ]},
- Id = mem3_rep:make_local_id(SourceUUID, TargetUUID),
- #doc{id = Id, body = Body}.
-
-
-check_source_exists(#shard{name = Name}, StateName) ->
- case couch_server:exists(Name) of
- true ->
- ok;
- false ->
- ErrMsg = "~p source ~p is unexpectedly missing in ~p",
- couch_log:error(ErrMsg, [?MODULE, Name, StateName]),
- exit({error, missing_source})
- end.
-
-
-check_targets_exist(Targets, StateName) ->
- lists:foreach(fun(#shard{name = Name}) ->
- case couch_server:exists(Name) of
- true ->
- ok;
- false ->
- ErrMsg = "~p target ~p is unexpectedly missing in ~p",
- couch_log:error(ErrMsg, [?MODULE, Name, StateName]),
- exit({error, missing_target})
- end
- end, Targets).
-
-
--spec max_retries() -> integer().
-max_retries() ->
- config:get_integer("reshard", "max_retries", 1).
-
-
--spec retry_interval_sec() -> integer().
-retry_interval_sec() ->
- config:get_integer("reshard", "retry_interval_sec", 10).
-
-
--spec update_shard_map_timeout_sec() -> integer().
-update_shard_map_timeout_sec() ->
- config:get_integer("reshard", "update_shardmap_timeout_sec", 60).
-
-
--spec info_update(atom(), any(), [tuple()]) -> [tuple()].
-info_update(Key, Val, StateInfo) ->
- lists:keystore(Key, 1, StateInfo, {Key, Val}).
-
-
--spec info_delete(atom(), [tuple()]) -> [tuple()].
-info_delete(Key, StateInfo) ->
- lists:keydelete(Key, 1, StateInfo).
-
-
--spec shardsstr(#shard{}, #shard{} | [#shard{}]) -> string().
-shardsstr(#shard{name = SourceName}, #shard{name = TargetName}) ->
- lists:flatten(io_lib:format("~s -> ~s", [SourceName, TargetName]));
-
-shardsstr(#shard{name = SourceName}, Targets) ->
- TNames = [TN || #shard{name = TN} <- Targets],
- TargetsStr = string:join([binary_to_list(T) || T <- TNames], ","),
- lists:flatten(io_lib:format("~s -> ~s", [SourceName, TargetsStr])).
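-% E.g. for a two-way split this yields a flat string such as (shard names are
-% illustrative): "shards/00000000-ffffffff/mydb.1565450973 ->
-% shards/00000000-7fffffff/mydb.1565450973,shards/80000000-ffffffff/mydb.1565450973"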
-
-
--spec reset_target(#job{}) -> #job{}.
-reset_target(#job{source = Source, target = Targets} = Job) ->
- ShardNames = try
- [N || #shard{name = N} <- mem3:local_shards(mem3:dbname(Source))]
- catch
- error:database_does_not_exist ->
- []
- end,
- lists:map(fun(#shard{name = Name}) ->
- case {couch_server:exists(Name), lists:member(Name, ShardNames)} of
- {_, true} ->
- % Should never get here, but if we do, crash and don't continue
- LogMsg = "~p : ~p target unexpectedly found in shard map ~p",
- couch_log:error(LogMsg, [?MODULE, jobfmt(Job), Name]),
- erlang:error({target_present_in_shard_map, Name});
- {true, false} ->
- LogMsg = "~p : ~p resetting ~p target",
- couch_log:warning(LogMsg, [?MODULE, jobfmt(Job), Name]),
- couch_db_split:cleanup_target(Source#shard.name, Name);
- {false, false} ->
- ok
- end
- end, Targets),
- Job.
-
-
--spec update_split_history(#job{}) -> #job{}.
-update_split_history(#job{split_state = St, update_time = Ts} = Job) ->
- Hist = Job#job.history,
- JobSt = case St of
- completed -> completed;
- failed -> failed;
- new -> new;
- stopped -> stopped;
- _ -> running
- end,
- Job#job{history = mem3_reshard:update_history(JobSt, St, Ts, Hist)}.
diff --git a/src/mem3/src/mem3_reshard_job_sup.erl b/src/mem3/src/mem3_reshard_job_sup.erl
deleted file mode 100644
index 3f1b3bfb4..000000000
--- a/src/mem3/src/mem3_reshard_job_sup.erl
+++ /dev/null
@@ -1,55 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard_job_sup).
-
--behaviour(supervisor).
-
--export([
- start_link/0,
- start_child/1,
- terminate_child/1,
- count_children/0,
- init/1
-]).
-
-
--include("mem3_reshard.hrl").
-
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-
-start_child(Job) ->
- supervisor:start_child(?MODULE, [Job]).
-
-
-terminate_child(Pid) ->
- supervisor:terminate_child(?MODULE, Pid).
-
-
-count_children() ->
- Props = supervisor:count_children(?MODULE),
- proplists:get_value(active, Props).
-
-
-init(_Args) ->
- Children = [
- {mem3_reshard_job,
- {mem3_reshard_job, start_link, []},
- temporary,
- 60000,
- worker,
- [mem3_reshard_job]}
- ],
- {ok, {{simple_one_for_one, 10, 3}, Children}}.
diff --git a/src/mem3/src/mem3_reshard_store.erl b/src/mem3/src/mem3_reshard_store.erl
deleted file mode 100644
index c3534b374..000000000
--- a/src/mem3/src/mem3_reshard_store.erl
+++ /dev/null
@@ -1,286 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard_store).
-
-
--export([
- init/3,
-
- store_job/2,
- load_job/2,
- delete_job/2,
- get_jobs/1,
-
- store_state/1,
- load_state/2,
- delete_state/1, % for debugging
-
- job_to_ejson_props/2,
- state_to_ejson_props/1
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
--include("mem3_reshard.hrl").
-
-
--spec init(#state{}, binary(), binary()) -> #state{}.
-init(#state{} = State, JobPrefix, StateDocId) ->
- State#state{
- job_prefix = <<?LOCAL_DOC_PREFIX, JobPrefix/binary>>,
- state_id = <<?LOCAL_DOC_PREFIX, StateDocId/binary>>
- }.
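-% E.g. with a hypothetical JobPrefix of <<"reshard-job-">> and StateDocId of
-% <<"reshard-state">>, job docs get ids like <<"_local/reshard-job-...">> and
-% the state doc id becomes <<"_local/reshard-state">>, since ?LOCAL_DOC_PREFIX
-% is the "_local/" namespace.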
-
-
--spec store_job(#state{}, #job{}) -> ok.
-store_job(#state{job_prefix = Prefix}, #job{id = Id} = Job) ->
- with_shards_db(fun(Db) ->
- DocId = <<Prefix/binary, Id/binary>>,
- ok = update_doc(Db, DocId, job_to_ejson_props(Job))
- end).
-
-
--spec load_job(#state{}, binary()) -> {ok, {[_]}} | not_found.
-load_job(#state{job_prefix = Prefix}, Id) ->
- with_shards_db(fun(Db) ->
- case load_doc(Db, <<Prefix/binary, Id/binary>>) of
- {ok, DocBody} ->
- {ok, job_from_ejson(DocBody)};
- not_found ->
- not_found
- end
- end).
-
-
--spec delete_job(#state{}, binary()) -> ok.
-delete_job(#state{job_prefix = Prefix}, Id) ->
- with_shards_db(fun(Db) ->
- DocId = <<Prefix/binary, Id/binary>>,
- ok = delete_doc(Db, DocId)
- end).
-
-
--spec get_jobs(#state{}) -> [#job{}].
-get_jobs(#state{job_prefix = Prefix}) ->
- with_shards_db(fun(Db) ->
- PrefixLen = byte_size(Prefix),
- FoldFun = fun(#doc{id = Id, body = Body}, Acc) ->
- case Id of
- <<Prefix:PrefixLen/binary, _/binary>> ->
- {ok, [job_from_ejson(Body) | Acc]};
- _ ->
- {stop, Acc}
- end
- end,
- Opts = [{start_key, Prefix}],
- {ok, Jobs} = couch_db:fold_local_docs(Db, FoldFun, [], Opts),
- lists:reverse(Jobs)
- end).
-
-
--spec store_state(#state{}) -> ok.
-store_state(#state{state_id = DocId} = State) ->
- with_shards_db(fun(Db) ->
- ok = update_doc(Db, DocId, state_to_ejson_props(State))
- end).
-
-
--spec load_state(#state{}, atom()) -> #state{}.
-load_state(#state{state_id = DocId} = State, Default) ->
- with_shards_db(fun(Db) ->
- case load_doc(Db, DocId) of
- {ok, DocBody} ->
- state_from_ejson(State, DocBody);
- not_found ->
- State#state{state = Default}
- end
- end).
-
-
--spec delete_state(#state{}) -> ok.
-delete_state(#state{state_id = DocId}) ->
- with_shards_db(fun(Db) ->
- ok = delete_doc(Db, DocId)
- end).
-
-
-job_to_ejson_props(#job{source = Source, target = Targets} = Job, Opts) ->
- Iso8601 = proplists:get_value(iso8601, Opts),
- History = history_to_ejson(Job#job.history, Iso8601),
- StartTime = case Iso8601 of
- true -> iso8601(Job#job.start_time);
- _ -> Job#job.start_time
- end,
- UpdateTime = case Iso8601 of
- true -> iso8601(Job#job.update_time);
- _ -> Job#job.update_time
- end,
- [
- {id, Job#job.id},
- {type, Job#job.type},
- {source, Source#shard.name},
- {target, [T#shard.name || T <- Targets]},
- {job_state, Job#job.job_state},
- {split_state, Job#job.split_state},
- {state_info, state_info_to_ejson(Job#job.state_info)},
- {node, atom_to_binary(Job#job.node, utf8)},
- {start_time, StartTime},
- {update_time, UpdateTime},
- {history, History}
- ].
-
-
-state_to_ejson_props(#state{} = State) ->
- [
- {state, atom_to_binary(State#state.state, utf8)},
- {state_info, state_info_to_ejson(State#state.state_info)},
- {update_time, State#state.update_time},
- {node, atom_to_binary(State#state.node, utf8)}
- ].
-
-
-% Private API
-
-with_shards_db(Fun) ->
- DbName = config:get("mem3", "shards_db", "_dbs"),
- case mem3_util:ensure_exists(DbName) of
- {ok, Db} ->
- try
- Fun(Db)
- after
- catch couch_db:close(Db)
- end;
- Else ->
- throw(Else)
- end.
-
-
-delete_doc(Db, DocId) ->
- case couch_db:open_doc(Db, DocId, []) of
- {ok, #doc{revs = {_, Revs}}} ->
- {ok, _} = couch_db:delete_doc(Db, DocId, Revs),
- ok;
- {not_found, _} ->
- ok
- end.
-
-
-update_doc(Db, DocId, Body) ->
- DocProps = [{<<"_id">>, DocId}] ++ Body,
- Body1 = ?JSON_DECODE(?JSON_ENCODE({DocProps})),
- BaseDoc = couch_doc:from_json_obj(Body1),
- Doc = case couch_db:open_doc(Db, DocId, []) of
- {ok, #doc{revs = Revs}} ->
- BaseDoc#doc{revs = Revs};
- {not_found, _} ->
- BaseDoc
- end,
- case store_state() of
- true ->
- {ok, _} = couch_db:update_doc(Db, Doc, []),
- couch_log:debug("~p updated doc ~p ~p", [?MODULE, DocId, Body]),
- ok;
- false ->
- couch_log:debug("~p not storing state in ~p", [?MODULE, DocId]),
- ok
- end.
-
-
-load_doc(Db, DocId) ->
- case couch_db:open_doc(Db, DocId, [ejson_body]) of
- {ok, #doc{body = Body}} ->
- couch_log:debug("~p loaded doc ~p ~p", [?MODULE, DocId, Body]),
- {ok, Body};
- {not_found, _} ->
- not_found
- end.
-
-
-job_to_ejson_props(#job{} = Job) ->
- job_to_ejson_props(Job, []).
-
-
-job_from_ejson({Props}) ->
- Id = couch_util:get_value(<<"id">>, Props),
- Type = couch_util:get_value(<<"type">>, Props),
- Source = couch_util:get_value(<<"source">>, Props),
- Target = couch_util:get_value(<<"target">>, Props),
- JobState = couch_util:get_value(<<"job_state">>, Props),
- SplitState = couch_util:get_value(<<"split_state">>, Props),
- StateInfo = couch_util:get_value(<<"state_info">>, Props),
- TStarted = couch_util:get_value(<<"start_time">>, Props),
- TUpdated = couch_util:get_value(<<"update_time">>, Props),
- History = couch_util:get_value(<<"history">>, Props),
- #job{
- id = Id,
- type = binary_to_atom(Type, utf8),
- job_state = binary_to_atom(JobState, utf8),
- split_state = binary_to_atom(SplitState, utf8),
- state_info = state_info_from_ejson(StateInfo),
- node = node(),
- start_time = TStarted,
- update_time = TUpdated,
- source = mem3_reshard:shard_from_name(Source),
- target = [mem3_reshard:shard_from_name(T) || T <- Target],
- history = history_from_ejson(History)
- }.
-
-
-state_from_ejson(#state{} = State, {Props}) ->
- StateVal = couch_util:get_value(<<"state">>, Props),
- StateInfo = couch_util:get_value(<<"state_info">>, Props),
- TUpdated = couch_util:get_value(<<"update_time">>, Props),
- State#state{
- state = binary_to_atom(StateVal, utf8),
- state_info = state_info_from_ejson(StateInfo),
- node = node(),
- update_time = TUpdated
- }.
-
-
-state_info_from_ejson({Props}) ->
- Props1 = [{binary_to_atom(K, utf8), couch_util:to_binary(V)}
- || {K, V} <- Props],
- lists:sort(Props1).
-
-
-history_to_ejson(Hist, true) when is_list(Hist) ->
- [{[{timestamp, iso8601(T)}, {type, S}, {detail, D}]} || {T, S, D} <- Hist];
-
-history_to_ejson(Hist, _) when is_list(Hist) ->
- [{[{timestamp, T}, {type, S}, {detail, D}]} || {T, S, D} <- Hist].
-
-
-history_from_ejson(HistoryEJson) when is_list(HistoryEJson) ->
- lists:map(fun({EventProps}) ->
- Timestamp = couch_util:get_value(<<"timestamp">>, EventProps),
- State = couch_util:get_value(<<"type">>, EventProps),
- Detail = couch_util:get_value(<<"detail">>, EventProps),
- {Timestamp, binary_to_atom(State, utf8), Detail}
- end, HistoryEJson).
-
-
-state_info_to_ejson(Props) ->
- {lists:sort([{K, couch_util:to_binary(V)} || {K, V} <- Props])}.
-
-
-store_state() ->
- config:get_boolean("reshard", "store_state", true).
-
-
-iso8601(UnixSec) ->
- Mega = UnixSec div 1000000,
- Sec = UnixSec rem 1000000,
- {{Y, M, D}, {H, Min, S}} = calendar:now_to_universal_time({Mega, Sec, 0}),
- Format = "~B-~2..0B-~2..0BT~2..0B:~2..0B:~2..0BZ",
- iolist_to_binary(io_lib:format(Format, [Y, M, D, H, Min, S])).
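-% A quick worked example of the conversion above:
-%   iso8601(1500000000) -> <<"2017-07-14T02:40:00Z">>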
diff --git a/src/mem3/src/mem3_reshard_sup.erl b/src/mem3/src/mem3_reshard_sup.erl
deleted file mode 100644
index 6349a4041..000000000
--- a/src/mem3/src/mem3_reshard_sup.erl
+++ /dev/null
@@ -1,47 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard_sup).
-
--behaviour(supervisor).
-
--export([
- start_link/0,
- init/1
-]).
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-
-init(_Args) ->
- Children = [
- {mem3_reshard_dbdoc,
- {mem3_reshard_dbdoc, start_link, []},
- permanent,
- infinity,
- worker,
- [mem3_reshard_dbdoc]},
- {mem3_reshard_job_sup,
- {mem3_reshard_job_sup, start_link, []},
- permanent,
- infinity,
- supervisor,
- [mem3_reshard_job_sup]},
- {mem3_reshard,
- {mem3_reshard, start_link, []},
- permanent,
- brutal_kill,
- worker,
- [mem3_reshard]}
- ],
- {ok, {{one_for_all, 5, 5}, Children}}.
diff --git a/src/mem3/src/mem3_reshard_validate.erl b/src/mem3/src/mem3_reshard_validate.erl
deleted file mode 100644
index aa8df3e16..000000000
--- a/src/mem3/src/mem3_reshard_validate.erl
+++ /dev/null
@@ -1,126 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard_validate).
-
--export([
- start_args/2,
- source/1,
- targets/2
-]).
-
--include_lib("mem3/include/mem3.hrl").
-
-
--spec start_args(#shard{}, any()) -> ok | {error, term()}.
-start_args(Source, Split) ->
- first_error([
- check_split(Split),
- check_range(Source, Split),
- check_node(Source),
- source(Source),
- check_shard_map(Source)
- ]).
-
-
--spec source(#shard{}) -> ok | {error, term()}.
-source(#shard{name = Name}) ->
- case couch_server:exists(Name) of
- true ->
- ok;
- false ->
- {error, {source_shard_not_found, Name}}
- end.
-
-
--spec check_shard_map(#shard{}) -> ok | {error, term()}.
-check_shard_map(#shard{name = Name}) ->
- DbName = mem3:dbname(Name),
- AllShards = mem3:shards(DbName),
- case mem3_util:calculate_max_n(AllShards) of
- N when is_integer(N), N >= 1 ->
- ok;
- N when is_integer(N), N < 1 ->
- {error, {not_enough_shard_copies, DbName}}
- end.
-
-
--spec targets(#shard{}, [#shard{}]) -> ok | {error, term()}.
-targets(#shard{} = Source, Targets) ->
- first_error([
- target_ranges(Source, Targets)
- ]).
-
-
--spec check_split(any()) -> ok | {error, term()}.
-check_split(Split) when is_integer(Split), Split > 1 ->
- ok;
-check_split(Split) ->
- {error, {invalid_split_parameter, Split}}.
-
-
--spec check_range(#shard{}, any()) -> ok | {error, term()}.
-check_range(#shard{range = Range = [B, E]}, Split) ->
- case (E + 1 - B) >= Split of
- true ->
- ok;
- false ->
- {error, {shard_range_cannot_be_split, Range, Split}}
- end.
-
-
--spec check_node(#shard{}) -> ok | {error, term()}.
-check_node(#shard{node = undefined}) ->
- ok;
-
-check_node(#shard{node = Node}) when Node =:= node() ->
- ok;
-
-check_node(#shard{node = Node}) ->
- {error, {source_shard_node_is_not_current_node, Node}}.
-
-
--spec target_ranges(#shard{}, [#shard{}]) -> ok | {error, any()}.
-target_ranges(#shard{range = [Begin, End]}, Targets) ->
- Ranges = [R || #shard{range = R} <- Targets],
- SortFun = fun([B1, _], [B2, _]) -> B1 =< B2 end,
- [First | RestRanges] = lists:sort(SortFun, Ranges),
- try
- TotalRange = lists:foldl(fun([B2, E2], [B1, E1]) ->
- case B2 =:= E1 + 1 of
- true ->
- ok;
- false ->
- throw({range_error, {B2, E1}})
- end,
- [B1, E2]
- end, First, RestRanges),
- case [Begin, End] =:= TotalRange of
- true ->
- ok;
- false ->
- throw({range_error, {[Begin, End], TotalRange}})
- end
- catch
- throw:{range_error, Error} ->
- {error, {shard_range_error, Error}}
- end.
-
-
--spec first_error([ok | {error, term()}]) -> ok | {error, term()}.
-first_error(Results) ->
- case [Res || Res <- Results, Res =/= ok] of
- [] ->
- ok;
- [FirstError | _] ->
- FirstError
- end.
diff --git a/src/mem3/src/mem3_rpc.erl b/src/mem3/src/mem3_rpc.erl
deleted file mode 100644
index 5d1c62c06..000000000
--- a/src/mem3/src/mem3_rpc.erl
+++ /dev/null
@@ -1,711 +0,0 @@
-% Copyright 2013 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_rpc).
-
-
--export([
- find_common_seq/4,
- get_missing_revs/4,
- update_docs/4,
- pull_replication/1,
- load_checkpoint/4,
- load_checkpoint/5,
- save_checkpoint/6,
-
- load_purge_infos/4,
- save_purge_checkpoint/4,
- purge_docs/4,
-
- replicate/4
-]).
-
-% Private RPC callbacks
--export([
- find_common_seq_rpc/3,
- load_checkpoint_rpc/3,
- pull_replication_rpc/1,
- load_checkpoint_rpc/4,
- save_checkpoint_rpc/5,
-
- load_purge_infos_rpc/3,
- save_purge_checkpoint_rpc/3,
-
- replicate_rpc/2
-
-]).
-
-
--include("mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
--define(BATCH_SIZE, 1000).
--define(REXI_CALL_TIMEOUT_MSEC, 600000).
-
-
-% "Pull" is a bit of a misnomer here, as what we're actually doing is
-% issuing an RPC request and telling the remote node to push updates to
-% us. This lets us reuse all of the battle-tested machinery of mem3_rpc.
-pull_replication(Seed) ->
- rexi_call(Seed, {mem3_rpc, pull_replication_rpc, [node()]}).
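-% Sketch of the round trip: pull_replication(Seed) makes Seed execute
-% pull_replication_rpc(node()) (defined further down), which runs
-% mem3_rep:go(Db, ThisNode, Opts) for each of Seed's local dbs, i.e. the seed
-% node pushes its updates back to the caller.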
-
-get_missing_revs(Node, DbName, IdsRevs, Options) ->
- rexi_call(Node, {fabric_rpc, get_missing_revs, [DbName, IdsRevs, Options]}).
-
-
-update_docs(Node, DbName, Docs, Options) ->
- rexi_call(Node, {fabric_rpc, update_docs, [DbName, Docs, Options]}).
-
-load_checkpoint(Node, DbName, SourceNode, SourceUUID, <<>>) ->
- % Upgrade clause for a mixed cluster for old nodes that don't have
- % load_checkpoint_rpc/4 yet. FilterHash is currently not
- % used and so defaults to <<>> everywhere
- load_checkpoint(Node, DbName, SourceNode, SourceUUID);
-load_checkpoint(Node, DbName, SourceNode, SourceUUID, FilterHash) ->
- Args = [DbName, SourceNode, SourceUUID, FilterHash],
- rexi_call(Node, {mem3_rpc, load_checkpoint_rpc, Args}).
-
-
-load_checkpoint(Node, DbName, SourceNode, SourceUUID) ->
- Args = [DbName, SourceNode, SourceUUID],
- rexi_call(Node, {mem3_rpc, load_checkpoint_rpc, Args}).
-
-
-save_checkpoint(Node, DbName, DocId, Seq, Entry, History) ->
- Args = [DbName, DocId, Seq, Entry, History],
- rexi_call(Node, {mem3_rpc, save_checkpoint_rpc, Args}).
-
-
-find_common_seq(Node, DbName, SourceUUID, SourceEpochs) ->
- Args = [DbName, SourceUUID, SourceEpochs],
- rexi_call(Node, {mem3_rpc, find_common_seq_rpc, Args}).
-
-
-load_purge_infos(Node, DbName, SourceUUID, Count) ->
- Args = [DbName, SourceUUID, Count],
- rexi_call(Node, {mem3_rpc, load_purge_infos_rpc, Args}).
-
-
-save_purge_checkpoint(Node, DbName, PurgeDocId, Body) ->
- Args = [DbName, PurgeDocId, Body],
- rexi_call(Node, {mem3_rpc, save_purge_checkpoint_rpc, Args}).
-
-
-purge_docs(Node, DbName, PurgeInfos, Options) ->
- rexi_call(Node, {fabric_rpc, purge_docs, [DbName, PurgeInfos, Options]}).
-
-
-replicate(Source, Target, DbName, Timeout)
- when is_atom(Source), is_atom(Target), is_binary(DbName) ->
- Args = [DbName, Target],
- rexi_call(Source, {mem3_rpc, replicate_rpc, Args}, Timeout).
-
-
-load_checkpoint_rpc(DbName, SourceNode, SourceUUID) ->
- load_checkpoint_rpc(DbName, SourceNode, SourceUUID, <<>>).
-
-
-load_checkpoint_rpc(DbName, SourceNode, SourceUUID, FilterHash) ->
- erlang:put(io_priority, {internal_repl, DbName}),
- case get_or_create_db(DbName, [?ADMIN_CTX]) of
- {ok, Db} ->
- TargetUUID = couch_db:get_uuid(Db),
- NewId = mem3_rep:make_local_id(SourceUUID, TargetUUID, FilterHash),
- case couch_db:open_doc(Db, NewId, []) of
- {ok, Doc} ->
- rexi:reply({ok, {NewId, Doc}});
- {not_found, _} ->
- OldId = mem3_rep:make_local_id(SourceNode, node()),
- case couch_db:open_doc(Db, OldId, []) of
- {ok, Doc} ->
- rexi:reply({ok, {NewId, Doc}});
- {not_found, _} ->
- rexi:reply({ok, {NewId, #doc{id = NewId}}})
- end
- end;
- Error ->
- rexi:reply(Error)
- end.
-
-
-save_checkpoint_rpc(DbName, Id, SourceSeq, NewEntry0, History0) ->
- erlang:put(io_priority, {internal_repl, DbName}),
- case get_or_create_db(DbName, [?ADMIN_CTX]) of
- {ok, Db} ->
- NewEntry = {[
- {<<"target_node">>, atom_to_binary(node(), utf8)},
- {<<"target_uuid">>, couch_db:get_uuid(Db)},
- {<<"target_seq">>, couch_db:get_update_seq(Db)}
- ] ++ NewEntry0},
- Body = {[
- {<<"seq">>, SourceSeq},
- {<<"target_uuid">>, couch_db:get_uuid(Db)},
- {<<"history">>, add_checkpoint(NewEntry, History0)}
- ]},
- Doc = #doc{id = Id, body = Body},
- rexi:reply(try couch_db:update_doc(Db, Doc, []) of
- {ok, _} ->
- {ok, Body};
- Else ->
- {error, Else}
- catch
- Exception ->
- Exception;
- error:Reason ->
- {error, Reason}
- end);
- Error ->
- rexi:reply(Error)
- end.
-
-find_common_seq_rpc(DbName, SourceUUID, SourceEpochs) ->
- erlang:put(io_priority, {internal_repl, DbName}),
- case get_or_create_db(DbName, [?ADMIN_CTX]) of
- {ok, Db} ->
- case couch_db:get_uuid(Db) of
- SourceUUID ->
- TargetEpochs = couch_db:get_epochs(Db),
- Seq = compare_epochs(SourceEpochs, TargetEpochs),
- rexi:reply({ok, Seq});
- _Else ->
- rexi:reply({ok, 0})
- end;
- Error ->
- rexi:reply(Error)
- end.
-
-pull_replication_rpc(Target) ->
- Dbs = mem3_sync:local_dbs(),
- Opts = [{batch_size, 1000}, {batch_count, 50}],
- Repl = fun(Db) -> {Db, mem3_rep:go(Db, Target, Opts)} end,
- rexi:reply({ok, lists:map(Repl, Dbs)}).
-
-
-load_purge_infos_rpc(DbName, SrcUUID, BatchSize) ->
- erlang:put(io_priority, {internal_repl, DbName}),
- case get_or_create_db(DbName, [?ADMIN_CTX]) of
- {ok, Db} ->
- TgtUUID = couch_db:get_uuid(Db),
- PurgeDocId = mem3_rep:make_purge_id(SrcUUID, TgtUUID),
- StartSeq = case couch_db:open_doc(Db, PurgeDocId, []) of
- {ok, #doc{body = {Props}}} ->
- couch_util:get_value(<<"purge_seq">>, Props);
- {not_found, _} ->
- Oldest = couch_db:get_oldest_purge_seq(Db),
- erlang:max(0, Oldest - 1)
- end,
- FoldFun = fun({PSeq, UUID, Id, Revs}, {Count, Infos, _}) ->
- NewCount = Count + length(Revs),
- NewInfos = [{UUID, Id, Revs} | Infos],
- Status = if NewCount < BatchSize -> ok; true -> stop end,
- {Status, {NewCount, NewInfos, PSeq}}
- end,
- InitAcc = {0, [], StartSeq},
- {ok, {_, PurgeInfos, ThroughSeq}} =
- couch_db:fold_purge_infos(Db, StartSeq, FoldFun, InitAcc),
- PurgeSeq = couch_db:get_purge_seq(Db),
- Remaining = PurgeSeq - ThroughSeq,
- rexi:reply({ok, {PurgeDocId, PurgeInfos, ThroughSeq, Remaining}});
- Else ->
- rexi:reply(Else)
- end.
-
-
-save_purge_checkpoint_rpc(DbName, PurgeDocId, Body) ->
- erlang:put(io_priority, {internal_repl, DbName}),
- case get_or_create_db(DbName, [?ADMIN_CTX]) of
- {ok, Db} ->
- Doc = #doc{id = PurgeDocId, body = Body},
- Resp = try couch_db:update_doc(Db, Doc, []) of
- Resp0 -> Resp0
- catch T:R ->
- {T, R}
- end,
- rexi:reply(Resp);
- Error ->
- rexi:reply(Error)
- end.
-
-
-replicate_rpc(DbName, Target) ->
- rexi:reply(try
- Opts = [{batch_size, ?BATCH_SIZE}, {batch_count, all}],
- {ok, mem3_rep:go(DbName, Target, Opts)}
- catch
- Tag:Error ->
- {Tag, Error}
- end).
-
-
-%% @doc Return the sequence where two files with the same UUID diverged.
-compare_epochs(SourceEpochs, TargetEpochs) ->
- compare_rev_epochs(
- lists:reverse(SourceEpochs),
- lists:reverse(TargetEpochs)
- ).
-
-
-compare_rev_epochs([{Node, Seq} | SourceRest], [{Node, Seq} | TargetRest]) ->
- % Common history, fast-forward
- compare_epochs(SourceRest, TargetRest);
-compare_rev_epochs([], [{_, TargetSeq} | _]) ->
- % Source has not moved, start from seq just before the target took over
- TargetSeq - 1;
-compare_rev_epochs([{_, SourceSeq} | _], []) ->
- % Target has not moved, start from seq where source diverged
- SourceSeq;
-compare_rev_epochs([{_, SourceSeq} | _], [{_, TargetSeq} | _]) ->
- % The source was moved to a new location independently, take the minimum
- erlang:min(SourceSeq, TargetSeq) - 1.
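-% A small worked example (node names are illustrative), with epochs listed
-% newest-first:
-%   compare_epochs([{n2, 100}, {n1, 0}], [{n3, 120}, {n1, 0}])
-% fast-forwards over the shared {n1, 0} epoch and then hits the divergence
-% clause, returning erlang:min(100, 120) - 1 = 99.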
-
-
-%% @doc This adds a new update sequence checkpoint to the replication
-%% history. Checkpoints are keyed by the source node so that we
-%% aren't mixing history between source shard moves.
-add_checkpoint({Props}, {History}) ->
- % Extract the source and target seqs for reference
- SourceSeq = couch_util:get_value(<<"source_seq">>, Props),
- TargetSeq = couch_util:get_value(<<"target_seq">>, Props),
-
- % Get the history relevant to the source node.
- SourceNode = couch_util:get_value(<<"source_node">>, Props),
- SourceHistory = couch_util:get_value(SourceNode, History, []),
-
- % If either the source or target shard has been truncated,
- % we need to filter out any history that was stored for
- % an update seq larger than the one we're currently recording.
- FilteredHistory = filter_history(SourceSeq, TargetSeq, SourceHistory),
-
- % Re-bucket our history based on the most recent source
- % sequence. This is where we drop old checkpoints to
- % maintain the exponential distribution.
- {_, RebucketedHistory} = rebucket(FilteredHistory, SourceSeq, 0),
- NewSourceHistory = [{Props} | RebucketedHistory],
-
- % Finally update the source node history and we're done.
- NodeRemoved = lists:keydelete(SourceNode, 1, History),
- {[{SourceNode, NewSourceHistory} | NodeRemoved]}.
-
-
-filter_history(SourceSeqThresh, TargetSeqThresh, History) ->
- SourceFilter = fun({Entry}) ->
- SourceSeq = couch_util:get_value(<<"source_seq">>, Entry),
- SourceSeq < SourceSeqThresh
- end,
- TargetFilter = fun({Entry}) ->
- TargetSeq = couch_util:get_value(<<"target_seq">>, Entry),
- TargetSeq < TargetSeqThresh
- end,
- SourceFiltered = lists:filter(SourceFilter, History),
- lists:filter(TargetFilter, SourceFiltered).
-
-
-%% @doc This function adjusts our history to maintain a
-%% history of checkpoints that follow an exponentially
-%% increasing age from the most recent checkpoint.
-%%
-%% The terms newest and oldest used in these comments
-%% refer to the (NewSeq - CurSeq) difference, where smaller
-%% values are considered newer.
-%%
-%% It works by assigning each entry to a bucket and keeping
-%% the newest and oldest entry in each bucket. Keeping
-%% both the newest and oldest means that we won't end up
-%% with empty buckets as checkpoints are promoted to new
-%% buckets.
-%%
-%% The return value of this function is a two-tuple of the
-%% form `{BucketId, History}` where BucketId is the id of
-%% the bucket for the first entry in History. This is used
-%% when recursing to detect the oldest value in a given
-%% bucket.
-%%
-%% This function expects the provided history to be sorted
-%% in descending order of source_seq values.
-rebucket([], _NewSeq, Bucket) ->
- {Bucket+1, []};
-rebucket([{Entry} | RestHistory], NewSeq, Bucket) ->
- CurSeq = couch_util:get_value(<<"source_seq">>, Entry),
- case find_bucket(NewSeq, CurSeq, Bucket) of
- Bucket ->
- % This entry is in an existing bucket, which means
- % we will only keep it if it's the oldest value
- % in the bucket. To detect this we rebucket the
- % rest of the list and only include Entry if the
- % rest of the list is in a bigger bucket.
- case rebucket(RestHistory, NewSeq, Bucket) of
- {Bucket, NewHistory} ->
- % There's another entry in this bucket so we drop the
- % current entry.
- {Bucket, NewHistory};
- {NextBucket, NewHistory} when NextBucket > Bucket ->
- % The rest of the history was rebucketed into a larger
- % bucket so this is the oldest entry in the current
- % bucket.
- {Bucket, [{Entry} | NewHistory]}
- end;
- NextBucket when NextBucket > Bucket ->
- % This entry is the newest in NextBucket so we add it
- % to our history and continue rebucketing.
- {_, NewHistory} = rebucket(RestHistory, NewSeq, NextBucket),
- {NextBucket, [{Entry} | NewHistory]}
- end.
-
-
-%% @doc Find the bucket id for the given sequence pair.
-find_bucket(NewSeq, CurSeq, Bucket) ->
- % The +1 constant in this comparison is a bit subtle. The
- % reason for it is to make sure that the first entry in
- % the history is guaranteed to have a BucketId of 1. This
- % also relies on never having a duplicated update
- % sequence so adding 1 here guarantees a difference >= 2.
- if (NewSeq - CurSeq + 1) > (2 bsl Bucket) ->
- find_bucket(NewSeq, CurSeq, Bucket+1);
- true ->
- Bucket
- end.
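-% E.g. find_bucket(16, 12, 0): 16 - 12 + 1 = 5; 5 > (2 bsl 0) and 5 > (2 bsl 1)
-% but 5 =< (2 bsl 2), so the entry with source_seq 12 ends up in bucket 2,
-% matching the bucket annotations in the tests below.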
-
-
-rexi_call(Node, MFA) ->
- rexi_call(Node, MFA, ?REXI_CALL_TIMEOUT_MSEC).
-
-
-rexi_call(Node, MFA, Timeout) ->
- Mon = rexi_monitor:start([rexi_utils:server_pid(Node)]),
- Ref = rexi:cast(Node, self(), MFA, [sync]),
- try
- receive {Ref, {ok, Reply}} ->
- Reply;
- {Ref, Error} ->
- erlang:error(Error);
- {rexi_DOWN, Mon, _, Reason} ->
- erlang:error({rexi_DOWN, {Node, Reason}})
- after Timeout ->
- erlang:error(timeout)
- end
- after
- rexi_monitor:stop(Mon)
- end.
-
-
-get_or_create_db(DbName, Options) ->
- mem3_util:get_or_create_db(DbName, Options).
-
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-
--define(SNODE, <<"src@localhost">>).
--define(TNODE, <<"tgt@localhost">>).
--define(SNODE_KV, {<<"source_node">>, ?SNODE}).
--define(TNODE_KV, {<<"target_node">>, ?TNODE}).
--define(SSEQ, <<"source_seq">>).
--define(TSEQ, <<"target_seq">>).
--define(ENTRY(S, T), {[?SNODE_KV, {?SSEQ, S}, ?TNODE_KV, {?TSEQ, T}]}).
-
-
-filter_history_data() ->
- [
- ?ENTRY(13, 15),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ].
-
-
-filter_history_remove_none_test() ->
- ?assertEqual(filter_history(20, 20, filter_history_data()), [
- ?ENTRY(13, 15),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]).
-
-
-filter_history_remove_all_test() ->
- ?assertEqual(filter_history(1, 1, filter_history_data()), []).
-
-
-filter_history_remove_equal_test() ->
- ?assertEqual(filter_history(10, 10, filter_history_data()), [
- ?ENTRY(2, 3)
- ]),
- ?assertEqual(filter_history(11, 9, filter_history_data()), [
- ?ENTRY(2, 3)
- ]).
-
-
-filter_history_remove_for_source_and_target_test() ->
- ?assertEqual(filter_history(11, 20, filter_history_data()), [
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]),
- ?assertEqual(filter_history(14, 14, filter_history_data()), [
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]).
-
-
-filter_history_remove_for_both_test() ->
- ?assertEqual(filter_history(11, 11, filter_history_data()), [
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]).
-
-
-filter_history_remove_for_both_again_test() ->
- ?assertEqual(filter_history(3, 4, filter_history_data()), [
- ?ENTRY(2, 3)
- ]).
-
-
-add_first_checkpoint_test() ->
- History = {[]},
- ?assertEqual(add_checkpoint(?ENTRY(2, 3), History), {[
- {?SNODE, [
- ?ENTRY(2, 3)
- ]}
- ]}).
-
-
-add_first_checkpoint_to_empty_test() ->
- History = {[{?SNODE, []}]},
- ?assertEqual(add_checkpoint(?ENTRY(2, 3), History), {[
- {?SNODE, [
- ?ENTRY(2, 3)
- ]}
- ]}).
-
-
-add_second_checkpoint_test() ->
- History = {[{?SNODE, [?ENTRY(2, 3)]}]},
- ?assertEqual(add_checkpoint(?ENTRY(10, 9), History), {[
- {?SNODE, [
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]}
- ]}).
-
-
-add_third_checkpoint_test() ->
- History = {[{?SNODE, [
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]}]},
- ?assertEqual(add_checkpoint(?ENTRY(11, 10), History), {[
- {?SNODE, [
- ?ENTRY(11, 10),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]}
- ]}).
-
-
-add_fourth_checkpoint_test() ->
- History = {[{?SNODE, [
- ?ENTRY(11, 10),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]}]},
- ?assertEqual(add_checkpoint(?ENTRY(12, 13), History), {[
- {?SNODE, [
- ?ENTRY(12, 13),
- ?ENTRY(11, 10),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]}
- ]}).
-
-
-add_checkpoint_with_replacement_test() ->
- History = {[{?SNODE, [
- ?ENTRY(12, 13),
- ?ENTRY(11, 10),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]}]},
- % Picking a source_seq of 16 to force 10, 11, and 12
- % into the same bucket to show we drop the 11 entry.
- ?assertEqual(add_checkpoint(?ENTRY(16, 16), History), {[
- {?SNODE, [
- ?ENTRY(16, 16),
- ?ENTRY(12, 13),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]}
- ]}).
-
-add_checkpoint_drops_redundant_checkpoints_test() ->
- % I've added comments showing the bucket ID based
- % on the ?ENTRY passed to add_checkpoint
- History = {[{?SNODE, [
- ?ENTRY(15, 15), % Bucket 0
- ?ENTRY(14, 14), % Bucket 1
- ?ENTRY(13, 13), % Bucket 1
- ?ENTRY(12, 12), % Bucket 2
- ?ENTRY(11, 11), % Bucket 2
- ?ENTRY(10, 10), % Bucket 2
- ?ENTRY(9, 9), % Bucket 2
- ?ENTRY(8, 8), % Bucket 3
- ?ENTRY(7, 7), % Bucket 3
- ?ENTRY(6, 6), % Bucket 3
- ?ENTRY(5, 5), % Bucket 3
- ?ENTRY(4, 4), % Bucket 3
- ?ENTRY(3, 3), % Bucket 3
- ?ENTRY(2, 2), % Bucket 3
- ?ENTRY(1, 1) % Bucket 3
- ]}]},
- ?assertEqual(add_checkpoint(?ENTRY(16, 16), History), {[
- {?SNODE, [
- ?ENTRY(16, 16), % Bucket 0
- ?ENTRY(15, 15), % Bucket 0
- ?ENTRY(14, 14), % Bucket 1
- ?ENTRY(13, 13), % Bucket 1
- ?ENTRY(12, 12), % Bucket 2
- ?ENTRY(9, 9), % Bucket 2
- ?ENTRY(8, 8), % Bucket 3
- ?ENTRY(1, 1) % Bucket 3
- ]}
- ]}).
-
-
-add_checkpoint_show_not_always_a_drop_test() ->
- % Depending on the edge conditions of buckets we
- % may not always drop values when adding new
- % checkpoints. In this case 12 stays because there's
- % no longer a value for 10 or 11.
- %
- % I've added comments showing the bucket ID based
- % on the ?ENTRY passed to add_checkpoint
- History = {[{?SNODE, [
- ?ENTRY(16, 16), % Bucket 0
- ?ENTRY(15, 15), % Bucket 1
- ?ENTRY(14, 14), % Bucket 1
- ?ENTRY(13, 13), % Bucket 2
- ?ENTRY(12, 12), % Bucket 2
- ?ENTRY(9, 9), % Bucket 3
- ?ENTRY(8, 8), % Bucket 3
- ?ENTRY(1, 1) % Bucket 4
- ]}]},
- ?assertEqual(add_checkpoint(?ENTRY(17, 17), History), {[
- {?SNODE, [
- ?ENTRY(17, 17), % Bucket 0
- ?ENTRY(16, 16), % Bucket 0
- ?ENTRY(15, 15), % Bucket 1
- ?ENTRY(14, 14), % Bucket 1
- ?ENTRY(13, 13), % Bucket 2
- ?ENTRY(12, 12), % Bucket 2
- ?ENTRY(9, 9), % Bucket 3
- ?ENTRY(8, 8), % Bucket 3
- ?ENTRY(1, 1) % Bucket 4
- ]}
- ]}).
-
-
-add_checkpoint_big_jump_show_lots_drop_test() ->
- % I've added comments showing the bucket ID based
- % on the ?ENTRY passed to add_checkpoint
- History = {[{?SNODE, [
- ?ENTRY(16, 16), % Bucket 4
- ?ENTRY(15, 15), % Bucket 4
- ?ENTRY(14, 14), % Bucket 4
- ?ENTRY(13, 13), % Bucket 4
- ?ENTRY(12, 12), % Bucket 4
- ?ENTRY(9, 9), % Bucket 4
- ?ENTRY(8, 8), % Bucket 4
- ?ENTRY(1, 1) % Bucket 4
- ]}]},
- ?assertEqual(add_checkpoint(?ENTRY(32, 32), History), {[
- {?SNODE, [
- ?ENTRY(32, 32), % Bucket 0
- ?ENTRY(16, 16), % Bucket 4
- ?ENTRY(1, 1) % Bucket 4
- ]}
- ]}).
-
-
-add_checkpoint_show_filter_history_test() ->
- History = {[{?SNODE, [
- ?ENTRY(16, 16),
- ?ENTRY(15, 15),
- ?ENTRY(14, 14),
- ?ENTRY(13, 13),
- ?ENTRY(12, 12),
- ?ENTRY(9, 9),
- ?ENTRY(8, 8),
- ?ENTRY(1, 1)
- ]}]},
- % Drop for both
- ?assertEqual(add_checkpoint(?ENTRY(10, 10), History), {[
- {?SNODE, [
- ?ENTRY(10, 10),
- ?ENTRY(9, 9),
- ?ENTRY(8, 8),
- ?ENTRY(1, 1)
- ]}
- ]}),
- % Drop for source
- ?assertEqual(add_checkpoint(?ENTRY(10, 200), History), {[
- {?SNODE, [
- ?ENTRY(10, 200),
- ?ENTRY(9, 9),
- ?ENTRY(8, 8),
- ?ENTRY(1, 1)
- ]}
- ]}),
- % Drop for target. Obviously a source_seq of 200
- % will end up dropping the 8 entry.
- ?assertEqual(add_checkpoint(?ENTRY(200, 10), History), {[
- {?SNODE, [
- ?ENTRY(200, 10),
- ?ENTRY(9, 9),
- ?ENTRY(1, 1)
- ]}
- ]}).
-
-
-add_checkpoint_from_other_node_test() ->
- History = {[{<<"not_the_source">>, [
- ?ENTRY(12, 13),
- ?ENTRY(11, 10),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]}]},
- % No filtering
- ?assertEqual(add_checkpoint(?ENTRY(1, 1), History), {[
- {?SNODE, [
- ?ENTRY(1, 1)
- ]},
- {<<"not_the_source">>, [
- ?ENTRY(12, 13),
- ?ENTRY(11, 10),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]}
- ]}),
- % No dropping
- ?assertEqual(add_checkpoint(?ENTRY(200, 200), History), {[
- {?SNODE, [
- ?ENTRY(200, 200)
- ]},
- {<<"not_the_source">>, [
- ?ENTRY(12, 13),
- ?ENTRY(11, 10),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]}
- ]}).
-
-
--endif.
diff --git a/src/mem3/src/mem3_seeds.erl b/src/mem3/src/mem3_seeds.erl
deleted file mode 100644
index f1aceb996..000000000
--- a/src/mem3/src/mem3_seeds.erl
+++ /dev/null
@@ -1,162 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_seeds).
--behaviour(gen_server).
-
--export([
- init/1,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3,
- terminate/2
-]).
-
--export([
- start_link/0,
- get_seeds/0,
- get_status/0
-]).
-
--record(st, {
- ready = false,
- seeds = [],
- jobref = nil,
- status = [] % nested proplist keyed on node name
-}).
-
--define(REPLICATION_INTERVAL, 60000).
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-get_seeds() ->
- case config:get("cluster", "seedlist") of
- undefined ->
- [];
- List ->
- Nodes = string:tokens(List, ","),
- Seeds = [list_to_atom(Node) || Node <- Nodes] -- [node()],
- mem3_util:rotate_list(node(), Seeds)
- end.
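-% A minimal sketch of the corresponding config (node names are illustrative):
-%
-%   [cluster]
-%   seedlist = couchdb@node1.example.com,couchdb@node2.example.com
-%
-% On couchdb@node1.example.com the call above yields
-% ['couchdb@node2.example.com'].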
-
-get_status() ->
- gen_server:call(?MODULE, get_status).
-
-init([]) ->
- Seeds = get_seeds(),
- InitStatus = [{Seed, {[]}} || Seed <- Seeds],
- State = #st{
- seeds = Seeds,
- ready = case Seeds of [] -> true; _ -> false end,
- jobref = start_replication(Seeds),
- status = InitStatus
- },
- {ok, State}.
-
-handle_call(get_status, _From, St) ->
- Status = {[
- {status, case St#st.ready of true -> ok; false -> seeding end},
- {seeds, {St#st.status}}
- ]},
- {reply, {ok, Status}, St}.
-
-handle_cast(_Msg, St) ->
- {noreply, St}.
-
-handle_info(start_replication, #st{jobref=nil} = St) ->
- JobRef = start_replication(St#st.seeds),
- {noreply, St#st{jobref = JobRef}};
-
-handle_info({'DOWN', Ref, _, Pid, Output}, #st{jobref = {Pid, Ref}} = St) ->
- {noreply, update_state(St, Output)};
-
-handle_info(_Msg, St) ->
- {noreply, St}.
-
-terminate(_Reason, _St) ->
- ok.
-
-code_change(_OldVsn, St, _Extra) ->
- {ok, St}.
-
-% internal functions
-
-start_replication([]) ->
- nil;
-start_replication([Seed | _]) ->
- spawn_monitor(fun() ->
- Reply = mem3_rpc:pull_replication(Seed),
- exit({ok, Reply})
- end).
-
-update_state(State, {ok, Data}) ->
- #st{seeds = [Current | Tail], status = Status} = State,
- Report = {[
- {timestamp, list_to_binary(mem3_util:iso8601_timestamp())},
- {last_replication_status, ok},
- format_data(Data)
- ]},
- NewStatus = lists:ukeymerge(1, [{Current, Report}], Status),
- Ready = is_ready(State#st.ready, Data),
- case Ready of
- true ->
- Seeds = Tail ++ [Current],
- Job = nil;
- false ->
- % Try to progress this same seed again
- Seeds = [Current | Tail],
- Job = start_replication([Current | Tail])
- end,
- State#st{
- seeds = Seeds,
- jobref = Job,
- ready = Ready,
- status = NewStatus
- };
-update_state(State, {_Error, _Stack}) ->
- #st{seeds = [Current | Tail], status = Status} = State,
- Report = {[
- {timestamp, list_to_binary(mem3_util:iso8601_timestamp())},
- {last_replication_status, error}
- ]},
- NewStatus = lists:ukeymerge(1, [{Current, Report}], Status),
- Seeds = Tail ++ [Current],
- if not State#st.ready ->
- erlang:send_after(1000, self(), start_replication);
- true ->
- ok
- end,
- State#st{
- seeds = Seeds,
- jobref = nil,
- status = NewStatus
- }.
-
-is_ready(true, _) ->
- true;
-is_ready(false, Data) ->
- lists:all(fun({_DbName, Pending}) -> Pending =:= {ok, 0} end, Data).
-
-format_data(Data) ->
- Formatted = lists:map(fun({DbName, Status}) ->
- case Status of
- {ok, Pending} when is_number(Pending) ->
- {DbName, Pending};
- {error, Tag} ->
- {DbName, list_to_binary(io_lib:format("~p", [Tag]))};
- _Else ->
- {DbName, unknown_error}
- end
- end, Data),
- {pending_updates, {Formatted}}.
diff --git a/src/mem3/src/mem3_shards.erl b/src/mem3/src/mem3_shards.erl
deleted file mode 100644
index 4f3323740..000000000
--- a/src/mem3/src/mem3_shards.erl
+++ /dev/null
@@ -1,766 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_shards).
--behaviour(gen_server).
--vsn(3).
--behaviour(config_listener).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
--export([handle_config_change/5, handle_config_terminate/3]).
-
--export([start_link/0]).
--export([opts_for_db/1]).
--export([for_db/1, for_db/2, for_docid/2, for_docid/3, get/3, local/1, fold/2]).
--export([for_shard_range/1]).
--export([set_max_size/1]).
--export([get_changes_pid/0]).
-
--record(st, {
- max_size = 25000,
- cur_size = 0,
- changes_pid,
- update_seq,
- write_timeout
-}).
-
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(DBS, mem3_dbs).
--define(SHARDS, mem3_shards).
--define(ATIMES, mem3_atimes).
--define(OPENERS, mem3_openers).
--define(RELISTEN_DELAY, 5000).
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-opts_for_db(DbName) ->
- {ok, Db} = mem3_util:ensure_exists(mem3_sync:shards_db()),
- case couch_db:open_doc(Db, DbName, [ejson_body]) of
- {ok, #doc{body = {Props}}} ->
- mem3_util:get_shard_opts(Props);
- {not_found, _} ->
- erlang:error(database_does_not_exist, ?b2l(DbName))
- end.
-
-for_db(DbName) ->
- for_db(DbName, []).
-
-for_db(DbName, Options) ->
- Shards = try ets:lookup(?SHARDS, DbName) of
- [] ->
- load_shards_from_disk(DbName);
- Else ->
- gen_server:cast(?MODULE, {cache_hit, DbName}),
- Else
- catch error:badarg ->
- load_shards_from_disk(DbName)
- end,
- case lists:member(ordered, Options) of
- true -> Shards;
- false -> mem3_util:downcast(Shards)
- end.
-
-for_docid(DbName, DocId) ->
- for_docid(DbName, DocId, []).
-
-for_docid(DbName, DocId, Options) ->
- HashKey = mem3_hash:calculate(DbName, DocId),
- ShardHead = #shard{
- dbname = DbName,
- range = ['$1', '$2'],
- _ = '_'
- },
- OrderedShardHead = #ordered_shard{
- dbname = DbName,
- range = ['$1', '$2'],
- _ = '_'
- },
- Conditions = [{'=<', '$1', HashKey}, {'=<', HashKey, '$2'}],
- ShardSpec = {ShardHead, Conditions, ['$_']},
- OrderedShardSpec = {OrderedShardHead, Conditions, ['$_']},
- Shards = try ets:select(?SHARDS, [ShardSpec, OrderedShardSpec]) of
- [] ->
- load_shards_from_disk(DbName, DocId);
- Else ->
- gen_server:cast(?MODULE, {cache_hit, DbName}),
- Else
- catch error:badarg ->
- load_shards_from_disk(DbName, DocId)
- end,
- case lists:member(ordered, Options) of
- true -> Shards;
- false -> mem3_util:downcast(Shards)
- end.
-
-for_shard_range(ShardName) ->
- DbName = mem3:dbname(ShardName),
- [B, E] = mem3:range(ShardName),
- ShardHead = #shard{
- dbname = DbName,
- range = ['$1', '$2'],
- _ = '_'
- },
- OrderedShardHead = #ordered_shard{
- dbname = DbName,
- range = ['$1', '$2'],
- _ = '_'
- },
- % see mem3_util:range_overlap/2 for an explanation of how it works
- Conditions = [{'=<', '$1', E}, {'=<', B, '$2'}],
- ShardSpec = {ShardHead, Conditions, ['$_']},
- OrderedShardSpec = {OrderedShardHead, Conditions, ['$_']},
- Shards = try ets:select(?SHARDS, [ShardSpec, OrderedShardSpec]) of
- [] ->
- filter_shards_by_range([B, E], load_shards_from_disk(DbName));
- Else ->
- gen_server:cast(?MODULE, {cache_hit, DbName}),
- Else
- catch error:badarg ->
- filter_shards_by_range([B, E], load_shards_from_disk(DbName))
- end,
- mem3_util:downcast(Shards).
-
-
-get(DbName, Node, Range) ->
- Res = lists:foldl(fun(#shard{node=N, range=R}=S, Acc) ->
- case {N, R} of
- {Node, Range} -> [S | Acc];
- _ -> Acc
- end
- end, [], for_db(DbName)),
- case Res of
- [] -> {error, not_found};
- [Shard] -> {ok, Shard};
- [_|_] -> {error, duplicates}
- end.
-
-local(DbName) when is_list(DbName) ->
- local(list_to_binary(DbName));
-local(DbName) ->
- Pred = fun(#shard{node=Node}) when Node == node() -> true; (_) -> false end,
- lists:filter(Pred, for_db(DbName)).
-
-fold(Fun, Acc) ->
- {ok, Db} = mem3_util:ensure_exists(mem3_sync:shards_db()),
- FAcc = {Db, Fun, Acc},
- try
- {ok, LastAcc} = couch_db:fold_docs(Db, fun fold_fun/2, FAcc),
- {_Db, _UFun, UAcc} = LastAcc,
- UAcc
- after
- couch_db:close(Db)
- end.
-
-set_max_size(Size) when is_integer(Size), Size > 0 ->
- gen_server:call(?MODULE, {set_max_size, Size}).
-
-get_changes_pid() ->
- gen_server:call(?MODULE, get_changes_pid).
-
-handle_config_change("mem3", "shard_cache_size", SizeList, _, _) ->
- Size = list_to_integer(SizeList),
- {ok, gen_server:call(?MODULE, {set_max_size, Size}, infinity)};
-handle_config_change("mem3", "shards_db", _DbName, _, _) ->
- {ok, gen_server:call(?MODULE, shard_db_changed, infinity)};
-handle_config_change("mem3", "shard_write_timeout", Timeout, _, _) ->
- Timeout1 = try
- list_to_integer(Timeout)
- catch _:_ ->
- 1000
- end,
- {ok, gen_server:call(?MODULE, {set_write_timeout, Timeout1})};
-handle_config_change(_, _, _, _, _) ->
- {ok, nil}.
-
-handle_config_terminate(_, stop, _) ->
- ok;
-handle_config_terminate(_Server, _Reason, _State) ->
- erlang:send_after(?RELISTEN_DELAY, whereis(?MODULE), restart_config_listener).
-
-init([]) ->
- couch_util:set_mqd_off_heap(?MODULE),
- ets:new(?SHARDS, [
- bag,
- public,
- named_table,
- {keypos,#shard.dbname},
- {read_concurrency, true}
- ]),
- ets:new(?DBS, [set, protected, named_table]),
- ets:new(?ATIMES, [ordered_set, protected, named_table]),
- ets:new(?OPENERS, [bag, public, named_table]),
- ok = config:listen_for_changes(?MODULE, nil),
- SizeList = config:get("mem3", "shard_cache_size", "25000"),
- WriteTimeout = config:get_integer("mem3", "shard_write_timeout", 1000),
- UpdateSeq = get_update_seq(),
- {ok, #st{
- max_size = list_to_integer(SizeList),
- cur_size = 0,
- changes_pid = start_changes_listener(UpdateSeq),
- update_seq = UpdateSeq,
- write_timeout = WriteTimeout
- }}.
-
-handle_call({set_max_size, Size}, _From, St) ->
- {reply, ok, cache_free(St#st{max_size=Size})};
-handle_call(shard_db_changed, _From, St) ->
- exit(St#st.changes_pid, shard_db_changed),
- {reply, ok, St};
-handle_call({set_write_timeout, Timeout}, _From, St) ->
- {reply, ok, St#st{write_timeout = Timeout}};
-handle_call(get_changes_pid, _From, St) ->
- {reply, {ok, St#st.changes_pid}, St};
-handle_call(_Call, _From, St) ->
- {noreply, St}.
-
-handle_cast({cache_hit, DbName}, St) ->
- couch_stats:increment_counter([mem3, shard_cache, hit]),
- cache_hit(DbName),
- {noreply, St};
-handle_cast({cache_insert, DbName, Writer, UpdateSeq}, St) ->
- % This comparison correctly uses the `<` operator
- % and not `=<`. The easiest way to understand why is
- % to think of the case where the _dbs db doesn't change. If it
- % used `=<` it would be impossible to insert anything into
- % the cache.
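- % Concretely: if the cached update_seq is 5 and the _dbs db hasn't
- % changed, the incoming UpdateSeq is also 5. With `<` the insert goes
- % ahead; with `=<` the writer would be cancelled every time.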
- NewSt = case UpdateSeq < St#st.update_seq of
- true ->
- Writer ! cancel,
- St;
- false ->
- cache_free(cache_insert(St, DbName, Writer, St#st.write_timeout))
- end,
- {noreply, NewSt};
-handle_cast({cache_remove, DbName}, St) ->
- couch_stats:increment_counter([mem3, shard_cache, eviction]),
- {noreply, cache_remove(St, DbName)};
-handle_cast({cache_insert_change, DbName, Writer, UpdateSeq}, St) ->
- Msg = {cache_insert, DbName, Writer, UpdateSeq},
- {noreply, NewSt} = handle_cast(Msg, St),
- {noreply, NewSt#st{update_seq = UpdateSeq}};
-handle_cast({cache_remove_change, DbName, UpdateSeq}, St) ->
- {noreply, NewSt} = handle_cast({cache_remove, DbName}, St),
- {noreply, NewSt#st{update_seq = UpdateSeq}};
-handle_cast(_Msg, St) ->
- {noreply, St}.
-
-handle_info({'DOWN', _, _, Pid, Reason}, #st{changes_pid=Pid}=St) ->
- {NewSt, Seq} = case Reason of
- {seq, EndSeq} ->
- {St, EndSeq};
- shard_db_changed ->
- {cache_clear(St), get_update_seq()};
- _ ->
- couch_log:notice("~p changes listener died ~p", [?MODULE, Reason]),
- {St, get_update_seq()}
- end,
- erlang:send_after(5000, self(), {start_listener, Seq}),
- {noreply, NewSt#st{changes_pid=undefined}};
-handle_info({start_listener, Seq}, St) ->
- {noreply, St#st{
- changes_pid = start_changes_listener(Seq)
- }};
-handle_info(restart_config_listener, State) ->
- ok = config:listen_for_changes(?MODULE, nil),
- {noreply, State};
-handle_info(_Msg, St) ->
- {noreply, St}.
-
-terminate(_Reason, #st{changes_pid=Pid}) ->
- exit(Pid, kill),
- ok.
-
-code_change(_OldVsn, #st{}=St, _Extra) ->
- {ok, St}.
-
-%% internal functions
-
-start_changes_listener(SinceSeq) ->
- Self = self(),
- {Pid, _} = erlang:spawn_monitor(fun() ->
- erlang:spawn_link(fun() ->
- Ref = erlang:monitor(process, Self),
- receive
- {'DOWN', Ref, _, _, _} ->
- ok
- end,
- exit(shutdown)
- end),
- listen_for_changes(SinceSeq)
- end),
- Pid.
-
-fold_fun(#full_doc_info{}=FDI, Acc) ->
- DI = couch_doc:to_doc_info(FDI),
- fold_fun(DI, Acc);
-fold_fun(#doc_info{}=DI, {Db, UFun, UAcc}) ->
- case couch_db:open_doc(Db, DI, [ejson_body, conflicts]) of
- {ok, Doc} ->
- {Props} = Doc#doc.body,
- Shards = mem3_util:build_shards(Doc#doc.id, Props),
- NewUAcc = lists:foldl(UFun, UAcc, Shards),
- {ok, {Db, UFun, NewUAcc}};
- _ ->
- {ok, {Db, UFun, UAcc}}
- end.
-
-get_update_seq() ->
- {ok, Db} = mem3_util:ensure_exists(mem3_sync:shards_db()),
- Seq = couch_db:get_update_seq(Db),
- couch_db:close(Db),
- Seq.
-
-listen_for_changes(Since) ->
- {ok, Db} = mem3_util:ensure_exists(mem3_sync:shards_db()),
- Args = #changes_args{
- feed = "continuous",
- since = Since,
- heartbeat = true,
- include_docs = true
- },
- ChangesFun = couch_changes:handle_db_changes(Args, Since, Db),
- ChangesFun(fun changes_callback/2).
-
-changes_callback(start, Acc) ->
- {ok, Acc};
-changes_callback({stop, EndSeq}, _) ->
- exit({seq, EndSeq});
-changes_callback({change, {Change}, _}, _) ->
- DbName = couch_util:get_value(<<"id">>, Change),
- Seq = couch_util:get_value(<<"seq">>, Change),
- case DbName of <<"_design/", _/binary>> -> ok; _Else ->
- case mem3_util:is_deleted(Change) of
- true ->
- gen_server:cast(?MODULE, {cache_remove_change, DbName, Seq});
- false ->
- case couch_util:get_value(doc, Change) of
- {error, Reason} ->
- couch_log:error("missing partition table for ~s: ~p",
- [DbName, Reason]);
- {Doc} ->
- Shards = mem3_util:build_ordered_shards(DbName, Doc),
- IdleTimeout = config:get_integer(
- "mem3", "writer_idle_timeout", 30000),
- Writer = spawn_shard_writer(DbName, Shards, IdleTimeout),
- ets:insert(?OPENERS, {DbName, Writer}),
- Msg = {cache_insert_change, DbName, Writer, Seq},
- gen_server:cast(?MODULE, Msg),
- [create_if_missing(mem3:name(S), mem3:engine(S)) || S
- <- Shards, mem3:node(S) =:= node()]
- end
- end
- end,
- {ok, Seq};
-changes_callback(timeout, _) ->
- ok.
-
-load_shards_from_disk(DbName) when is_binary(DbName) ->
- couch_stats:increment_counter([mem3, shard_cache, miss]),
- {ok, Db} = mem3_util:ensure_exists(mem3_sync:shards_db()),
- try
- load_shards_from_db(Db, DbName)
- after
- couch_db:close(Db)
- end.
-
-load_shards_from_db(ShardDb, DbName) ->
- case couch_db:open_doc(ShardDb, DbName, [ejson_body]) of
- {ok, #doc{body = {Props}}} ->
- Seq = couch_db:get_update_seq(ShardDb),
- Shards = mem3_util:build_ordered_shards(DbName, Props),
- IdleTimeout = config:get_integer("mem3", "writer_idle_timeout", 30000),
- case maybe_spawn_shard_writer(DbName, Shards, IdleTimeout) of
- Writer when is_pid(Writer) ->
- case ets:insert_new(?OPENERS, {DbName, Writer}) of
- true ->
- Msg = {cache_insert, DbName, Writer, Seq},
- gen_server:cast(?MODULE, Msg);
- false ->
- Writer ! cancel
- end;
- ignore ->
- ok
- end,
- Shards;
- {not_found, _} ->
- erlang:error(database_does_not_exist, ?b2l(DbName))
- end.
-
-load_shards_from_disk(DbName, DocId)->
- Shards = load_shards_from_disk(DbName),
- HashKey = mem3_hash:calculate(hd(Shards), DocId),
- [S || S <- Shards, in_range(S, HashKey)].
-
-in_range(Shard, HashKey) ->
- [B, E] = mem3:range(Shard),
- B =< HashKey andalso HashKey =< E.
-
-create_if_missing(Name, Options) ->
- case couch_server:exists(Name) of
- true ->
- ok;
- false ->
- case couch_server:create(Name, [?ADMIN_CTX] ++ Options) of
- {ok, Db} ->
- couch_db:close(Db);
- Error ->
- couch_log:error("~p tried to create ~s, got ~p",
- [?MODULE, Name, Error])
- end
- end.
-
-cache_insert(#st{cur_size=Cur}=St, DbName, Writer, Timeout) ->
- NewATime = couch_util:unique_monotonic_integer(),
- true = ets:delete(?SHARDS, DbName),
- flush_write(DbName, Writer, Timeout),
- case ets:lookup(?DBS, DbName) of
- [{DbName, ATime}] ->
- true = ets:delete(?ATIMES, ATime),
- true = ets:insert(?ATIMES, {NewATime, DbName}),
- true = ets:insert(?DBS, {DbName, NewATime}),
- St;
- [] ->
- true = ets:insert(?ATIMES, {NewATime, DbName}),
- true = ets:insert(?DBS, {DbName, NewATime}),
- St#st{cur_size=Cur + 1}
- end.
-
-cache_remove(#st{cur_size=Cur}=St, DbName) ->
- true = ets:delete(?SHARDS, DbName),
- case ets:lookup(?DBS, DbName) of
- [{DbName, ATime}] ->
- true = ets:delete(?DBS, DbName),
- true = ets:delete(?ATIMES, ATime),
- St#st{cur_size=Cur-1};
- [] ->
- St
- end.
-
-cache_hit(DbName) ->
- case ets:lookup(?DBS, DbName) of
- [{DbName, ATime}] ->
- NewATime = couch_util:unique_monotonic_integer(),
- true = ets:delete(?ATIMES, ATime),
- true = ets:insert(?ATIMES, {NewATime, DbName}),
- true = ets:insert(?DBS, {DbName, NewATime});
- [] ->
- ok
- end.
-
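-% Eviction note: ?ATIMES is an ordered_set keyed on a monotonically increasing
-% access time, so ets:first/1 in cache_free/1 below always returns the least
-% recently used database, which is the one evicted first.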
-cache_free(#st{max_size=Max, cur_size=Cur}=St) when Max =< Cur ->
- ATime = ets:first(?ATIMES),
- [{ATime, DbName}] = ets:lookup(?ATIMES, ATime),
- true = ets:delete(?ATIMES, ATime),
- true = ets:delete(?DBS, DbName),
- true = ets:delete(?SHARDS, DbName),
- cache_free(St#st{cur_size=Cur-1});
-cache_free(St) ->
- St.
-
-cache_clear(St) ->
- true = ets:delete_all_objects(?DBS),
- true = ets:delete_all_objects(?SHARDS),
- true = ets:delete_all_objects(?ATIMES),
- St#st{cur_size=0}.
-
-maybe_spawn_shard_writer(DbName, Shards, IdleTimeout) ->
- case ets:member(?OPENERS, DbName) of
- true ->
- ignore;
- false ->
- spawn_shard_writer(DbName, Shards, IdleTimeout)
- end.
-
-spawn_shard_writer(DbName, Shards, IdleTimeout) ->
- erlang:spawn(fun() -> shard_writer(DbName, Shards, IdleTimeout) end).
-
-shard_writer(DbName, Shards, IdleTimeout) ->
- try
- receive
- write ->
- true = ets:insert(?SHARDS, Shards);
- cancel ->
- ok
- after IdleTimeout ->
- ok
- end
- after
- true = ets:delete_object(?OPENERS, {DbName, self()})
- end.
-
-flush_write(DbName, Writer, WriteTimeout) ->
- Ref = erlang:monitor(process, Writer),
- Writer ! write,
- receive
- {'DOWN', Ref, _, _, normal} ->
- ok;
- {'DOWN', Ref, _, _, Error} ->
- erlang:exit({mem3_shards_bad_write, Error})
- after WriteTimeout ->
- erlang:exit({mem3_shards_write_timeout, DbName})
- end.
-
-
-filter_shards_by_range(Range, Shards)->
- lists:filter(fun
- (#ordered_shard{range = R}) -> mem3_util:range_overlap(Range, R);
- (#shard{range = R}) -> mem3_util:range_overlap(Range, R)
- end, Shards).
-
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
--define(DB, <<"eunit_db_name">>).
--define(INFINITY, 99999999).
-
-
-mem3_shards_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- t_maybe_spawn_shard_writer_already_exists(),
- t_maybe_spawn_shard_writer_new(),
- t_flush_writer_exists_normal(),
- t_flush_writer_times_out(),
- t_flush_writer_crashes(),
- t_writer_deletes_itself_when_done(),
- t_writer_does_not_delete_other_writers_for_same_shard(),
- t_spawn_writer_in_load_shards_from_db(),
- t_cache_insert_takes_new_update(),
- t_cache_insert_ignores_stale_update_and_kills_worker()
- ]
- }
- }.
-
-
-setup_all() ->
- ets:new(?SHARDS, [bag, public, named_table, {keypos, #shard.dbname}]),
- ets:new(?OPENERS, [bag, public, named_table]),
- ets:new(?DBS, [set, public, named_table]),
- ets:new(?ATIMES, [ordered_set, public, named_table]),
- meck:expect(config, get, ["mem3", "shards_db", '_'], "_dbs"),
- ok.
-
-
-teardown_all(_) ->
- meck:unload(),
- ets:delete(?ATIMES),
- ets:delete(?DBS),
- ets:delete(?OPENERS),
- ets:delete(?SHARDS).
-
-
-setup() ->
- ets:delete_all_objects(?ATIMES),
- ets:delete_all_objects(?DBS),
- ets:delete_all_objects(?OPENERS),
- ets:delete_all_objects(?SHARDS).
-
-
-teardown(_) ->
- ok.
-
-
-t_maybe_spawn_shard_writer_already_exists() ->
- ?_test(begin
- ets:insert(?OPENERS, {?DB, self()}),
- Shards = mock_shards(),
- WRes = maybe_spawn_shard_writer(?DB, Shards, ?INFINITY),
- ?assertEqual(ignore, WRes)
- end).
-
-
-t_maybe_spawn_shard_writer_new() ->
- ?_test(begin
- Shards = mock_shards(),
- WPid = maybe_spawn_shard_writer(?DB, Shards, 1000),
- WRef = erlang:monitor(process, WPid),
- ?assert(is_pid(WPid)),
- ?assert(is_process_alive(WPid)),
- WPid ! write,
- ?assertEqual(normal, wait_writer_result(WRef)),
- ?assertEqual(Shards, ets:tab2list(?SHARDS))
- end).
-
-
-t_flush_writer_exists_normal() ->
- ?_test(begin
- Shards = mock_shards(),
- WPid = spawn_link_mock_writer(?DB, Shards, ?INFINITY),
- ?assertEqual(ok, flush_write(?DB, WPid, ?INFINITY)),
- ?assertEqual(Shards, ets:tab2list(?SHARDS))
- end).
-
-
-t_flush_writer_times_out() ->
- ?_test(begin
- WPid = spawn(fun() -> receive will_never_receive_this -> ok end end),
- Error = {mem3_shards_write_timeout, ?DB},
- ?assertExit(Error, flush_write(?DB, WPid, 100)),
- exit(WPid, kill)
- end).
-
-
-t_flush_writer_crashes() ->
- ?_test(begin
- WPid = spawn(fun() -> receive write -> exit('kapow!') end end),
- Error = {mem3_shards_bad_write, 'kapow!'},
- ?assertExit(Error, flush_write(?DB, WPid, 1000))
- end).
-
-
-t_writer_deletes_itself_when_done() ->
- ?_test(begin
- Shards = mock_shards(),
- WPid = spawn_link_mock_writer(?DB, Shards, ?INFINITY),
- WRef = erlang:monitor(process, WPid),
- ets:insert(?OPENERS, {?DB, WPid}),
- WPid ! write,
- ?assertEqual(normal, wait_writer_result(WRef)),
- ?assertEqual(Shards, ets:tab2list(?SHARDS)),
- ?assertEqual([], ets:tab2list(?OPENERS))
- end).
-
-
-t_writer_does_not_delete_other_writers_for_same_shard() ->
- ?_test(begin
- Shards = mock_shards(),
- WPid = spawn_link_mock_writer(?DB, Shards, ?INFINITY),
- WRef = erlang:monitor(process, WPid),
- ets:insert(?OPENERS, {?DB, WPid}),
- ets:insert(?OPENERS, {?DB, self()}), % should not be deleted
- WPid ! write,
- ?assertEqual(normal, wait_writer_result(WRef)),
- ?assertEqual(Shards, ets:tab2list(?SHARDS)),
- ?assertEqual(1, ets:info(?OPENERS, size)),
- ?assertEqual([{?DB, self()}], ets:tab2list(?OPENERS))
- end).
-
-
-t_spawn_writer_in_load_shards_from_db() ->
- ?_test(begin
- meck:expect(couch_db, open_doc, 3, {ok, #doc{body = {[]}}}),
- meck:expect(couch_db, get_update_seq, 1, 1),
- meck:expect(mem3_util, build_ordered_shards, 2, mock_shards()),
- erlang:register(?MODULE, self()), % register to get cache_insert cast
- load_shards_from_db(test_util:fake_db([{name, <<"testdb">>}]), ?DB),
- meck:validate(couch_db),
- meck:validate(mem3_util),
- Cast = receive
- {'$gen_cast', Msg} -> Msg
- after 1000 ->
- timeout
- end,
- ?assertMatch({cache_insert, ?DB, Pid, 1} when is_pid(Pid), Cast),
- {cache_insert, _, WPid, _} = Cast,
- exit(WPid, kill),
- ?assertEqual([{?DB, WPid}], ets:tab2list(?OPENERS)),
- meck:unload(couch_db),
- meck:unload(mem3_util)
- end).
-
-
-t_cache_insert_takes_new_update() ->
- ?_test(begin
- Shards = mock_shards(),
- WPid = spawn_link_mock_writer(?DB, Shards, ?INFINITY),
- Msg = {cache_insert, ?DB, WPid, 2},
- {noreply, NewState} = handle_cast(Msg, mock_state(1)),
- ?assertMatch(#st{cur_size = 1}, NewState),
- ?assertEqual(Shards, ets:tab2list(?SHARDS)),
- ?assertEqual([], ets:tab2list(?OPENERS))
- end).
-
-
-t_cache_insert_ignores_stale_update_and_kills_worker() ->
- ?_test(begin
- Shards = mock_shards(),
- WPid = spawn_link_mock_writer(?DB, Shards, ?INFINITY),
- WRef = erlang:monitor(process, WPid),
- Msg = {cache_insert, ?DB, WPid, 1},
- {noreply, NewState} = handle_cast(Msg, mock_state(2)),
- ?assertEqual(normal, wait_writer_result(WRef)),
- ?assertMatch(#st{cur_size = 0}, NewState),
- ?assertEqual([], ets:tab2list(?SHARDS)),
- ?assertEqual([], ets:tab2list(?OPENERS))
- end).
-
-
-mock_state(UpdateSeq) ->
- #st{
- update_seq = UpdateSeq,
- changes_pid = self(),
- write_timeout = 1000
- }.
-
-
-mock_shards() ->
- [
- #ordered_shard{
- name = <<"testshardname">>,
- node = node(),
- dbname = ?DB,
- range = [0,1],
- order = 1
- }
- ].
-
-
-wait_writer_result(WRef) ->
- receive
- {'DOWN', WRef, _, _, Result} ->
- Result
- after 1000 ->
- timeout
- end.
-
-
-spawn_link_mock_writer(Db, Shards, Timeout) ->
- erlang:spawn_link(fun() -> shard_writer(Db, Shards, Timeout) end).
-
-
-
-mem3_shards_changes_test_() -> {
- "Test mem3_shards changes listener",
- {
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- [
- fun should_kill_changes_listener_on_shutdown/0
- ]
- }
-}.
-
-
-should_kill_changes_listener_on_shutdown() ->
- {ok, Pid} = ?MODULE:start_link(),
- {ok, ChangesPid} = get_changes_pid(),
- ?assert(is_process_alive(ChangesPid)),
- true = erlang:unlink(Pid),
- true = test_util:stop_sync_throw(
- ChangesPid, fun() -> exit(Pid, shutdown) end, wait_timeout),
- ?assertNot(is_process_alive(ChangesPid)),
- exit(Pid, shutdown).
-
--endif.
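The cache bookkeeping at the top of this hunk (cache_free/1, cache_clear/1 and the ?ATIMES/?DBS tables) is a small ETS-based LRU: an ordered_set keyed by access time acts as the eviction index, while a plain set maps each dbname back to its current access time. Below is a standalone sketch of that pattern only; the module, function, and table names are hypothetical and not taken from the deleted code.

    %% Minimal LRU sketch (hypothetical names). `entries` maps Key -> {ATime, Value};
    %% `atimes` is an ordered_set keyed by ATime, so ets:first/1 yields the LRU key.
    -module(lru_sketch).
    -export([new/0, put/3, evict_to/2]).

    new() ->
        {ets:new(entries, [set, public]), ets:new(atimes, [ordered_set, public])}.

    put({Entries, ATimes}, Key, Value) ->
        ATime = erlang:unique_integer([monotonic]),
        case ets:lookup(Entries, Key) of
            [{Key, OldATime, _}] -> true = ets:delete(ATimes, OldATime);
            [] -> ok
        end,
        true = ets:insert(ATimes, {ATime, Key}),
        true = ets:insert(Entries, {Key, ATime, Value}),
        ok.

    %% Drop least recently used entries until at most MaxSize remain,
    %% mirroring the loop in cache_free/1 above.
    evict_to({Entries, ATimes} = Tabs, MaxSize) ->
        case ets:info(Entries, size) > MaxSize of
            true ->
                Oldest = ets:first(ATimes),
                [{Oldest, Key}] = ets:lookup(ATimes, Oldest),
                true = ets:delete(ATimes, Oldest),
                true = ets:delete(Entries, Key),
                evict_to(Tabs, MaxSize);
            false ->
                ok
        end.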
diff --git a/src/mem3/src/mem3_sup.erl b/src/mem3/src/mem3_sup.erl
deleted file mode 100644
index 3a1a3ca5a..000000000
--- a/src/mem3/src/mem3_sup.erl
+++ /dev/null
@@ -1,40 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_sup).
--behaviour(supervisor).
--export([start_link/0, init/1]).
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-init(_Args) ->
- Children = [
- child(mem3_events),
- child(mem3_nodes),
- child(mem3_seeds),
- child(mem3_sync_nodes), % Order important?
- child(mem3_sync),
- child(mem3_shards),
- child(mem3_sync_event_listener),
- child(mem3_reshard_sup)
- ],
- {ok, {{one_for_one,10,1}, couch_epi:register_service(mem3_epi, Children)}}.
-
-child(mem3_events) ->
- MFA = {gen_event, start_link, [{local, mem3_events}]},
- {mem3_events, MFA, permanent, 1000, worker, dynamic};
-child(mem3_reshard_sup = Child) ->
- MFA = {Child, start_link, []},
- {Child, MFA, permanent, infinity, supervisor, [Child]};
-child(Child) ->
- {Child, {Child, start_link, []}, permanent, 1000, worker, [Child]}.
diff --git a/src/mem3/src/mem3_sync.erl b/src/mem3/src/mem3_sync.erl
deleted file mode 100644
index 8170f3c1a..000000000
--- a/src/mem3/src/mem3_sync.erl
+++ /dev/null
@@ -1,323 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_sync).
--behaviour(gen_server).
--vsn(1).
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3]).
-
--export([start_link/0, get_active/0, get_queue/0, push/1, push/2,
- remove_node/1, remove_shard/1, initial_sync/1, get_backlog/0, nodes_db/0,
- shards_db/0, users_db/0, find_next_node/0]).
--export([
- local_dbs/0
-]).
-
--import(queue, [in/2, out/1, to_list/1, join/2, from_list/1, is_empty/1]).
-
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--record(state, {
- active = [],
- count = 0,
- limit,
- dict = dict:new(),
- waiting = queue:new()
-}).
-
--record(job, {name, node, count=nil, pid=nil}).
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-get_active() ->
- gen_server:call(?MODULE, get_active).
-
-get_queue() ->
- gen_server:call(?MODULE, get_queue).
-
-get_backlog() ->
- gen_server:call(?MODULE, get_backlog).
-
-push(#shard{name = Name}, Target) ->
- push(Name, Target);
-push(Name, #shard{node=Node}) ->
- push(Name, Node);
-push(Name, Node) ->
- push(#job{name = Name, node = Node}).
-
-push(#job{node = Node} = Job) when Node =/= node() ->
- gen_server:cast(?MODULE, {push, Job});
-push(_) ->
- ok.
-
-remove_node(Node) ->
- gen_server:cast(?MODULE, {remove_node, Node}).
-
-remove_shard(Shard) ->
- gen_server:cast(?MODULE, {remove_shard, Shard}).
-
-init([]) ->
- process_flag(trap_exit, true),
- Concurrency = config:get("mem3", "sync_concurrency", "10"),
- gen_event:add_handler(mem3_events, mem3_sync_event, []),
- initial_sync(),
- {ok, #state{limit = list_to_integer(Concurrency)}}.
-
-handle_call({push, Job}, From, State) ->
- handle_cast({push, Job#job{pid = From}}, State);
-
-handle_call(get_active, _From, State) ->
- {reply, State#state.active, State};
-
-handle_call(get_queue, _From, State) ->
- {reply, to_list(State#state.waiting), State};
-
-handle_call(get_backlog, _From, #state{active=A, waiting=WQ} = State) ->
- CA = lists:sum([C || #job{count=C} <- A, is_integer(C)]),
- CW = lists:sum([C || #job{count=C} <- to_list(WQ), is_integer(C)]),
- {reply, CA+CW, State}.
-
-handle_cast({push, DbName, Node}, State) ->
- handle_cast({push, #job{name = DbName, node = Node}}, State);
-
-handle_cast({push, Job}, #state{count=Count, limit=Limit} = State)
- when Count >= Limit ->
- {noreply, add_to_queue(State, Job)};
-
-handle_cast({push, Job}, State) ->
- #state{active = L, count = C} = State,
- #job{name = DbName, node = Node} = Job,
- case is_running(DbName, Node, L) of
- true ->
- {noreply, add_to_queue(State, Job)};
- false ->
- Pid = start_push_replication(Job),
- {noreply, State#state{active=[Job#job{pid=Pid}|L], count=C+1}}
- end;
-
-handle_cast({remove_node, Node}, #state{waiting = W0} = State) ->
- {Alive, Dead} = lists:partition(fun(#job{node=N}) -> N =/= Node end, to_list(W0)),
- Dict = remove_entries(State#state.dict, Dead),
- [exit(Pid, die_now) || #job{node=N, pid=Pid} <- State#state.active,
- N =:= Node],
- {noreply, State#state{dict = Dict, waiting = from_list(Alive)}};
-
-handle_cast({remove_shard, Shard}, #state{waiting = W0} = State) ->
- {Alive, Dead} = lists:partition(fun(#job{name=S}) ->
- S =/= Shard end, to_list(W0)),
- Dict = remove_entries(State#state.dict, Dead),
- [exit(Pid, die_now) || #job{name=S, pid=Pid} <- State#state.active,
- S =:= Shard],
- {noreply, State#state{dict = Dict, waiting = from_list(Alive)}}.
-
-handle_info({'EXIT', Active, normal}, State) ->
- handle_replication_exit(State, Active);
-
-handle_info({'EXIT', Active, die_now}, State) ->
- % we forced this one ourselves, do not retry
- handle_replication_exit(State, Active);
-
-handle_info({'EXIT', Active, {{not_found, no_db_file}, _Stack}}, State) ->
- % target doesn't exist, do not retry
- handle_replication_exit(State, Active);
-
-handle_info({'EXIT', Active, Reason}, State) ->
- NewState = case lists:keyfind(Active, #job.pid, State#state.active) of
- #job{name=OldDbName, node=OldNode} = Job ->
- couch_log:warning("~s ~s ~s ~w", [?MODULE, OldDbName, OldNode, Reason]),
- case Reason of {pending_changes, Count} ->
- maybe_resubmit(State, Job#job{pid = nil, count = Count});
- _ ->
- case mem3:db_is_current(Job#job.name) of
- true ->
- timer:apply_after(5000, ?MODULE, push, [Job#job{pid=nil}]);
- false ->
- % no need to retry (db deleted or recreated)
- ok
- end,
- State
- end;
- false -> State end,
- handle_replication_exit(NewState, Active);
-
-handle_info(Msg, State) ->
- couch_log:notice("unexpected msg at replication manager ~p", [Msg]),
- {noreply, State}.
-
-terminate(_Reason, State) ->
- [exit(Pid, shutdown) || #job{pid=Pid} <- State#state.active],
- ok.
-
-code_change(_, #state{waiting = WaitingList} = State, _) when is_list(WaitingList) ->
- {ok, State#state{waiting = from_list(WaitingList)}};
-
-code_change(_, State, _) ->
- {ok, State}.
-
-maybe_resubmit(State, #job{name=DbName, node=Node} = Job) ->
- case lists:member(DbName, local_dbs()) of
- true ->
- case find_next_node() of
- Node ->
- add_to_queue(State, Job);
- _ ->
- State % don't resubmit b/c we have a new replication target
- end;
- false ->
- add_to_queue(State, Job)
- end.
-
-handle_replication_exit(State, Pid) ->
- #state{active=Active, limit=Limit, dict=D, waiting=Waiting} = State,
- Active1 = lists:keydelete(Pid, #job.pid, Active),
- case is_empty(Waiting) of
- true ->
- {noreply, State#state{active=Active1, count=length(Active1)}};
- _ ->
- Count = length(Active1),
- NewState = if Count < Limit ->
- case next_replication(Active1, Waiting, queue:new()) of
- nil -> % all waiting replications are also active
- State#state{active = Active1, count = Count};
- {#job{name=DbName, node=Node} = Job, StillWaiting} ->
- NewPid = start_push_replication(Job),
- State#state{
- active = [Job#job{pid = NewPid} | Active1],
- count = Count+1,
- dict = dict:erase({DbName,Node}, D),
- waiting = StillWaiting
- }
- end;
- true ->
- State#state{active = Active1, count=Count}
- end,
- {noreply, NewState}
- end.
-
-start_push_replication(#job{name=Name, node=Node, pid=From}) ->
- if From =/= nil -> gen_server:reply(From, ok); true -> ok end,
- spawn_link(fun() ->
- case mem3_rep:go(Name, maybe_redirect(Node)) of
- {ok, Pending} when Pending > 0 ->
- exit({pending_changes, Pending});
- _ ->
- ok
- end
- end).
-
-add_to_queue(State, #job{name=DbName, node=Node, pid=From} = Job) ->
- #state{dict=D, waiting=WQ} = State,
- case dict:is_key({DbName, Node}, D) of
- true ->
- if From =/= nil -> gen_server:reply(From, ok); true -> ok end,
- State;
- false ->
- couch_log:debug("adding ~s -> ~p to mem3_sync queue", [DbName, Node]),
- State#state{
- dict = dict:store({DbName,Node}, ok, D),
- waiting = in(Job, WQ)
- }
- end.
-
-sync_nodes_and_dbs() ->
- Node = find_next_node(),
- [push(Db, Node) || Db <- local_dbs()].
-
-initial_sync() ->
- [net_kernel:connect_node(Node) || Node <- mem3:nodes()],
- mem3_sync_nodes:add(nodes()).
-
-initial_sync(Live) ->
- sync_nodes_and_dbs(),
- Acc = {node(), Live, []},
- {_, _, Shards} = mem3_shards:fold(fun initial_sync_fold/2, Acc),
- submit_replication_tasks(node(), Live, Shards).
-
-initial_sync_fold(#shard{dbname = Db} = Shard, {LocalNode, Live, AccShards}) ->
- case AccShards of
- [#shard{dbname = AccDb} | _] when Db =/= AccDb ->
- submit_replication_tasks(LocalNode, Live, AccShards),
- {LocalNode, Live, [Shard]};
- _ ->
- {LocalNode, Live, [Shard|AccShards]}
- end.
-
-submit_replication_tasks(LocalNode, Live, Shards) ->
- SplitFun = fun(#shard{node = Node}) -> Node =:= LocalNode end,
- {Local, Remote} = lists:partition(SplitFun, Shards),
- lists:foreach(fun(#shard{name = ShardName}) ->
- [sync_push(ShardName, N) || #shard{node=N, name=Name} <- Remote,
- Name =:= ShardName, lists:member(N, Live)]
- end, Local).
-
-sync_push(ShardName, N) ->
- gen_server:call(mem3_sync, {push, #job{name=ShardName, node=N}}, infinity).
-
-
-
-find_next_node() ->
- LiveNodes = [node()|nodes()],
- AllNodes0 = lists:sort(mem3:nodes()),
- AllNodes1 = [X || X <- AllNodes0, lists:member(X, LiveNodes)],
- AllNodes = AllNodes1 ++ [hd(AllNodes1)],
- [_Self, Next| _] = lists:dropwhile(fun(N) -> N =/= node() end, AllNodes),
- Next.
-
-%% @doc Finds the next {DbName,Node} pair in the list of waiting replications
-%% which does not correspond to an already running replication
--spec next_replication([#job{}], queue:queue(_), queue:queue(_)) ->
- {#job{}, queue:queue(_)} | nil.
-next_replication(Active, Waiting, WaitingAndRunning) ->
- case is_empty(Waiting) of
- true ->
- nil;
- false ->
- {{value, #job{name=S, node=N} = Job}, RemQ} = out(Waiting),
- case is_running(S,N,Active) of
- true ->
- next_replication(Active, RemQ, in(Job, WaitingAndRunning));
- false ->
- {Job, join(RemQ, WaitingAndRunning)}
- end
- end.
-
-is_running(DbName, Node, ActiveList) ->
- [] =/= [true || #job{name=S, node=N} <- ActiveList, S=:=DbName, N=:=Node].
-
-remove_entries(Dict, Entries) ->
- lists:foldl(fun(#job{name=S, node=N}, D) ->
- dict:erase({S, N}, D)
- end, Dict, Entries).
-
-local_dbs() ->
- [nodes_db(), shards_db(), users_db()].
-
-nodes_db() ->
- ?l2b(config:get("mem3", "nodes_db", "_nodes")).
-
-shards_db() ->
- ?l2b(config:get("mem3", "shards_db", "_dbs")).
-
-users_db() ->
- ?l2b(config:get("couch_httpd_auth", "authentication_db", "_users")).
-
-maybe_redirect(Node) ->
- case config:get("mem3.redirects", atom_to_list(Node)) of
- undefined ->
- Node;
- Redirect ->
- couch_log:debug("Redirecting push from ~p to ~p", [Node, Redirect]),
- list_to_existing_atom(Redirect)
- end.
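maybe_redirect/1 above looks the target node up in the `mem3.redirects` config section and, when an entry exists, reroutes the internal replication push to the named node; the value must already be a known node name, since it is converted with list_to_existing_atom/1. A hypothetical local.ini entry (example node names only) would look like:

    [mem3.redirects]
    couchdb@node1.example.com = couchdb@node2.example.com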
diff --git a/src/mem3/src/mem3_sync_event.erl b/src/mem3/src/mem3_sync_event.erl
deleted file mode 100644
index 7bca23086..000000000
--- a/src/mem3/src/mem3_sync_event.erl
+++ /dev/null
@@ -1,86 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_sync_event).
--behaviour(gen_event).
--vsn(1).
-
--export([init/1, handle_event/2, handle_call/2, handle_info/2, terminate/2,
- code_change/3]).
-
-init(_) ->
- net_kernel:monitor_nodes(true),
- {ok, nil}.
-
-handle_event({add_node, Node}, State) when Node =/= node() ->
- net_kernel:connect_node(Node),
- mem3_sync_nodes:add([Node]),
- {ok, State};
-
-handle_event({remove_node, Node}, State) ->
- mem3_sync:remove_node(Node),
- {ok, State};
-
-handle_event(_Event, State) ->
- {ok, State}.
-
-handle_call(_Request, State) ->
- {ok, ok, State}.
-
-handle_info({nodeup, Node}, State) ->
- Nodes0 = lists:usort([node() | drain_nodeups([Node])]),
- Nodes = lists:filter(fun(N) -> lists:member(N, mem3:nodes()) end, Nodes0),
- wait_for_rexi(Nodes, 5),
- {ok, State};
-
-handle_info({nodedown, Node}, State) ->
- mem3_sync:remove_node(Node),
- {ok, State};
-
-handle_info(_Info, State) ->
- {ok, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-drain_nodeups(Acc) ->
- receive
- {nodeup, Node} ->
- drain_nodeups([Node | Acc])
- after 0 ->
- Acc
- end.
-
-wait_for_rexi([], _Retries) ->
- ok;
-wait_for_rexi(Waiting, Retries) ->
- % Hack around rpc:multicall/4 so that we can
- % be sure which nodes gave which response
- Msg = {call, rexi_server_mon, status, [], group_leader()},
- {Resp, _Bad} = gen_server:multi_call(Waiting, rex, Msg, 1000),
- Up = [N || {N, R} <- Resp, R == ok],
- NotUp = Waiting -- Up,
- case length(Up) > 0 of
- true ->
- mem3_sync_nodes:add(Up);
- false -> ok
- end,
- case length(NotUp) > 0 andalso Retries > 0 of
- true ->
- timer:sleep(1000),
- wait_for_rexi(NotUp, Retries-1);
- false ->
- ok
- end.
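The workaround in wait_for_rexi/2 above exists because, as its comment notes, rpc:multicall/4 returns only a bare reply list plus the bad nodes, with no reliable association between reply and node, whereas gen_server:multi_call/4 against the rpc server ('rex') returns {Node, Reply} pairs. A hedged sketch of the difference, using a harmless erlang:node/0 call and assuming the rex server still accepts this internal message shape on the OTP release in use:

    %% Replies is a plain list; you cannot tell which node produced which reply.
    {Replies, BadNodes} = rpc:multicall(Nodes, erlang, node, []),

    %% Replies come back tagged with the responding node.
    Msg = {call, erlang, node, [], group_leader()},
    {NodeReplies, _BadNodes} = gen_server:multi_call(Nodes, rex, Msg, 1000).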
diff --git a/src/mem3/src/mem3_sync_event_listener.erl b/src/mem3/src/mem3_sync_event_listener.erl
deleted file mode 100644
index cad34225d..000000000
--- a/src/mem3/src/mem3_sync_event_listener.erl
+++ /dev/null
@@ -1,353 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_sync_event_listener).
--behavior(couch_event_listener).
--vsn(1).
-
--export([
- start_link/0
-]).
-
--export([
- init/1,
- terminate/2,
- handle_event/3,
- handle_cast/2,
- handle_info/2
-]).
-
--include_lib("mem3/include/mem3.hrl").
-
--ifdef(TEST).
--define(RELISTEN_DELAY, 500).
--else.
--define(RELISTEN_DELAY, 5000).
--endif.
-
--record(state, {
- nodes,
- shards,
- users,
- delay,
- frequency,
- last_push,
- buckets
-}).
-
-%% Calling mem3_sync:push/2 on every update has a measurable performance cost,
-%% so we'd like to coalesce multiple update messages from couch_event into a
-%% single push call. Doing this while ensuring both correctness (i.e., no lost
-%% updates) and an even load profile is somewhat subtle. This implementation
-%% groups updated shards in a list of "buckets" (see bucket_shard/2) and
-%% guarantees that each shard is in no more than one bucket at a time - i.e.,
-%% any update messages received before the shard's current bucket has been
-%% pushed will be ignored - thereby reducing the frequency with which a single
-%% shard will be pushed. mem3_sync:push/2 is called on all shards in the
-%% *oldest* bucket roughly every mem3.sync_frequency milliseconds (see
-%% maybe_push_shards/1) to even out the load on mem3_sync.
-
-start_link() ->
- couch_event_listener:start_link(?MODULE, [], [all_dbs]).
-
-init(_) ->
- ok = subscribe_for_config(),
- Delay = config:get_integer("mem3", "sync_delay", 5000),
- Frequency = config:get_integer("mem3", "sync_frequency", 500),
- Buckets = lists:duplicate(Delay div Frequency + 1, sets:new()),
- St = #state{
- nodes = mem3_sync:nodes_db(),
- shards = mem3_sync:shards_db(),
- users = mem3_sync:users_db(),
- delay = Delay,
- frequency = Frequency,
- buckets = Buckets
- },
- {ok, St}.
-
-terminate(_Reason, _State) ->
- ok.
-
-handle_event(NodesDb, updated, #state{nodes = NodesDb} = St) ->
- Nodes = mem3:nodes(),
- Live = nodes(),
- [mem3_sync:push(NodesDb, N) || N <- Nodes, lists:member(N, Live)],
- maybe_push_shards(St);
-handle_event(ShardsDb, updated, #state{shards = ShardsDb} = St) ->
- mem3_sync:push(ShardsDb, mem3_sync:find_next_node()),
- maybe_push_shards(St);
-handle_event(UsersDb, updated, #state{users = UsersDb} = St) ->
- mem3_sync:push(UsersDb, mem3_sync:find_next_node()),
- maybe_push_shards(St);
-handle_event(<<"shards/", _/binary>> = ShardName, updated, St) ->
- Buckets = bucket_shard(ShardName, St#state.buckets),
- maybe_push_shards(St#state{buckets=Buckets});
-handle_event(<<"shards/", _:18/binary, _/binary>> = ShardName, deleted, St) ->
- mem3_sync:remove_shard(ShardName),
- maybe_push_shards(St);
-handle_event(_DbName, _Event, St) ->
- maybe_push_shards(St).
-
-handle_cast({set_frequency, Frequency}, St) ->
- #state{delay = Delay, buckets = Buckets0} = St,
- Buckets1 = rebucket_shards(Delay, Frequency, Buckets0),
- maybe_push_shards(St#state{frequency=Frequency, buckets=Buckets1});
-handle_cast({set_delay, Delay}, St) ->
- #state{frequency = Frequency, buckets = Buckets0} = St,
- Buckets1 = rebucket_shards(Delay, Frequency, Buckets0),
- maybe_push_shards(St#state{delay=Delay, buckets=Buckets1});
-handle_cast(Msg, St) ->
- couch_log:notice("unexpected cast to mem3_sync_event_listener: ~p", [Msg]),
- maybe_push_shards(St).
-
-handle_info(timeout, St) ->
- maybe_push_shards(St);
-handle_info({config_change, "mem3", "sync_delay", Value, _}, St) ->
- set_config(set_delay, Value, "ignoring bad value for mem3.sync_delay"),
- maybe_push_shards(St);
-handle_info({config_change, "mem3", "sync_frequency", Value, _}, St) ->
- set_config(set_frequency, Value, "ignoring bad value for mem3.sync_frequency"),
- maybe_push_shards(St);
-handle_info({gen_event_EXIT, _Handler, _Reason}, St) ->
- erlang:send_after(?RELISTEN_DELAY, self(), restart_config_listener),
- maybe_push_shards(St);
-handle_info(restart_config_listener, St) ->
- ok = subscribe_for_config(),
- maybe_push_shards(St);
-handle_info({get_state, Ref, Caller}, St) ->
- Caller ! {Ref, St},
- {ok, St};
-handle_info(Msg, St) ->
- couch_log:notice("unexpected info to mem3_sync_event_listener: ~p", [Msg]),
- maybe_push_shards(St).
-
-set_config(Cmd, Value, Error) ->
- try list_to_integer(Value) of
- IntegerValue ->
- couch_event_listener:cast(self(), {Cmd, IntegerValue})
- catch error:badarg ->
- couch_log:warning("~s: ~p", [Error, Value])
- end.
-
-bucket_shard(ShardName, [B|Bs]=Buckets0) ->
- case waiting(ShardName, Buckets0) of
- true -> Buckets0;
- false -> [sets:add_element(ShardName, B)|Bs]
- end.
-
-waiting(_, []) ->
- false;
-waiting(ShardName, [B|Bs]) ->
- case sets:is_element(ShardName, B) of
- true -> true;
- false -> waiting(ShardName, Bs)
- end.
-
-rebucket_shards(Frequency, Delay, Buckets0) ->
- case (Delay div Frequency + 1) - length(Buckets0) of
- 0 ->
- Buckets0;
- N when N < 0 ->
- %% Reduce the number of buckets by merging the newest abs(N) + 1 buckets into one
- {ToMerge, [B|Buckets1]} = lists:split(abs(N), Buckets0),
- [sets:union([B|ToMerge])|Buckets1];
- M ->
- %% Extend the number of buckets by M
- lists:duplicate(M, sets:new()) ++ Buckets0
- end.
-
-%% To ensure that mem3_sync:push/2 is indeed called with roughly the frequency
-%% specified by #state.frequency, every message callback must return via a call
-%% to maybe_push_shards/1 rather than directly. All timing coordination - i.e.,
-%% calling mem3_sync:push/2 or setting a proper timeout to ensure that pending
-%% messages aren't dropped in case no further messages arrive - is handled here.
-maybe_push_shards(#state{last_push=undefined} = St) ->
- {ok, St#state{last_push=os:timestamp()}, St#state.frequency};
-maybe_push_shards(St) ->
- #state{frequency=Frequency, last_push=LastPush, buckets=Buckets0} = St,
- Now = os:timestamp(),
- Delta = timer:now_diff(Now, LastPush) div 1000,
- case Delta > Frequency of
- true ->
- {Buckets1, [ToPush]} = lists:split(length(Buckets0) - 1, Buckets0),
- Buckets2 = [sets:new()|Buckets1],
- %% There's no sets:map/2!
- sets:fold(
- fun(ShardName, _) -> push_shard(ShardName) end,
- undefined,
- ToPush
- ),
- {ok, St#state{last_push=Now, buckets=Buckets2}, Frequency};
- false ->
- {ok, St, Frequency - Delta}
- end.
-
-push_shard(ShardName) ->
- try mem3_shards:for_shard_range(ShardName) of
- Shards ->
- Live = nodes(),
- lists:foreach(
- fun(#shard{node=N}) ->
- case lists:member(N, Live) of
- true -> mem3_sync:push(ShardName, N);
- false -> ok
- end
- end,
- Shards
- )
- catch error:database_does_not_exist ->
- ok
- end.
-
-subscribe_for_config() ->
- config:subscribe_for_changes([
- {"mem3", "sync_delay"},
- {"mem3", "sync_frequency"}
- ]).
-
--ifdef(TEST).
--include_lib("couch/include/couch_eunit.hrl").
-
-setup_all() ->
- application:start(config),
-
- ok = meck:new(couch_event, [passthrough]),
- ok = meck:expect(couch_event, register_all, ['_'], ok),
-
- ok = meck:new(config_notifier, [passthrough]),
- ok = meck:expect(config_notifier, handle_event, [
- {[{'_', '_', '_', "error", '_'}, '_'], meck:raise(throw, raised_error)},
- {['_', '_'], meck:passthrough()}
- ]).
-
-teardown_all(_) ->
- meck:unload(),
- application:stop(config).
-
-setup() ->
- {ok, Pid} = ?MODULE:start_link(),
- erlang:unlink(Pid),
- wait_config_subscribed(Pid),
- Pid.
-
-teardown(Pid) ->
- exit(Pid, shutdown).
-
-subscribe_for_config_test_() ->
- {
- "Subscribe for configuration changes",
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun should_set_sync_delay/1,
- fun should_set_sync_frequency/1,
- fun should_restart_listener/1,
- fun should_terminate/1
- ]
- }
- }
- }.
-
-should_set_sync_delay(Pid) ->
- ?_test(begin
- config:set("mem3", "sync_delay", "123", false),
- wait_state(Pid, #state.delay, 123),
- ?assertMatch(#state{delay = 123}, get_state(Pid)),
- ok
- end).
-
-should_set_sync_frequency(Pid) ->
- ?_test(begin
- config:set("mem3", "sync_frequency", "456", false),
- wait_state(Pid, #state.frequency, 456),
- ?assertMatch(#state{frequency = 456}, get_state(Pid)),
- ok
- end).
-
-should_restart_listener(_Pid) ->
- ?_test(begin
- meck:reset(config_notifier),
- config:set("mem3", "sync_frequency", "error", false),
-
- meck:wait(config_notifier, subscribe, '_', 1000),
- ok
- end).
-
-should_terminate(Pid) ->
- ?_test(begin
- ?assert(is_process_alive(Pid)),
-
- EventMgr = whereis(config_event),
- EventMgrWasAlive = (catch is_process_alive(EventMgr)),
-
- Ref = erlang:monitor(process, Pid),
-
- RestartFun = fun() -> exit(EventMgr, kill) end,
- {_, _} = test_util:with_process_restart(config_event, RestartFun),
-
- ?assertNot(is_process_alive(EventMgr)),
-
- receive
- {'DOWN', Ref, _, _, _} ->
- ok
- after 1000 ->
- ?debugFmt("~n XKCD should_terminate EventMgrWasAlive:~p MsgQueue:~p PInfo:~p ~n", [
- EventMgrWasAlive, process_info(self(), messages), process_info(Pid)
- ]),
- ?assert(false)
- end,
-
- ?assert(is_process_alive(whereis(config_event))),
- ok
- end).
-
-
-get_state(Pid) ->
- Ref = make_ref(),
- Pid ! {get_state, Ref, self()},
- receive
- {Ref, State} -> State
- after 500 ->
- timeout
- end.
-
-
-wait_state(Pid, Field, Val) when is_pid(Pid), is_integer(Field) ->
- WaitFun = fun() ->
- case get_state(Pid) of
- #state{} = S when element(Field, S) == Val ->
- true;
- _ ->
- wait
- end
- end,
- test_util:wait(WaitFun).
-
-
-wait_config_subscribed(Pid) ->
- WaitFun = fun() ->
- Handlers = gen_event:which_handlers(config_event),
- Pids = [Id || {config_notifier, Id} <- Handlers],
- case lists:member(Pid, Pids) of
- true -> true;
- false -> wait
- end
- end,
- test_util:wait(WaitFun).
-
--endif.
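With the defaults read in init/1 above (mem3.sync_delay = 5000 ms, mem3.sync_frequency = 500 ms) the listener keeps 5000 div 500 + 1 = 11 buckets, and maybe_push_shards/1 pushes the oldest bucket roughly every 500 ms, so a freshly bucketed shard waits on the order of the configured delay before being pushed. A standalone sketch of that rotation, with hypothetical function names:

    %% Build the bucket list the same way init/1 does (Delay and Frequency in ms).
    buckets(Delay, Frequency) ->
        lists:duplicate(Delay div Frequency + 1, sets:new()).

    %% Pop the oldest bucket for pushing and start a fresh newest bucket,
    %% mirroring the lists:split/2 in maybe_push_shards/1.
    rotate(Buckets) ->
        {Newer, [Oldest]} = lists:split(length(Buckets) - 1, Buckets),
        {Oldest, [sets:new() | Newer]}.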
diff --git a/src/mem3/src/mem3_sync_nodes.erl b/src/mem3/src/mem3_sync_nodes.erl
deleted file mode 100644
index 0a4bffcd2..000000000
--- a/src/mem3/src/mem3_sync_nodes.erl
+++ /dev/null
@@ -1,115 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_sync_nodes).
--behaviour(gen_server).
--vsn(1).
-
-
--export([start_link/0]).
--export([add/1]).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
--export([monitor_sync/1]).
-
-
--record(st, {
- tid
-}).
-
-
--record(job, {
- nodes,
- pid,
- retry
-}).
-
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-
-add(Nodes) ->
- gen_server:cast(?MODULE, {add, Nodes}).
-
-
-init([]) ->
- {ok, #st{
- tid = ets:new(?MODULE, [set, protected, {keypos, #job.nodes}])
- }}.
-
-
-terminate(_Reason, St) ->
- [exit(Pid, kill) || #job{pid=Pid} <- ets:tab2list(St#st.tid)],
- ok.
-
-
-handle_call(Msg, _From, St) ->
- {stop, {invalid_call, Msg}, invalid_call, St}.
-
-
-handle_cast({add, Nodes}, #st{tid=Tid}=St) ->
- case ets:lookup(Tid, Nodes) of
- [] ->
- Pid = start_sync(Nodes),
- ets:insert(Tid, #job{nodes=Nodes, pid=Pid, retry=false});
- [#job{retry=false}=Job] ->
- ets:insert(Tid, Job#job{retry=true});
- _ ->
- ok
- end,
- {noreply, St};
-
-handle_cast(Msg, St) ->
- {stop, {invalid_cast, Msg}, St}.
-
-
-handle_info({'DOWN', _, _, _, {sync_done, Nodes}}, #st{tid=Tid}=St) ->
- case ets:lookup(Tid, Nodes) of
- [#job{retry=true}=Job] ->
- Pid = start_sync(Nodes),
- ets:insert(Tid, Job#job{pid=Pid, retry=false});
- _ ->
- ets:delete(Tid, Nodes)
- end,
- {noreply, St};
-
-handle_info({'DOWN', _, _, _, {sync_error, Nodes}}, #st{tid=Tid}=St) ->
- Pid = start_sync(Nodes),
- ets:insert(Tid, #job{nodes=Nodes, pid=Pid, retry=false}),
- {noreply, St};
-
-handle_info(Msg, St) ->
- {stop, {invalid_info, Msg}, St}.
-
-
-code_change(_OldVsn, St, _Extra) ->
- {ok, St}.
-
-
-start_sync(Nodes) ->
- {Pid, _} = spawn_monitor(?MODULE, monitor_sync, [Nodes]),
- Pid.
-
-
-monitor_sync(Nodes) ->
- process_flag(trap_exit, true),
- Pid = spawn_link(mem3_sync, initial_sync, [Nodes]),
- receive
- {'EXIT', Pid, normal} ->
- exit({sync_done, Nodes});
- _ ->
- exit({sync_error, Nodes})
- end.
-
diff --git a/src/mem3/src/mem3_sync_security.erl b/src/mem3/src/mem3_sync_security.erl
deleted file mode 100644
index 291e4e085..000000000
--- a/src/mem3/src/mem3_sync_security.erl
+++ /dev/null
@@ -1,117 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_sync_security).
-
--export([maybe_sync/2, maybe_sync_int/2]).
--export([go/0, go/1]).
-
--include_lib("mem3/include/mem3.hrl").
-
-
-maybe_sync(#shard{}=Src, #shard{}=Dst) ->
- case is_local(Src#shard.name) of
- false ->
- erlang:spawn(?MODULE, maybe_sync_int, [Src, Dst]);
- true ->
- ok
- end.
-
-maybe_sync_int(#shard{name=Name}=Src, Dst) ->
- DbName = mem3:dbname(Name),
- case fabric:get_all_security(DbName, [{shards, [Src, Dst]}]) of
- {ok, WorkerObjs} ->
- Objs = [Obj || {_Worker, Obj} <- WorkerObjs],
- case length(lists:usort(Objs)) of
- 1 -> ok;
- 2 -> go(DbName)
- end;
- {error, no_majority} ->
- go(DbName);
- Else ->
- Args = [DbName, Else],
- couch_log:error("Error checking security objects for ~s :: ~p", Args)
- end.
-
-go() ->
- {ok, Dbs} = fabric:all_dbs(),
- lists:foreach(fun handle_existing_db/1, Dbs).
-
-go(DbName) when is_binary(DbName) ->
- handle_existing_db(DbName).
-
-handle_existing_db(DbName) ->
- try handle_db(DbName) of
- _ -> ok
- catch
- error:database_does_not_exist ->
- couch_log:error("Db was deleted while getting security"
- " object. DbName: ~p", [DbName]),
- ok
- end.
-
-handle_db(DbName) ->
- ShardCount = length(mem3:shards(DbName)),
- case get_all_security(DbName) of
- {ok, SecObjs} ->
- case is_ok(SecObjs, ShardCount) of
- ok ->
- ok;
- {fixable, SecObj} ->
- couch_log:info("Sync security object for ~p: ~p", [DbName, SecObj]),
- case fabric:set_security(DbName, SecObj) of
- ok -> ok;
- Error ->
- couch_log:error("Error setting security object in ~p: ~p",
- [DbName, Error])
- end;
- broken ->
- couch_log:error("Bad security object in ~p: ~p", [DbName, SecObjs])
- end;
- Error ->
- couch_log:error("Error getting security objects for ~p: ~p", [
- DbName, Error])
- end.
-
-get_all_security(DbName) ->
- case fabric:get_all_security(DbName) of
- {ok, SecObjs} ->
- SecObjsDict = lists:foldl(fun({_, SO}, Acc) ->
- dict:update_counter(SO, 1, Acc)
- end, dict:new(), SecObjs),
- {ok, dict:to_list(SecObjsDict)};
- Error ->
- Error
- end.
-
-is_ok([_], _) ->
- % One security object is the happy case
- ok;
-is_ok([_, _] = SecObjs0, ShardCount) ->
- % Figure out if we have a simple majority of security objects
- % and if so, use that as the correct value. Otherwise we abort
- % and rely on human intervention.
- {Count, SecObj} = lists:max([{C, O} || {O, C} <- SecObjs0]),
- case Count >= ((ShardCount div 2) + 1) of
- true -> {fixable, SecObj};
- false -> broken
- end;
-is_ok(_, _) ->
- % Anything else requires human intervention
- broken.
-
-
-is_local(<<"shards/", _/binary>>) ->
- false;
-is_local(_) ->
- true.
-
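The majority rule in is_ok/2 above is easiest to read off a small example: for a database with ShardCount = 3 the threshold is (3 div 2) + 1 = 2, so two identical security objects are enough to pick a winner, while a case where no object reaches the threshold is reported as broken. Hand-worked expectations (assuming SecA and SecB are bound to two distinct security objects):

    {fixable, SecA} = is_ok([{SecA, 2}, {SecB, 1}], 3),
    broken          = is_ok([{SecA, 1}, {SecB, 1}], 3).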
diff --git a/src/mem3/src/mem3_util.erl b/src/mem3/src/mem3_util.erl
deleted file mode 100644
index 28cb17778..000000000
--- a/src/mem3/src/mem3_util.erl
+++ /dev/null
@@ -1,650 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_util).
-
--export([name_shard/2, create_partition_map/5, build_shards/2,
- n_val/2, q_val/1, to_atom/1, to_integer/1, write_db_doc/1, delete_db_doc/1,
- shard_info/1, ensure_exists/1, open_db_doc/1, get_or_create_db/2]).
--export([is_deleted/1, rotate_list/2]).
--export([get_shard_opts/1, get_engine_opt/1, get_props_opt/1]).
--export([get_shard_props/1, find_dirty_shards/0]).
--export([
- iso8601_timestamp/0,
- live_nodes/0,
- replicate_dbs_to_all_nodes/1,
- replicate_dbs_from_all_nodes/1,
- range_overlap/2,
- get_ring/1,
- get_ring/2,
- get_ring/3,
- get_ring/4,
- non_overlapping_shards/1,
- non_overlapping_shards/3,
- calculate_max_n/1
-]).
-
-%% do not use outside mem3.
--export([build_ordered_shards/2, downcast/1]).
-
--export([create_partition_map/4, name_shard/1]).
--deprecated({create_partition_map, 4, eventually}).
--deprecated({name_shard, 1, eventually}).
-
--define(RINGTOP, 2 bsl 31). % CRC32 space
-
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
-name_shard(Shard) ->
- name_shard(Shard, "").
-
-name_shard(#shard{dbname = DbName, range=Range} = Shard, Suffix) ->
- Name = make_name(DbName, Range, Suffix),
- Shard#shard{name = ?l2b(Name)};
-
-name_shard(#ordered_shard{dbname = DbName, range=Range} = Shard, Suffix) ->
- Name = make_name(DbName, Range, Suffix),
- Shard#ordered_shard{name = ?l2b(Name)}.
-
-make_name(DbName, [B,E], Suffix) ->
- ["shards/", couch_util:to_hex(<<B:32/integer>>), "-",
- couch_util:to_hex(<<E:32/integer>>), "/", DbName, Suffix].
-
-create_partition_map(DbName, N, Q, Nodes) ->
- create_partition_map(DbName, N, Q, Nodes, "").
-
-create_partition_map(DbName, N, Q, Nodes, Suffix) when Q > 0 ->
- UniqueShards = make_key_ranges((?RINGTOP) div Q, 0, []),
- Shards0 = lists:flatten([lists:duplicate(N, S) || S <- UniqueShards]),
- Shards1 = attach_nodes(Shards0, [], Nodes, []),
- [name_shard(S#shard{dbname=DbName}, Suffix) || S <- Shards1].
-
-make_key_ranges(I, CurrentPos, Acc) when I > 0, CurrentPos >= ?RINGTOP ->
- Acc;
-make_key_ranges(Increment, Start, Acc) when Increment > 0 ->
- case Start + 2*Increment of
- X when X > ?RINGTOP ->
- End = ?RINGTOP - 1;
- _ ->
- End = Start + Increment - 1
- end,
- make_key_ranges(Increment, End+1, [#shard{range=[Start, End]} | Acc]).
-
-attach_nodes([], Acc, _, _) ->
- lists:reverse(Acc);
-attach_nodes(Shards, Acc, [], UsedNodes) ->
- attach_nodes(Shards, Acc, lists:reverse(UsedNodes), []);
-attach_nodes([S | Rest], Acc, [Node | Nodes], UsedNodes) ->
- attach_nodes(Rest, [S#shard{node=Node} | Acc], Nodes, [Node | UsedNodes]).
-
-open_db_doc(DocId) ->
- {ok, Db} = couch_db:open(mem3_sync:shards_db(), [?ADMIN_CTX]),
- try couch_db:open_doc(Db, DocId, [ejson_body]) after couch_db:close(Db) end.
-
-write_db_doc(Doc) ->
- write_db_doc(mem3_sync:shards_db(), Doc, true).
-
-write_db_doc(DbName, #doc{id=Id, body=Body} = Doc, ShouldMutate) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- try couch_db:open_doc(Db, Id, [ejson_body]) of
- {ok, #doc{body = Body}} ->
- % the doc is already in the desired state, we're done here
- ok;
- {not_found, _} when ShouldMutate ->
- try couch_db:update_doc(Db, Doc, []) of
- {ok, _} ->
- ok
- catch conflict ->
- % check to see if this was a replication race or a different edit
- write_db_doc(DbName, Doc, false)
- end;
- _ ->
- % the doc already exists in a different state
- conflict
- after
- couch_db:close(Db)
- end.
-
-delete_db_doc(DocId) ->
- gen_server:cast(mem3_shards, {cache_remove, DocId}),
- delete_db_doc(mem3_sync:shards_db(), DocId, true).
-
-delete_db_doc(DbName, DocId, ShouldMutate) ->
- {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- {ok, Revs} = couch_db:open_doc_revs(Db, DocId, all, []),
- try [Doc#doc{deleted=true} || {ok, #doc{deleted=false}=Doc} <- Revs] of
- [] ->
- not_found;
- Docs when ShouldMutate ->
- try couch_db:update_docs(Db, Docs, []) of
- {ok, _} ->
- ok
- catch conflict ->
- % check to see if this was a replication race or if leafs survived
- delete_db_doc(DbName, DocId, false)
- end;
- _ ->
- % we have live leafs that we aren't allowed to delete. let's bail
- conflict
- after
- couch_db:close(Db)
- end.
-
-%% Always returns original #shard records.
--spec build_shards(binary(), list()) -> [#shard{}].
-build_shards(DbName, DocProps) ->
- build_shards_by_node(DbName, DocProps).
-
-%% Will return #ordered_shard records if by_node and by_range
-%% are symmetrical, #shard records otherwise.
--spec build_ordered_shards(binary(), list()) ->
- [#shard{}] | [#ordered_shard{}].
-build_ordered_shards(DbName, DocProps) ->
- ByNode = build_shards_by_node(DbName, DocProps),
- ByRange = build_shards_by_range(DbName, DocProps),
- Symmetrical = lists:sort(ByNode) =:= lists:sort(downcast(ByRange)),
- case Symmetrical of
- true -> ByRange;
- false -> ByNode
- end.
-
-build_shards_by_node(DbName, DocProps) ->
- {ByNode} = couch_util:get_value(<<"by_node">>, DocProps, {[]}),
- Suffix = couch_util:get_value(<<"shard_suffix">>, DocProps, ""),
- lists:flatmap(fun({Node, Ranges}) ->
- lists:map(fun(Range) ->
- [B,E] = string:tokens(?b2l(Range), "-"),
- Beg = httpd_util:hexlist_to_integer(B),
- End = httpd_util:hexlist_to_integer(E),
- name_shard(#shard{
- dbname = DbName,
- node = to_atom(Node),
- range = [Beg, End],
- opts = get_shard_opts(DocProps)
- }, Suffix)
- end, Ranges)
- end, ByNode).
-
-build_shards_by_range(DbName, DocProps) ->
- {ByRange} = couch_util:get_value(<<"by_range">>, DocProps, {[]}),
- Suffix = couch_util:get_value(<<"shard_suffix">>, DocProps, ""),
- lists:flatmap(fun({Range, Nodes}) ->
- lists:map(fun({Node, Order}) ->
- [B,E] = string:tokens(?b2l(Range), "-"),
- Beg = httpd_util:hexlist_to_integer(B),
- End = httpd_util:hexlist_to_integer(E),
- name_shard(#ordered_shard{
- dbname = DbName,
- node = to_atom(Node),
- range = [Beg, End],
- order = Order,
- opts = get_shard_opts(DocProps)
- }, Suffix)
- end, lists:zip(Nodes, lists:seq(1, length(Nodes))))
- end, ByRange).
-
-to_atom(Node) when is_binary(Node) ->
- list_to_atom(binary_to_list(Node));
-to_atom(Node) when is_atom(Node) ->
- Node.
-
-to_integer(N) when is_integer(N) ->
- N;
-to_integer(N) when is_binary(N) ->
- list_to_integer(binary_to_list(N));
-to_integer(N) when is_list(N) ->
- list_to_integer(N).
-
-get_shard_opts(DocProps) ->
- get_engine_opt(DocProps) ++ get_props_opt(DocProps).
-
-get_engine_opt(DocProps) ->
- case couch_util:get_value(<<"engine">>, DocProps) of
- Engine when is_binary(Engine) ->
- [{engine, Engine}];
- _ ->
- []
- end.
-
-get_props_opt(DocProps) ->
- case couch_util:get_value(<<"props">>, DocProps) of
- {Props} when is_list(Props) ->
- [{props, db_props_from_json(Props)}];
- _ ->
- []
- end.
-
-db_props_from_json([]) ->
- [];
-
-db_props_from_json([{<<"partitioned">>, Value} | Rest]) ->
- [{partitioned, Value} | db_props_from_json(Rest)];
-
-db_props_from_json([{<<"hash">>, [MBin, FBin, A]} | Rest]) ->
- M = binary_to_existing_atom(MBin, utf8),
- F = binary_to_existing_atom(FBin, utf8),
- [{hash, [M, F, A]} | db_props_from_json(Rest)];
-
-db_props_from_json([{K, V} | Rest]) ->
- [{K, V} | db_props_from_json(Rest)].
-
-n_val(undefined, NodeCount) ->
- n_val(config:get("cluster", "n", "3"), NodeCount);
-n_val(N, NodeCount) when is_list(N) ->
- n_val(list_to_integer(N), NodeCount);
-n_val(N, NodeCount) when is_integer(NodeCount), N > NodeCount ->
- couch_log:error("Request to create N=~p DB but only ~p node(s)", [N, NodeCount]),
- NodeCount;
-n_val(N, _) when N < 1 ->
- 1;
-n_val(N, _) ->
- N.
-
-q_val(Q) when is_list(Q) ->
- q_val(list_to_integer(Q));
-q_val(Q) when Q > 0 ->
- Q;
-q_val(_) ->
- throw({error, invalid_q_value}).
-
-shard_info(DbName) ->
- [{n, mem3:n(DbName)},
- {q, length(mem3:shards(DbName)) div mem3:n(DbName)}].
-
-ensure_exists(DbName) when is_list(DbName) ->
- ensure_exists(list_to_binary(DbName));
-ensure_exists(DbName) ->
- Options = [nologifmissing, sys_db, {create_if_missing, true}, ?ADMIN_CTX],
- case couch_db:open(DbName, Options) of
- {ok, Db} ->
- {ok, Db};
- file_exists ->
- couch_db:open(DbName, [sys_db, ?ADMIN_CTX])
- end.
-
-
-is_deleted(Change) ->
- case couch_util:get_value(<<"deleted">>, Change) of
- undefined ->
- % keep backwards compatibility for a while
- couch_util:get_value(deleted, Change, false);
- Else ->
- Else
- end.
-
-rotate_list(_Key, []) ->
- [];
-rotate_list(Key, List) when not is_binary(Key) ->
- rotate_list(term_to_binary(Key), List);
-rotate_list(Key, List) ->
- {H, T} = lists:split(erlang:crc32(Key) rem length(List), List),
- T ++ H.
-
-downcast(#shard{}=S) ->
- S;
-downcast(#ordered_shard{}=S) ->
- #shard{
- name = S#ordered_shard.name,
- node = S#ordered_shard.node,
- dbname = S#ordered_shard.dbname,
- range = S#ordered_shard.range,
- ref = S#ordered_shard.ref,
- opts = S#ordered_shard.opts
- };
-downcast(Shards) when is_list(Shards) ->
- [downcast(Shard) || Shard <- Shards].
-
-iso8601_timestamp() ->
- {_,_,Micro} = Now = os:timestamp(),
- {{Year,Month,Date},{Hour,Minute,Second}} = calendar:now_to_datetime(Now),
- Format = "~4.10.0B-~2.10.0B-~2.10.0BT~2.10.0B:~2.10.0B:~2.10.0B.~6.10.0BZ",
- io_lib:format(Format, [Year, Month, Date, Hour, Minute, Second, Micro]).
-
-
-live_nodes() ->
- LiveNodes = [node() | nodes()],
- Mem3Nodes = lists:sort(mem3:nodes()),
- [N || N <- Mem3Nodes, lists:member(N, LiveNodes)].
-
-
-% Replicate "dbs" db to all nodes. Basically push the changes to all the live
-% mem3:nodes(). Returns only after all current changes have been replicated,
-% which could be a while.
-%
-replicate_dbs_to_all_nodes(Timeout) ->
- DbName = mem3_sync:shards_db(),
- Targets= mem3_util:live_nodes() -- [node()],
- Res = [start_replication(node(), T, DbName, Timeout) || T <- Targets],
- collect_replication_results(Res, Timeout).
-
-
-% Replicate "dbs" db from all nodes to this node. Basically make an rpc call
-% to all the nodes and have them push their changes to this node. Then monitor
-% them until they are all done.
-%
-replicate_dbs_from_all_nodes(Timeout) ->
- DbName = mem3_sync:shards_db(),
- Sources = mem3_util:live_nodes() -- [node()],
- Res = [start_replication(S, node(), DbName, Timeout) || S <- Sources],
- collect_replication_results(Res, Timeout).
-
-
-% Spawn and monitor a single replication of a database to a target node.
-% Returns the {Pid, Ref} pair from spawn_monitor/1. This function could be called locally or remotely from
-% mem3_rpc, for instance when replicating other nodes' data to this node.
-%
-start_replication(Source, Target, DbName, Timeout) ->
- spawn_monitor(fun() ->
- case mem3_rpc:replicate(Source, Target, DbName, Timeout) of
- {ok, 0} ->
- exit(ok);
- Other ->
- exit(Other)
- end
- end).
-
-
-collect_replication_results(Replications, Timeout) ->
- Res = [collect_replication_result(R, Timeout) || R <- Replications],
- case [R || R <- Res, R =/= ok] of
- [] ->
- ok;
- Errors ->
- {error, Errors}
- end.
-
-
-collect_replication_result({Pid, Ref}, Timeout) when is_pid(Pid) ->
- receive
- {'DOWN', Ref, _, _, Res} ->
- Res
- after Timeout ->
- demonitor(Pid, [flush]),
- exit(Pid, kill),
- {error, {timeout, Timeout, node(Pid)}}
- end;
-
-collect_replication_result(Error, _) ->
- {error, Error}.
-
-
-% Consider these cases:
-%
-% A-------B
-%
-% overlap:
-% X--------Y
-% X-Y
-% X-------Y
-% X-------------------Y
-%
-% no overlap:
-% X-Y because A !=< Y
-% X-Y because X !=< B
-%
-range_overlap([A, B], [X, Y]) when
- is_integer(A), is_integer(B),
- is_integer(X), is_integer(Y),
- A =< B, X =< Y ->
- A =< Y andalso X =< B.
-
-
-non_overlapping_shards(Shards) ->
- {Start, End} = lists:foldl(fun(Shard, {Min, Max}) ->
- [B, E] = mem3:range(Shard),
- {min(B, Min), max(E, Max)}
- end, {0, ?RING_END}, Shards),
- non_overlapping_shards(Shards, Start, End).
-
-
-non_overlapping_shards([], _, _) ->
- [];
-
-non_overlapping_shards(Shards, Start, End) ->
- Ranges = lists:map(fun(Shard) ->
- [B, E] = mem3:range(Shard),
- {B, E}
- end, Shards),
- Ring = get_ring(Ranges, fun sort_ranges_fun/2, Start, End),
- lists:filter(fun(Shard) ->
- [B, E] = mem3:range(Shard),
- lists:member({B, E}, Ring)
- end, Shards).
-
-
-% Given a list of shards, return the maximum number of copies
-% across all the ranges. If the ring is incomplete it will return 0.
-% If it is an n = 1 database, it should return 1, etc.
-calculate_max_n(Shards) ->
- Ranges = lists:map(fun(Shard) ->
- [B, E] = mem3:range(Shard),
- {B, E}
- end, Shards),
- calculate_max_n(Ranges, get_ring(Ranges), 0).
-
-
-calculate_max_n(_Ranges, [], N) ->
- N;
-
-calculate_max_n(Ranges, Ring, N) ->
- NewRanges = Ranges -- Ring,
- calculate_max_n(NewRanges, get_ring(NewRanges), N + 1).
-
-
-get_ring(Ranges) ->
- get_ring(Ranges, fun sort_ranges_fun/2, 0, ?RING_END).
-
-
-get_ring(Ranges, SortFun) when is_function(SortFun, 2) ->
- get_ring(Ranges, SortFun, 0, ?RING_END).
-
-
-get_ring(Ranges, Start, End) when is_integer(Start), is_integer(End),
- Start >= 0, End >= 0, Start =< End ->
- get_ring(Ranges, fun sort_ranges_fun/2, Start, End).
-
-% Build a ring out of a list of possibly overlapping ranges. If a ring cannot
-% be built then [] is returned. Start and End supply a custom range such that
-% only intervals in that range will be considered. SortFun is a custom sorting
-% function to sort intervals before the ring is built. The custom sort function
-% can be used to prioritize how the ring is built, for example, whether to use
-% shortest ranges first (and thus have more total shards), longest ranges
-% first, or any other scheme.
-%
-get_ring([], _SortFun, _Start, _End) ->
- [];
-get_ring(Ranges, SortFun, Start, End) when is_function(SortFun, 2),
- is_integer(Start), is_integer(End),
- Start >= 0, End >= 0, Start =< End ->
- Sorted = lists:usort(SortFun, Ranges),
- case get_subring_int(Start, End, Sorted) of
- fail -> [];
- Ring -> Ring
- end.
-
-
-get_subring_int(_, _, []) ->
- fail;
-
-get_subring_int(Start, EndMax, [{Start, End} = Range | Tail]) ->
- case End =:= EndMax of
- true ->
- [Range];
- false ->
- case get_subring_int(End + 1, EndMax, Tail) of
- fail ->
- get_subring_int(Start, EndMax, Tail);
- Acc ->
- [Range | Acc]
- end
- end;
-
-get_subring_int(Start1, _, [{Start2, _} | _]) when Start2 > Start1 ->
- % Found a gap, this attempt is done
- fail;
-
-get_subring_int(Start1, EndMax, [{Start2, _} | Rest]) when Start2 < Start1 ->
- % We've overlapped the head, skip the shard
- get_subring_int(Start1, EndMax, Rest).
-
-
-% Sort ranges by starting point, then sort so that
-% the longest range comes first
-sort_ranges_fun({B, E1}, {B, E2}) ->
- E2 =< E1;
-
-sort_ranges_fun({B1, _}, {B2, _}) ->
- B1 =< B2.
-
-
-get_or_create_db(DbName, Options) ->
- case couch_db:open_int(DbName, Options) of
- {ok, _} = OkDb ->
- OkDb;
- {not_found, no_db_file} ->
- try
- DbOpts = case mem3:dbname(DbName) of
- DbName -> [];
- MDbName -> mem3_shards:opts_for_db(MDbName)
- end,
- Options1 = [{create_if_missing, true} | Options],
- Options2 = merge_opts(DbOpts, Options1),
- couch_db:open_int(DbName, Options2)
- catch error:database_does_not_exist ->
- throw({error, missing_target})
- end;
- Else ->
- Else
- end.
-
-
-%% merge two proplists, atom options only valid in Old
-merge_opts(New, Old) ->
- lists:foldl(fun({Key, Val}, Acc) ->
- lists:keystore(Key, 1, Acc, {Key, Val})
- end, Old, New).
-
-
-get_shard_props(ShardName) ->
- case couch_db:open_int(ShardName, []) of
- {ok, Db} ->
- Props = case couch_db_engine:get_props(Db) of
- undefined -> [];
- Else -> Else
- end,
- %% We don't normally store the default engine name
- EngineProps = case couch_db_engine:get_engine(Db) of
- couch_bt_engine ->
- [];
- EngineName ->
- [{engine, EngineName}]
- end,
- [{props, Props} | EngineProps];
- {not_found, _} ->
- not_found;
- Else ->
- Else
- end.
-
-
-find_dirty_shards() ->
- mem3_shards:fold(fun(#shard{node=Node, name=Name, opts=Opts}=Shard, Acc) ->
- case Opts of
- [] ->
- Acc;
- [{props, []}] ->
- Acc;
- _ ->
- Props = rpc:call(Node, ?MODULE, get_shard_props, [Name]),
- case Props =:= Opts of
- true ->
- Acc;
- false ->
- [{Shard, Props} | Acc]
- end
- end
- end, []).
-
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
-range_overlap_test_() ->
- [?_assertEqual(Res, range_overlap(R1, R2)) || {R1, R2, Res} <- [
- {[2, 6], [1, 3], true},
- {[2, 6], [3, 4], true},
- {[2, 6], [4, 8], true},
- {[2, 6], [1, 9], true},
- {[2, 6], [1, 2], true},
- {[2, 6], [6, 7], true},
- {[2, 6], [0, 1], false},
- {[2, 6], [7, 9], false}
- ]].
-
-
-non_overlapping_shards_test_() ->
- [?_assertEqual(Res, non_overlapping_shards(Shards)) || {Shards, Res} <- [
- {
- [shard(0, ?RING_END)],
- [shard(0, ?RING_END)]
- },
- {
- [shard(0, 1)],
- [shard(0, 1)]
- },
- {
- [shard(0, 1), shard(0, 1)],
- [shard(0, 1)]
- },
- {
- [shard(0, 1), shard(3, 4)],
- []
- },
- {
- [shard(0, 1), shard(1, 2), shard(2, 3)],
- [shard(0, 1), shard(2, 3)]
- },
- {
- [shard(1, 2), shard(0, 1)],
- [shard(0, 1), shard(1, 2)]
- },
- {
- [shard(0, 1), shard(0, 2), shard(2, 5), shard(3, 5)],
- [shard(0, 2), shard(2, 5)]
- },
- {
- [shard(0, 2), shard(4, 5), shard(1, 3)],
- []
- }
-
- ]].
-
-
-calculate_max_n_test_() ->
- [?_assertEqual(Res, calculate_max_n(Shards)) || {Res, Shards} <- [
- {0, []},
- {0, [shard(1, ?RING_END)]},
- {1, [shard(0, ?RING_END)]},
- {1, [shard(0, ?RING_END), shard(1, ?RING_END)]},
- {2, [shard(0, ?RING_END), shard(0, ?RING_END)]},
- {2, [shard(0, 1), shard(2, ?RING_END), shard(0, ?RING_END)]},
- {0, [shard(0, 3), shard(5, ?RING_END), shard(1, ?RING_END)]}
- ]].
-
-
-shard(Begin, End) ->
- #shard{range = [Begin, End]}.
-
--endif.
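The behaviour described in the get_ring/4 comment above is easiest to see on a small custom keyspace. The following expectations were worked out by hand from the code in this hunk (they are not part of its test suite) and use the get_ring/3 form over a 0..10 range:

    %% A range covering the whole keyspace wins over the shorter ones because
    %% sort_ranges_fun/2 puts the longest range starting at 0 first.
    [{0, 10}]         = mem3_util:get_ring([{0, 5}, {6, 10}, {0, 10}], 0, 10),
    %% Two ranges that meet exactly at 5/6 form a complete ring.
    [{0, 5}, {6, 10}] = mem3_util:get_ring([{0, 5}, {6, 10}], 0, 10),
    %% A gap at 6 means no ring can be built, so the result is [].
    []                = mem3_util:get_ring([{0, 5}, {7, 10}], 0, 10).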
diff --git a/src/mem3/test/eunit/mem3_cluster_test.erl b/src/mem3/test/eunit/mem3_cluster_test.erl
deleted file mode 100644
index 4610d64bd..000000000
--- a/src/mem3/test/eunit/mem3_cluster_test.erl
+++ /dev/null
@@ -1,133 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_cluster_test).
-
--behavior(mem3_cluster).
-
--include_lib("eunit/include/eunit.hrl").
-
--export([
- cluster_unstable/1,
- cluster_stable/1
-]).
-
-
-% Mem3 cluster callbacks
-
-cluster_unstable(Server) ->
- Server ! cluster_unstable,
- Server.
-
-cluster_stable(Server) ->
- Server ! cluster_stable,
- Server.
-
-
-mem3_cluster_test_test_() ->
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- t_cluster_stable_during_startup_period(),
- t_cluster_unstable_delivered_on_nodeup(),
- t_cluster_unstable_delivered_on_nodedown(),
- t_wait_period_is_reset_after_last_change()
- ]
- }.
-
-
-t_cluster_stable_during_startup_period() ->
- ?_test(begin
- {ok, Pid} = mem3_cluster:start_link(?MODULE, self(), 1, 2),
- register(?MODULE, Pid),
- receive
- cluster_stable ->
- ?assert(true)
- after 1500 ->
- throw(timeout)
- end,
- unlink(Pid),
- exit(Pid, kill)
- end).
-
-
-t_cluster_unstable_delivered_on_nodeup() ->
- ?_test(begin
- {ok, Pid} = mem3_cluster:start_link(?MODULE, self(), 1, 2),
- register(?MODULE, Pid),
- Pid ! {nodeup, node()},
- receive
- cluster_unstable ->
- ?assert(true)
- after 1000 ->
- throw(timeout)
- end,
- unlink(Pid),
- exit(Pid, kill)
- end).
-
-
-t_cluster_unstable_delivered_on_nodedown() ->
- ?_test(begin
- {ok, Pid} = mem3_cluster:start_link(?MODULE, self(), 1, 2),
- register(?MODULE, Pid),
- Pid ! {nodedown, node()},
- receive
- cluster_unstable ->
- ?assert(true)
- after 1000 ->
- throw(timeout)
- end,
- unlink(Pid),
- exit(Pid, kill)
- end).
-
-
-t_wait_period_is_reset_after_last_change() ->
- ?_test(begin
- {ok, Pid} = mem3_cluster:start_link(?MODULE, self(), 1, 1),
- register(?MODULE, Pid),
- timer:sleep(800),
- Pid ! {nodeup, node()}, % after 800 ms send a nodeup
- receive
- cluster_stable ->
- ?assert(false)
- after 400 ->
- ?assert(true) % stability check should have been reset
- end,
- timer:sleep(1000),
- receive
- cluster_stable ->
- ?assert(true)
- after 0 ->
- ?assert(false) % cluster_stable arrives after enough quiet time
- end,
- unlink(Pid),
- exit(Pid, kill)
- end).
-
-
-% Test helper functions
-
-setup() ->
- ok.
-
-teardown(_) ->
- case whereis(?MODULE) of
- undefined ->
- ok;
- Pid when is_pid(Pid) ->
- unlink(Pid),
- exit(Pid, kill)
- end.
diff --git a/src/mem3/test/eunit/mem3_hash_test.erl b/src/mem3/test/eunit/mem3_hash_test.erl
deleted file mode 100644
index 7a40c5366..000000000
--- a/src/mem3/test/eunit/mem3_hash_test.erl
+++ /dev/null
@@ -1,23 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_hash_test).
-
--include_lib("eunit/include/eunit.hrl").
-
-hash_test() ->
- ?assertEqual(1624516141,mem3_hash:crc32(0)),
- ?assertEqual(3816901808,mem3_hash:crc32("0")),
- ?assertEqual(3523407757,mem3_hash:crc32(<<0>>)),
- ?assertEqual(4108050209,mem3_hash:crc32(<<"0">>)),
- ?assertEqual(3094724072,mem3_hash:crc32(zero)),
- ok.
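
The pinned values above can be partly sanity-checked from an Erlang shell: for plain binaries they match erlang:crc32/1 directly, which suggests (an inference from the test values, not from the mem3_hash source) that binary inputs are hashed as-is while other terms are presumably run through term_to_binary/1 first:

    1> erlang:crc32(<<0>>).
    3523407757
    2> erlang:crc32(<<"0">>).
    4108050209
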
diff --git a/src/mem3/test/eunit/mem3_rep_test.erl b/src/mem3/test/eunit/mem3_rep_test.erl
deleted file mode 100644
index 4a46e7b93..000000000
--- a/src/mem3/test/eunit/mem3_rep_test.erl
+++ /dev/null
@@ -1,321 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_rep_test).
-
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-
--define(ID, <<"_id">>).
--define(TIMEOUT, 60). % seconds
-
-setup() ->
- {AllSrc, AllTgt} = {?tempdb(), ?tempdb()},
- {PartSrc, PartTgt} = {?tempdb(), ?tempdb()},
- create_db(AllSrc, [{q, 1}, {n, 1}]),
- create_db(AllTgt, [{q, 2}, {n, 1}]),
- PartProps = [{partitioned, true}, {hash, [couch_partition, hash, []]}],
- create_db(PartSrc, [{q, 1}, {n, 1}, {props, PartProps}]),
- create_db(PartTgt, [{q, 2}, {n, 1}, {props, PartProps}]),
- #{allsrc => AllSrc, alltgt => AllTgt, partsrc => PartSrc, parttgt => PartTgt}.
-
-
-teardown(#{} = Dbs) ->
- maps:map(fun(_, Db) -> delete_db(Db) end, Dbs).
-
-
-start_couch() ->
- test_util:start_couch([mem3, fabric]).
-
-
-stop_couch(Ctx) ->
- test_util:stop_couch(Ctx).
-
-
-mem3_reshard_db_test_() ->
- {
- "mem3 rep db tests",
- {
- setup,
- fun start_couch/0, fun stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun replicate_basics/1,
- fun replicate_small_batches/1,
- fun replicate_low_batch_count/1,
- fun replicate_with_partitions/1
- ]
- }
- }
- }.
-
-
-replicate_basics(#{allsrc := AllSrc, alltgt := AllTgt}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- DocSpec = #{docs => 10, delete => [5, 9]},
- add_test_docs(AllSrc, DocSpec),
- SDocs = get_all_docs(AllSrc),
-
- [Src] = lists:sort(mem3:local_shards(AllSrc)),
- [Tgt1, Tgt2] = lists:sort(mem3:local_shards(AllTgt)),
- #shard{range = R1} = Tgt1,
- #shard{range = R2} = Tgt2,
- TMap = #{R1 => Tgt1, R2 => Tgt2},
- Opts = [{batch_size, 1000}, {batch_count, all}],
- ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts)),
-
- ?assertEqual(SDocs, get_all_docs(AllTgt))
- end)}.
-
-
-replicate_small_batches(#{allsrc := AllSrc, alltgt := AllTgt}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- DocSpec = #{docs => 10, delete => [5, 9]},
- add_test_docs(AllSrc, DocSpec),
- SDocs = get_all_docs(AllSrc),
-
- [Src] = lists:sort(mem3:local_shards(AllSrc)),
- [Tgt1, Tgt2] = lists:sort(mem3:local_shards(AllTgt)),
- #shard{range = R1} = Tgt1,
- #shard{range = R2} = Tgt2,
- TMap = #{R1 => Tgt1, R2 => Tgt2},
- Opts = [{batch_size, 2}, {batch_count, all}],
- ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts)),
-
- ?assertEqual(SDocs, get_all_docs(AllTgt))
- end)}.
-
-
-replicate_low_batch_count(#{allsrc := AllSrc, alltgt := AllTgt}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- DocSpec = #{docs => 10, delete => [5, 9]},
- add_test_docs(AllSrc, DocSpec),
- SDocs = get_all_docs(AllSrc),
-
- [Src] = lists:sort(mem3:local_shards(AllSrc)),
- [Tgt1, Tgt2] = lists:sort(mem3:local_shards(AllTgt)),
- #shard{range = R1} = Tgt1,
- #shard{range = R2} = Tgt2,
- TMap = #{R1 => Tgt1, R2 => Tgt2},
-
- Opts1 = [{batch_size, 2}, {batch_count, 1}],
- ?assertMatch({ok, 8}, mem3_rep:go(Src, TMap, Opts1)),
-
- Opts2 = [{batch_size, 1}, {batch_count, 2}],
- ?assertMatch({ok, 6}, mem3_rep:go(Src, TMap, Opts2)),
-
- Opts3 = [{batch_size, 1000}, {batch_count, all}],
- ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts3)),
-
- ?assertEqual(SDocs, get_all_docs(AllTgt))
- end)}.
-
-
-replicate_with_partitions(#{partsrc := PartSrc, parttgt := PartTgt}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- DocSpec = #{
- pdocs => #{
- <<"PX">> => 15,
- <<"PY">> => 19
- }
- },
- add_test_docs(PartSrc, DocSpec),
- SDocs = get_all_docs(PartSrc),
- PXSrc = get_partition_info(PartSrc, <<"PX">>),
- PYSrc = get_partition_info(PartSrc, <<"PY">>),
-
- [Src] = lists:sort(mem3:local_shards(PartSrc)),
- [Tgt1, Tgt2] = lists:sort(mem3:local_shards(PartTgt)),
- #shard{range = R1} = Tgt1,
- #shard{range = R2} = Tgt2,
- TMap = #{R1 => Tgt1, R2 => Tgt2},
- Opts = [{batch_size, 1000}, {batch_count, all}],
- ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts)),
-
- ?assertEqual(PXSrc, get_partition_info(PartTgt, <<"PX">>)),
- ?assertEqual(PYSrc, get_partition_info(PartTgt, <<"PY">>)),
- ?assertEqual(SDocs, get_all_docs(PartTgt))
- end)}.
-
-
-get_partition_info(DbName, Partition) ->
- with_proc(fun() ->
- {ok, PInfo} = fabric:get_partition_info(DbName, Partition),
- maps:with([
- <<"doc_count">>, <<"doc_del_count">>, <<"partition">>
- ], to_map(PInfo))
- end).
-
-
-get_all_docs(DbName) ->
- get_all_docs(DbName, #mrargs{}).
-
-
-get_all_docs(DbName, #mrargs{} = QArgs0) ->
- GL = erlang:group_leader(),
- with_proc(fun() ->
- Cb = fun
- ({row, Props}, Acc) ->
- Doc = to_map(couch_util:get_value(doc, Props)),
- #{?ID := Id} = Doc,
- {ok, Acc#{Id => Doc}};
- ({meta, _}, Acc) -> {ok, Acc};
- (complete, Acc) -> {ok, Acc}
- end,
- QArgs = QArgs0#mrargs{include_docs = true},
- {ok, Docs} = fabric:all_docs(DbName, Cb, #{}, QArgs),
- Docs
- end, GL).
-
-
-to_map([_ | _] = Props) ->
- to_map({Props});
-
-to_map({[_ | _]} = EJson) ->
- jiffy:decode(jiffy:encode(EJson), [return_maps]).
-
-
-create_db(DbName, Opts) ->
- GL = erlang:group_leader(),
- with_proc(fun() -> fabric:create_db(DbName, Opts) end, GL).
-
-
-delete_db(DbName) ->
- GL = erlang:group_leader(),
- with_proc(fun() -> fabric:delete_db(DbName, [?ADMIN_CTX]) end, GL).
-
-
-with_proc(Fun) ->
- with_proc(Fun, undefined, 30000).
-
-
-with_proc(Fun, GroupLeader) ->
- with_proc(Fun, GroupLeader, 30000).
-
-
-with_proc(Fun, GroupLeader, Timeout) ->
- {Pid, Ref} = spawn_monitor(fun() ->
- case GroupLeader of
- undefined -> ok;
- _ -> erlang:group_leader(GroupLeader, self())
- end,
- exit({with_proc_res, Fun()})
- end),
- receive
- {'DOWN', Ref, process, Pid, {with_proc_res, Res}} ->
- Res;
- {'DOWN', Ref, process, Pid, Error} ->
- error(Error)
- after Timeout ->
- erlang:demonitor(Ref, [flush]),
- exit(Pid, kill),
- error({with_proc_timeout, Fun, Timeout})
- end.
-
-
-add_test_docs(DbName, #{} = DocSpec) ->
- Docs = docs(maps:get(docs, DocSpec, []))
- ++ pdocs(maps:get(pdocs, DocSpec, #{})),
- Res = update_docs(DbName, Docs),
- Docs1 = lists:map(fun({Doc, {ok, {RevPos, Rev}}}) ->
- Doc#doc{revs = {RevPos, [Rev]}}
- end, lists:zip(Docs, Res)),
- case delete_docs(maps:get(delete, DocSpec, []), Docs1) of
- [] -> ok;
- [_ | _] = Deleted -> update_docs(DbName, Deleted)
- end,
- ok.
-
-
-update_docs(DbName, Docs) ->
- with_proc(fun() ->
- case fabric:update_docs(DbName, Docs, [?ADMIN_CTX]) of
- {accepted, Res} -> Res;
- {ok, Res} -> Res
- end
- end).
-
-
-delete_docs([S, E], Docs) when E >= S ->
- ToDelete = [doc_id(<<"">>, I) || I <- lists:seq(S, E)],
- lists:filtermap(fun(#doc{id = Id} = Doc) ->
- case lists:member(Id, ToDelete) of
- true -> {true, Doc#doc{deleted = true}};
- false -> false
- end
- end, Docs);
-delete_docs(_, _) ->
- [].
-
-
-pdocs(#{} = PMap) ->
- maps:fold(fun(Part, DocSpec, DocsAcc) ->
- docs(DocSpec, <<Part/binary, ":">>) ++ DocsAcc
- end, [], PMap).
-
-
-docs(DocSpec) ->
- docs(DocSpec, <<"">>).
-
-
-docs(N, Prefix) when is_integer(N), N > 0 ->
- docs([0, N - 1], Prefix);
-docs([S, E], Prefix) when E >= S ->
- [doc(Prefix, I) || I <- lists:seq(S, E)];
-docs(_, _) ->
- [].
-
-
-doc(Pref, Id) ->
- Body = bodyprops(),
- doc(Pref, Id, Body, 42).
-
-
-doc(Pref, Id, BodyProps, AttSize) ->
- #doc{
- id = doc_id(Pref, Id),
- body = {BodyProps},
- atts = atts(AttSize)
- }.
-
-
-doc_id(Pref, Id) ->
- IdBin = iolist_to_binary(io_lib:format("~5..0B", [Id])),
- <<Pref/binary, IdBin/binary>>.
-
-
-bodyprops() ->
- [
- {<<"g">>, {[
- {<<"type">>, <<"Polygon">>},
- {<<"coordinates">>, [[[-71.0, 48.4], [-70.0, 48.4], [-71.0, 48.4]]]}
- ]}}
- ].
-
-
-atts(0) ->
- [];
-
-atts(Size) when is_integer(Size), Size >= 1 ->
- Data = << <<"x">> || _ <- lists:seq(1, Size) >>,
- [couch_att:new([
- {name, <<"att">>},
- {type, <<"app/binary">>},
- {att_len, Size},
- {data, Data}
- ])].
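
Taken together, the assertions above outline how this suite uses mem3_rep:go/3: batch_size caps the number of changes pushed per batch, batch_count limits how many batches run (or all), and the {ok, N} return is the number of changes still pending on the source. A usage sketch inferred from those assertions (SourceShard and TargetMap are placeholders):

    %% Inferred from the test assertions above, not from mem3_rep documentation.
    Opts = [{batch_size, 2},   % push at most 2 changes per batch
            {batch_count, 1}], % run a single batch, then return
    {ok, Pending} = mem3_rep:go(SourceShard, TargetMap, Opts).
    %% With 10 pending changes on the source this leaves Pending =:= 8;
    %% [{batch_size, 1000}, {batch_count, all}] drains everything: {ok, 0}.
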
diff --git a/src/mem3/test/eunit/mem3_reshard_api_test.erl b/src/mem3/test/eunit/mem3_reshard_api_test.erl
deleted file mode 100644
index c4df24ad3..000000000
--- a/src/mem3/test/eunit/mem3_reshard_api_test.erl
+++ /dev/null
@@ -1,847 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard_api_test).
-
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/src/mem3_reshard.hrl").
-
-
--define(USER, "mem3_reshard_api_test_admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
--define(JSON, {"Content-Type", "application/json"}).
--define(RESHARD, "_reshard/").
--define(JOBS, "_reshard/jobs/").
--define(STATE, "_reshard/state").
--define(ID, <<"id">>).
--define(OK, <<"ok">>).
--define(TIMEOUT, 60). % seconds
-
-
-setup() ->
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(chttpd, port),
- Url = lists:concat(["http://", Addr, ":", Port, "/"]),
- {Db1, Db2, Db3} = {?tempdb(), ?tempdb(), ?tempdb()},
- create_db(Url, Db1, "?q=1&n=1"),
- create_db(Url, Db2, "?q=1&n=1"),
- create_db(Url, Db3, "?q=2&n=1"),
- {Url, {Db1, Db2, Db3}}.
-
-
-teardown({Url, {Db1, Db2, Db3}}) ->
- mem3_reshard:reset_state(),
- application:unset_env(mem3, reshard_disabled),
- delete_db(Url, Db1),
- delete_db(Url, Db2),
- delete_db(Url, Db3),
- ok = config:delete("reshard", "max_jobs", _Persist=false),
- ok = config:delete("reshard", "require_node_param", _Persist=false),
- ok = config:delete("reshard", "require_range_param", _Persist=false),
- ok = config:delete("admins", ?USER, _Persist=false),
- meck:unload().
-
-
-start_couch() ->
- test_util:start_couch([mem3, chttpd]).
-
-
-stop_couch(Ctx) ->
- test_util:stop_couch(Ctx).
-
-
-mem3_reshard_api_test_() ->
- {
- "mem3 shard split api tests",
- {
- setup,
- fun start_couch/0, fun stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun basics/1,
- fun create_job_basic/1,
- fun create_two_jobs/1,
- fun create_multiple_jobs_from_one_post/1,
- fun start_stop_cluster_basic/1,
- fun test_disabled/1,
- fun start_stop_cluster_with_a_job/1,
- fun individual_job_start_stop/1,
- fun individual_job_stop_when_cluster_stopped/1,
- fun create_job_with_invalid_arguments/1,
- fun create_job_with_db/1,
- fun create_job_with_shard_name/1,
- fun completed_job_handling/1,
- fun handle_db_deletion_in_initial_copy/1,
- fun handle_db_deletion_in_topoff1/1,
- fun handle_db_deletion_in_copy_local_docs/1,
- fun handle_db_deletion_in_build_indices/1,
- fun handle_db_deletion_in_update_shard_map/1,
- fun handle_db_deletion_in_wait_source_close/1,
- fun recover_in_initial_copy/1,
- fun recover_in_topoff1/1,
- fun recover_in_copy_local_docs/1,
- fun recover_in_build_indices/1,
- fun recover_in_update_shard_map/1,
- fun recover_in_wait_source_close/1,
- fun recover_in_topoff3/1,
- fun recover_in_source_delete/1,
- fun check_max_jobs/1,
- fun check_node_and_range_required_params/1,
- fun cleanup_completed_jobs/1
- ]
- }
- }
- }.
-
-
-basics({Top, _}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- % GET /_reshard
- ?assertMatch({200, #{
- <<"state">> := <<"running">>,
- <<"state_reason">> := null,
- <<"completed">> := 0,
- <<"failed">> := 0,
- <<"running">> := 0,
- <<"stopped">> := 0,
- <<"total">> := 0
- }}, req(get, Top ++ ?RESHARD)),
-
- % GET _reshard/state
- ?assertMatch({200, #{<<"state">> := <<"running">>}},
- req(get, Top ++ ?STATE)),
-
- % GET _reshard/jobs
- ?assertMatch({200, #{
- <<"jobs">> := [],
- <<"offset">> := 0,
- <<"total_rows">> := 0
- }}, req(get, Top ++ ?JOBS)),
-
- % Some invalid paths and methods
- ?assertMatch({404, _}, req(get, Top ++ ?RESHARD ++ "/invalidpath")),
- ?assertMatch({405, _}, req(put, Top ++ ?RESHARD, #{dont => thinkso})),
- ?assertMatch({405, _}, req(post, Top ++ ?RESHARD, #{nope => nope}))
- end)}.
-
-
-create_job_basic({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- % POST /_reshard/jobs
- {C1, R1} = req(post, Top ++ ?JOBS, #{type => split, db => Db1}),
- ?assertEqual(201, C1),
- ?assertMatch([#{?OK := true, ?ID := J, <<"shard">> := S}]
- when is_binary(J) andalso is_binary(S), R1),
- [#{?ID := Id, <<"shard">> := Shard}] = R1,
-
- % GET /_reshard/jobs
- ?assertMatch({200, #{
- <<"jobs">> := [#{?ID := Id, <<"type">> := <<"split">>}],
- <<"offset">> := 0,
- <<"total_rows">> := 1
- }}, req(get, Top ++ ?JOBS)),
-
- % GET /_reshard/jobs/$jobid
- {C2, R2} = req(get, Top ++ ?JOBS ++ ?b2l(Id)),
- ?assertEqual(200, C2),
- ThisNode = atom_to_binary(node(), utf8),
- ?assertMatch(#{?ID := Id}, R2),
- ?assertMatch(#{<<"type">> := <<"split">>}, R2),
- ?assertMatch(#{<<"source">> := Shard}, R2),
- ?assertMatch(#{<<"history">> := History} when length(History) > 1, R2),
- ?assertMatch(#{<<"node">> := ThisNode}, R2),
- ?assertMatch(#{<<"split_state">> := SSt} when is_binary(SSt), R2),
- ?assertMatch(#{<<"job_state">> := JSt} when is_binary(JSt), R2),
- ?assertMatch(#{<<"state_info">> := #{}}, R2),
- ?assertMatch(#{<<"target">> := Target} when length(Target) == 2, R2),
-
- % GET /_reshard/jobs/$jobid/state
- ?assertMatch({200, #{<<"state">> := S, <<"reason">> := R}}
- when is_binary(S) andalso (is_binary(R) orelse R =:= null),
- req(get, Top ++ ?JOBS ++ ?b2l(Id) ++ "/state")),
-
- % GET /_reshard
- ?assertMatch({200, #{<<"state">> := <<"running">>, <<"total">> := 1}},
- req(get, Top ++ ?RESHARD)),
-
- % DELETE /_reshard/jobs/$jobid
- ?assertMatch({200, #{?OK := true}},
- req(delete, Top ++ ?JOBS ++ ?b2l(Id))),
-
- % GET _reshard/jobs
- ?assertMatch({200, #{<<"jobs">> := [], <<"total_rows">> := 0}},
- req(get, Top ++ ?JOBS)),
-
- % GET /_reshard/jobs/$jobid should be a 404
- ?assertMatch({404, #{}}, req(get, Top ++ ?JOBS ++ ?b2l(Id))),
-
- % DELETE /_reshard/jobs/$jobid should be a 404 as well
- ?assertMatch({404, #{}}, req(delete, Top ++ ?JOBS ++ ?b2l(Id)))
- end)}.
-
-
-create_two_jobs({Top, {Db1, Db2, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Jobs = Top ++ ?JOBS,
-
- ?assertMatch({201, [#{?OK := true}]},
- req(post, Jobs, #{type => split, db => Db1})),
- ?assertMatch({201, [#{?OK := true}]},
- req(post, Jobs, #{type => split, db => Db2})),
-
- ?assertMatch({200, #{<<"total">> := 2}}, req(get, Top ++ ?RESHARD)),
-
- ?assertMatch({200, #{
- <<"jobs">> := [#{?ID := Id1}, #{?ID := Id2}],
- <<"offset">> := 0,
- <<"total_rows">> := 2
- }} when Id1 =/= Id2, req(get, Jobs)),
-
- {200, #{<<"jobs">> := [#{?ID := Id1}, #{?ID := Id2}]}} = req(get, Jobs),
-
- {200, #{?OK := true}} = req(delete, Jobs ++ ?b2l(Id1)),
- ?assertMatch({200, #{<<"total">> := 1}}, req(get, Top ++ ?RESHARD)),
- {200, #{?OK := true}} = req(delete, Jobs ++ ?b2l(Id2)),
- ?assertMatch({200, #{<<"total">> := 0}}, req(get, Top ++ ?RESHARD))
- end)}.
-
-
-create_multiple_jobs_from_one_post({Top, {_, _, Db3}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Jobs = Top ++ ?JOBS,
- {C1, R1} = req(post, Jobs, #{type => split, db => Db3}),
- ?assertMatch({201, [#{?OK := true}, #{?OK := true}]}, {C1, R1}),
- ?assertMatch({200, #{<<"total">> := 2}}, req(get, Top ++ ?RESHARD))
- end)}.
-
-
-start_stop_cluster_basic({Top, _}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Url = Top ++ ?STATE,
-
- ?assertMatch({200, #{
- <<"state">> := <<"running">>,
- <<"reason">> := null
- }}, req(get, Url)),
-
- ?assertMatch({200, _}, req(put, Url, #{state => stopped})),
- ?assertMatch({200, #{
- <<"state">> := <<"stopped">>,
- <<"reason">> := R
- }} when is_binary(R), req(get, Url)),
-
- ?assertMatch({200, _}, req(put, Url, #{state => running})),
-
- % Make sure the reason shows in the state GET request
- Reason = <<"somereason">>,
- ?assertMatch({200, _}, req(put, Url, #{state => stopped,
- reason => Reason})),
- ?assertMatch({200, #{<<"state">> := <<"stopped">>,
- <<"reason">> := Reason}}, req(get, Url)),
-
- % Top level summary also shows the reason
- ?assertMatch({200, #{
- <<"state">> := <<"stopped">>,
- <<"state_reason">> := Reason
- }}, req(get, Top ++ ?RESHARD)),
- ?assertMatch({200, _}, req(put, Url, #{state => running})),
- ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, Url))
- end)}.
-
-
-test_disabled({Top, _}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- application:set_env(mem3, reshard_disabled, true),
- ?assertMatch({501, _}, req(get, Top ++ ?RESHARD)),
- ?assertMatch({501, _}, req(put, Top ++ ?STATE, #{state => running})),
-
- application:unset_env(mem3, reshard_disabled),
- ?assertMatch({200, _}, req(get, Top ++ ?RESHARD)),
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running}))
- end)}.
-
-
-start_stop_cluster_with_a_job({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Url = Top ++ ?STATE,
-
- ?assertMatch({200, _}, req(put, Url, #{state => stopped})),
- ?assertMatch({200, #{<<"state">> := <<"stopped">>}}, req(get, Url)),
-
- % Can add jobs with global state stopped, they just won't be running
- {201, R1} = req(post, Top ++ ?JOBS, #{type => split, db => Db1}),
- ?assertMatch([#{?OK := true}], R1),
- [#{?ID := Id1}] = R1,
- {200, J1} = req(get, Top ++ ?JOBS ++ ?b2l(Id1)),
- ?assertMatch(#{?ID := Id1, <<"job_state">> := <<"stopped">>}, J1),
- % Check summary stats
- ?assertMatch({200, #{
- <<"state">> := <<"stopped">>,
- <<"running">> := 0,
- <<"stopped">> := 1,
- <<"total">> := 1
- }}, req(get, Top ++ ?RESHARD)),
-
- % Can delete the job when stopped
- {200, #{?OK := true}} = req(delete, Top ++ ?JOBS ++ ?b2l(Id1)),
- ?assertMatch({200, #{
- <<"state">> := <<"stopped">>,
- <<"running">> := 0,
- <<"stopped">> := 0,
- <<"total">> := 0
- }}, req(get, Top ++ ?RESHARD)),
-
- % Add same job again
- {201, [#{?ID := Id2}]} = req(post, Top ++ ?JOBS, #{type => split,
- db => Db1}),
- ?assertMatch({200, #{?ID := Id2, <<"job_state">> := <<"stopped">>}},
- req(get, Top ++ ?JOBS ++ ?b2l(Id2))),
-
- % Job should start after resharding is started on the cluster
- ?assertMatch({200, _}, req(put, Url, #{state => running})),
- ?assertMatch({200, #{?ID := Id2, <<"job_state">> := JSt}}
- when JSt =/= <<"stopped">>, req(get, Top ++ ?JOBS ++ ?b2l(Id2)))
- end)}.
-
-
-individual_job_start_stop({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- intercept_state(topoff1),
-
- Body = #{type => split, db => Db1},
- {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body),
-
- JobUrl = Top ++ ?JOBS ++ ?b2l(Id),
- StUrl = JobUrl ++ "/state",
-
- % Wait for the job to start running and intercept it in topoff1 state
- receive {JobPid, topoff1} -> ok end,
- % Tell the intercept to never finish checkpointing so job is left hanging
- % forever in running state
- JobPid ! cancel,
- ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, StUrl)),
-
- {200, _} = req(put, StUrl, #{state => stopped}),
- wait_state(StUrl, <<"stopped">>),
-
- % Stop/start resharding globally and job should still stay stopped
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})),
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})),
- ?assertMatch({200, #{<<"state">> := <<"stopped">>}}, req(get, StUrl)),
-
- % Start the job again
- ?assertMatch({200, _}, req(put, StUrl, #{state => running})),
- % Wait for the job to start running and intercept it in topoff1 state
- receive {JobPid2, topoff1} -> ok end,
- ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, StUrl)),
- % Let it continue running and it should complete eventually
- JobPid2 ! continue,
- wait_state(StUrl, <<"completed">>)
- end)}.
-
-
-individual_job_stop_when_cluster_stopped({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- intercept_state(topoff1),
-
- Body = #{type => split, db => Db1},
- {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body),
-
- JobUrl = Top ++ ?JOBS ++ ?b2l(Id),
- StUrl = JobUrl ++ "/state",
-
- % Wait for the job to start running and intercept it in topoff1
- receive {JobPid, topoff1} -> ok end,
- % Tell the intercept to never finish checkpointing so job is left
- % hanging forever in running state
- JobPid ! cancel,
- ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, StUrl)),
-
- % Stop resharding globally
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})),
- wait_state(StUrl, <<"stopped">>),
-
- % Stop the job specifically
- {200, _} = req(put, StUrl, #{state => stopped}),
- % Job stays stopped
- ?assertMatch({200, #{<<"state">> := <<"stopped">>}}, req(get, StUrl)),
-
- % Set cluster to running again
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})),
-
- % The job should stay stopped
- ?assertMatch({200, #{<<"state">> := <<"stopped">>}}, req(get, StUrl)),
-
- % It should be possible to resume job and it should complete
- ?assertMatch({200, _}, req(put, StUrl, #{state => running})),
-
- % Wait for the job to start running and intercept it in topoff1 state
- receive {JobPid2, topoff1} -> ok end,
- ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, StUrl)),
-
- % Let it continue running and it should complete eventually
- JobPid2 ! continue,
- wait_state(StUrl, <<"completed">>)
- end)}.
-
-
-create_job_with_invalid_arguments({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Jobs = Top ++ ?JOBS,
-
- % Nothing in the body
- ?assertMatch({400, _}, req(post, Jobs, #{})),
-
- % Missing type
- ?assertMatch({400, _}, req(post, Jobs, #{db => Db1})),
-
- % Have type but no db and no shard
- ?assertMatch({400, _}, req(post, Jobs, #{type => split})),
-
- % Have type and db but db is invalid
- ?assertMatch({400, _}, req(post, Jobs, #{db => <<"baddb">>,
- type => split})),
-
- % Have type and shard but shard is not an existing database
- ?assertMatch({404, _}, req(post, Jobs, #{type => split,
- shard => <<"shards/80000000-ffffffff/baddb.1549492084">>})),
-
- % Bad range values, too large, different types, inverted
- ?assertMatch({400, _}, req(post, Jobs, #{db => Db1, range => 42,
- type => split})),
- ?assertMatch({400, _}, req(post, Jobs, #{db => Db1,
- range => <<"x">>, type => split})),
- ?assertMatch({400, _}, req(post, Jobs, #{db => Db1,
- range => <<"ffffffff-80000000">>, type => split})),
- ?assertMatch({400, _}, req(post, Jobs, #{db => Db1,
- range => <<"00000000-fffffffff">>, type => split})),
-
- % Can't have both db and shard
- ?assertMatch({400, _}, req(post, Jobs, #{type => split, db => Db1,
- shard => <<"blah">>}))
- end)}.
-
-
-create_job_with_db({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Jobs = Top ++ ?JOBS,
- Body1 = #{type => split, db => Db1},
-
- % Node with db
- N = atom_to_binary(node(), utf8),
- {C1, R1} = req(post, Jobs, Body1#{node => N}),
- ?assertMatch({201, [#{?OK := true}]}, {C1, R1}),
- wait_to_complete_then_cleanup(Top, R1),
-
- % Range and db
- {C2, R2} = req(post, Jobs, Body1#{range => <<"00000000-7fffffff">>}),
- ?assertMatch({201, [#{?OK := true}]}, {C2, R2}),
- wait_to_complete_then_cleanup(Top, R2),
-
- % Node, range and db
- Range = <<"80000000-ffffffff">>,
- {C3, R3} = req(post, Jobs, Body1#{range => Range, node => N}),
- ?assertMatch({201, [#{?OK := true}]}, {C3, R3}),
- wait_to_complete_then_cleanup(Top, R3),
-
- ?assertMatch([
- [16#00000000, 16#3fffffff],
- [16#40000000, 16#7fffffff],
- [16#80000000, 16#bfffffff],
- [16#c0000000, 16#ffffffff]
- ], [mem3:range(S) || S <- lists:sort(mem3:shards(Db1))])
- end)}.
-
-
-create_job_with_shard_name({Top, {_, _, Db3}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Jobs = Top ++ ?JOBS,
- [S1, S2] = [mem3:name(S) || S <- lists:sort(mem3:shards(Db3))],
-
- % Shard only
- {C1, R1} = req(post, Jobs, #{type => split, shard => S1}),
- ?assertMatch({201, [#{?OK := true}]}, {C1, R1}),
- wait_to_complete_then_cleanup(Top, R1),
-
- % Shard with a node
- N = atom_to_binary(node(), utf8),
- {C2, R2} = req(post, Jobs, #{type => split, shard => S2, node => N}),
- ?assertMatch({201, [#{?OK := true}]}, {C2, R2}),
- wait_to_complete_then_cleanup(Top, R2),
-
- ?assertMatch([
- [16#00000000, 16#3fffffff],
- [16#40000000, 16#7fffffff],
- [16#80000000, 16#bfffffff],
- [16#c0000000, 16#ffffffff]
- ], [mem3:range(S) || S <- lists:sort(mem3:shards(Db3))])
- end)}.
-
-
-completed_job_handling({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Jobs = Top ++ ?JOBS,
-
- % Run job to completion
- {C1, R1} = req(post, Jobs, #{type => split, db => Db1}),
- ?assertMatch({201, [#{?OK := true}]}, {C1, R1}),
- [#{?ID := Id}] = R1,
- wait_to_complete(Top, R1),
-
- % Check top level stats
- ?assertMatch({200, #{
- <<"state">> := <<"running">>,
- <<"state_reason">> := null,
- <<"completed">> := 1,
- <<"failed">> := 0,
- <<"running">> := 0,
- <<"stopped">> := 0,
- <<"total">> := 1
- }}, req(get, Top ++ ?RESHARD)),
-
- % Job state itself
- JobUrl = Jobs ++ ?b2l(Id),
- ?assertMatch({200, #{
- <<"split_state">> := <<"completed">>,
- <<"job_state">> := <<"completed">>
- }}, req(get, JobUrl)),
-
- % Job's state endpoint
- StUrl = Jobs ++ ?b2l(Id) ++ "/state",
- ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)),
-
- % Try to stop it and it should stay completed
- {200, _} = req(put, StUrl, #{state => stopped}),
- ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)),
-
- % Try to resume it and it should stay completed
- {200, _} = req(put, StUrl, #{state => running}),
- ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)),
-
- % Stop resharding globally and job should still stay completed
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})),
- ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)),
-
- % Start resharding and job stays completed
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})),
- ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)),
-
- ?assertMatch({200, #{?OK := true}}, req(delete, JobUrl))
- end)}.
-
-
-handle_db_deletion_in_topoff1({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = delete_source_in_state(Top, Db1, topoff1),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
- end)}.
-
-
-handle_db_deletion_in_initial_copy({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = delete_source_in_state(Top, Db1, initial_copy),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
- end)}.
-
-
-handle_db_deletion_in_copy_local_docs({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = delete_source_in_state(Top, Db1, copy_local_docs),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
- end)}.
-
-
-handle_db_deletion_in_build_indices({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = delete_source_in_state(Top, Db1, build_indices),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
- end)}.
-
-
-handle_db_deletion_in_update_shard_map({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = delete_source_in_state(Top, Db1, update_shardmap),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
- end)}.
-
-
-handle_db_deletion_in_wait_source_close({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = delete_source_in_state(Top, Db1, wait_source_close),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
- end)}.
-
-
-recover_in_topoff1({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = recover_in_state(Top, Db1, topoff1),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end)}.
-
-
-recover_in_initial_copy({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = recover_in_state(Top, Db1, initial_copy),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end)}.
-
-
-recover_in_copy_local_docs({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = recover_in_state(Top, Db1, copy_local_docs),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end)}.
-
-
-recover_in_build_indices({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = recover_in_state(Top, Db1, build_indices),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end)}.
-
-
-recover_in_update_shard_map({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = recover_in_state(Top, Db1, update_shardmap),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end)}.
-
-
-recover_in_wait_source_close({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = recover_in_state(Top, Db1, wait_source_close),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end)}.
-
-
-recover_in_topoff3({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = recover_in_state(Top, Db1, topoff3),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end)}.
-
-
-recover_in_source_delete({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = recover_in_state(Top, Db1, source_delete),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end)}.
-
-
-check_max_jobs({Top, {Db1, Db2, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Jobs = Top ++ ?JOBS,
-
- config:set("reshard", "max_jobs", "0", _Persist=false),
- {C1, R1} = req(post, Jobs, #{type => split, db => Db1}),
- ?assertMatch({500, [#{<<"error">> := <<"max_jobs_exceeded">>}]}, {C1, R1}),
-
- config:set("reshard", "max_jobs", "1", _Persist=false),
- {201, R2} = req(post, Jobs, #{type => split, db => Db1}),
- wait_to_complete(Top, R2),
-
- % Stop resharding on the cluster so jobs are not started anymore and ensure
- % max_jobs is enforced even if jobs are stopped
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})),
-
- {C3, R3} = req(post, Jobs, #{type => split, db => Db2}),
- ?assertMatch({500, [#{<<"error">> := <<"max_jobs_exceeded">>}]},
- {C3, R3}),
-
- % Allow the job to be created by raising max_jobs
- config:set("reshard", "max_jobs", "2", _Persist=false),
-
- {C4, R4} = req(post, Jobs, #{type => split, db => Db2}),
- ?assertEqual(201, C4),
-
- % Lower max_jobs after the job is created but before it is running
- config:set("reshard", "max_jobs", "1", _Persist=false),
-
- % Start resharding again
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})),
-
- % Jobs that have already been created are not removed if max_jobs is lowered,
- % so make sure the job completes
- wait_to_complete(Top, R4)
- end)}.
-
-
-check_node_and_range_required_params({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Jobs = Top ++ ?JOBS,
-
- Node = atom_to_binary(node(), utf8),
- Range = <<"00000000-ffffffff">>,
-
- config:set("reshard", "require_node_param", "true", _Persist=false),
- {C1, R1} = req(post, Jobs, #{type => split, db => Db1}),
- NodeRequiredErr = <<"`node` prameter is required">>,
- ?assertEqual({400, #{<<"error">> => <<"bad_request">>,
- <<"reason">> => NodeRequiredErr}}, {C1, R1}),
-
- config:set("reshard", "require_range_param", "true", _Persist=false),
- {C2, R2} = req(post, Jobs, #{type => split, db => Db1, node => Node}),
- RangeRequiredErr = <<"`range` prameter is required">>,
- ?assertEqual({400, #{<<"error">> => <<"bad_request">>,
- <<"reason">> => RangeRequiredErr}}, {C2, R2}),
-
- Body = #{type => split, db => Db1, range => Range, node => Node},
- {C3, R3} = req(post, Jobs, Body),
- ?assertMatch({201, [#{?OK := true}]}, {C3, R3}),
- wait_to_complete_then_cleanup(Top, R3)
- end)}.
-
-
-cleanup_completed_jobs({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Body = #{type => split, db => Db1},
- {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body),
- JobUrl = Top ++ ?JOBS ++ ?b2l(Id),
- wait_state(JobUrl ++ "/state", <<"completed">>),
- delete_db(Top, Db1),
- wait_for_http_code(JobUrl, 404)
- end)}.
-
-
-% Test helper functions
-
-wait_to_complete_then_cleanup(Top, Jobs) ->
- JobsUrl = Top ++ ?JOBS,
- lists:foreach(fun(#{?ID := Id}) ->
- wait_state(JobsUrl ++ ?b2l(Id) ++ "/state", <<"completed">>),
- {200, _} = req(delete, JobsUrl ++ ?b2l(Id))
- end, Jobs).
-
-
-wait_to_complete(Top, Jobs) ->
- JobsUrl = Top ++ ?JOBS,
- lists:foreach(fun(#{?ID := Id}) ->
- wait_state(JobsUrl ++ ?b2l(Id) ++ "/state", <<"completed">>)
- end, Jobs).
-
-
-intercept_state(State) ->
- TestPid = self(),
- meck:new(mem3_reshard_job, [passthrough]),
- meck:expect(mem3_reshard_job, checkpoint_done, fun(Job) ->
- case Job#job.split_state of
- State ->
- TestPid ! {self(), State},
- receive
- continue -> meck:passthrough([Job]);
- cancel -> ok
- end;
- _ ->
- meck:passthrough([Job])
- end
- end).
-
-
-cancel_intercept() ->
- meck:expect(mem3_reshard_job, checkpoint_done, fun(Job) ->
- meck:passthrough([Job])
- end).
-
-
-wait_state(Url, State) ->
- test_util:wait(fun() ->
- case req(get, Url) of
- {200, #{<<"state">> := State}} -> ok;
- {200, #{}} -> timer:sleep(100), wait
- end
- end, 30000).
-
-
-wait_for_http_code(Url, Code) when is_integer(Code) ->
- test_util:wait(fun() ->
- case req(get, Url) of
- {Code, _} -> ok;
- {_, _} -> timer:sleep(100), wait
- end
- end, 30000).
-
-
-delete_source_in_state(Top, Db, State) when is_atom(State), is_binary(Db) ->
- intercept_state(State),
- Body = #{type => split, db => Db},
- {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body),
- receive {JobPid, State} -> ok end,
- sync_delete_db(Top, Db),
- JobPid ! continue,
- Id.
-
-
-recover_in_state(Top, Db, State) when is_atom(State) ->
- intercept_state(State),
- Body = #{type => split, db => Db},
- {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body),
- receive {JobPid, State} -> ok end,
- % Job is now stuck in the running state because we prevented it from
- % executing past the given state
- JobPid ! cancel,
- % Now restart resharding
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})),
- cancel_intercept(),
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})),
- Id.
-
-
-create_db(Top, Db, QArgs) when is_binary(Db) ->
- Url = Top ++ binary_to_list(Db) ++ QArgs,
- {ok, Status, _, _} = test_request:put(Url, [?JSON, ?AUTH], "{}"),
- ?assert(Status =:= 201 orelse Status =:= 202).
-
-
-delete_db(Top, Db) when is_binary(Db) ->
- Url = Top ++ binary_to_list(Db),
- case test_request:get(Url, [?AUTH]) of
- {ok, 404, _, _} ->
- not_found;
- {ok, 200, _, _} ->
- {ok, 200, _, _} = test_request:delete(Url, [?AUTH]),
- ok
- end.
-
-
-sync_delete_db(Top, Db) when is_binary(Db) ->
- delete_db(Top, Db),
- try
- Shards = mem3:local_shards(Db),
- ShardNames = [mem3:name(S) || S <- Shards],
- [couch_server:delete(N, [?ADMIN_CTX]) || N <- ShardNames],
- ok
- catch
- error:database_does_not_exist ->
- ok
- end.
-
-
-req(Method, Url) ->
- Headers = [?AUTH],
- {ok, Code, _, Res} = test_request:request(Method, Url, Headers),
- {Code, jiffy:decode(Res, [return_maps])}.
-
-
-req(Method, Url, #{} = Body) ->
- req(Method, Url, jiffy:encode(Body));
-
-req(Method, Url, Body) ->
- Headers = [?JSON, ?AUTH],
- {ok, Code, _, Res} = test_request:request(Method, Url, Headers, Body),
- {Code, jiffy:decode(Res, [return_maps])}.
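
Read end to end, the suite above also documents the _reshard HTTP surface it exercises: POST /_reshard/jobs creates split jobs, GET /_reshard/jobs/$jobid/state reports per-job state, and PUT /_reshard/state pauses or resumes resharding cluster-wide. A short sketch reusing the suite's own req/2,3 helpers and ?JOBS/?STATE macros (the database name is a placeholder):

    {201, [#{<<"id">> := JobId}]} =
        req(post, Top ++ ?JOBS, #{type => split, db => <<"mydb">>}),
    {200, #{<<"state">> := JobState}} =
        req(get, Top ++ ?JOBS ++ binary_to_list(JobId) ++ "/state"),
    %% Pause all resharding on the cluster, then resume it:
    {200, _} = req(put, Top ++ ?STATE, #{state => stopped,
                                         reason => <<"maintenance">>}),
    {200, _} = req(put, Top ++ ?STATE, #{state => running}).
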
diff --git a/src/mem3/test/eunit/mem3_reshard_changes_feed_test.erl b/src/mem3/test/eunit/mem3_reshard_changes_feed_test.erl
deleted file mode 100644
index 4b9e2a34a..000000000
--- a/src/mem3/test/eunit/mem3_reshard_changes_feed_test.erl
+++ /dev/null
@@ -1,389 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard_changes_feed_test).
-
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/src/mem3_reshard.hrl").
-
--define(TIMEOUT, 60). % seconds
-
--define(assertChanges(Expected, Received),
- begin
- ((fun() ->
- ExpectedIDs = lists:sort([I || #{id := I} <- Expected]),
- ReceivedIDs = lists:sort([I || #{id := I} <- Received]),
- ?assertEqual(ExpectedIDs, ReceivedIDs)
- end)())
- end).
-
-
-setup() ->
- Db1 = ?tempdb(),
- create_db(Db1, [{q, 1}, {n, 1}]),
- #{db1 => Db1}.
-
-
-teardown(#{} = Dbs) ->
- mem3_reshard:reset_state(),
- maps:map(fun(_, Db) -> delete_db(Db) end, Dbs).
-
-
-start_couch() ->
- test_util:start_couch(?CONFIG_CHAIN, [mem3, fabric]).
-
-
-stop_couch(Ctx) ->
- test_util:stop_couch(Ctx).
-
-
-mem3_reshard_changes_feed_test_() ->
- {
- "mem3 shard split changes feed tests",
- {
- setup,
- fun start_couch/0, fun stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun normal_feed_should_work_after_split/1,
- fun continuous_feed_should_work_during_split/1
- ]
- }
- }
- }.
-
-
-normal_feed_should_work_after_split(#{db1 := Db}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- DocSpec = #{
- docs => [1, 10],
- delete => [5, 6]
- },
- add_test_docs(Db, DocSpec),
-
- % gather pre-split changes
- BaseArgs = #changes_args{feed = "normal", dir = fwd, since = 0},
- {ok, OldChanges, OldEndSeq} = get_changes_feed(Db, BaseArgs),
-
- % Split the shard
- split_and_wait(Db),
-
- % verify the changes list is consistent for all the old seqs
- lists:foldl(fun(#{seq := Seq} = C, ExpectedChanges) ->
- Args = BaseArgs#changes_args{since = Seq},
- {ok, Changes, _EndSeq} = get_changes_feed(Db, Args),
- ?assertChanges(ExpectedChanges, Changes),
- [C | ExpectedChanges]
- end, [], OldChanges),
-
- % confirm that old LastSeq respected
- Args1 = BaseArgs#changes_args{since = OldEndSeq},
- {ok, Changes1, EndSeq1} = get_changes_feed(Db, Args1),
- ?assertChanges([], Changes1),
-
- % confirm that new LastSeq also respected
- Args2 = BaseArgs#changes_args{since = EndSeq1},
- {ok, Changes2, EndSeq2} = get_changes_feed(Db, Args2),
- ?assertChanges([], Changes2),
- ?assertEqual(EndSeq2, EndSeq1),
-
- % confirm we didn't lose any changes and have a consistent last seq
- {ok, Changes3, EndSeq3} = get_changes_feed(Db, BaseArgs),
- ?assertChanges(OldChanges, Changes3),
-
- % add some docs
- add_test_docs(Db, #{docs => [11, 15]}),
- Args4 = BaseArgs#changes_args{since = EndSeq3},
- {ok, Changes4, EndSeq4} = get_changes_feed(Db, Args4),
- AddedChanges = [#{id => ID} || #doc{id = ID} <- docs([11, 15])],
- ?assertChanges(AddedChanges, Changes4),
-
- % confirm include_docs and deleted works
- Args5 = BaseArgs#changes_args{include_docs = true},
- {ok, Changes5, EndSeq5} = get_changes_feed(Db, Args5),
- ?assertEqual(EndSeq4, EndSeq5),
- [SampleChange] = [C || #{id := ID} = C <- Changes5, ID == <<"00005">>],
- ?assertMatch(#{deleted := true}, SampleChange),
- ?assertMatch(#{doc := {Body}} when is_list(Body), SampleChange),
-
- % update and delete some pre and post split docs
- AllDocs = [couch_doc:from_json_obj(Doc) || #{doc := Doc} <- Changes5],
- UpdateDocs = lists:filtermap(fun
- (#doc{id = <<"00002">>}) -> true;
- (#doc{id = <<"00012">>}) -> true;
- (#doc{id = <<"00004">>} = Doc) -> {true, Doc#doc{deleted = true}};
- (#doc{id = <<"00014">>} = Doc) -> {true, Doc#doc{deleted = true}};
- (_) -> false
- end, AllDocs),
- update_docs(Db, UpdateDocs),
-
- Args6 = BaseArgs#changes_args{since = EndSeq5},
- {ok, Changes6, EndSeq6} = get_changes_feed(Db, Args6),
- UpdatedChanges = [#{id => ID} || #doc{id = ID} <- UpdateDocs],
- ?assertChanges(UpdatedChanges, Changes6),
- [#{seq := Seq6} | _] = Changes6,
- ?assertEqual(EndSeq6, Seq6),
-
- Args7 = Args6#changes_args{dir = rev, limit = 4},
- {ok, Changes7, EndSeq7} = get_changes_feed(Db, Args7),
- ?assertEqual(4, length(Changes7)),
- [#{seq := Seq7} | _] = Changes7,
- ?assertEqual(EndSeq7, Seq7)
- end)}.
-
-
-continuous_feed_should_work_during_split(#{db1 := Db}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- {UpdaterPid, UpdaterRef} = spawn_monitor(fun() ->
- Updater = fun U({State, I}) ->
- receive
- {get_state, {Pid, Ref}} ->
- Pid ! {state, Ref, {State, I}},
- U({State, I});
- add ->
- DocSpec = #{docs => [I, I]},
- add_test_docs(Db, DocSpec),
- U({State, I + 1});
- split ->
- spawn_monitor(fun() -> split_and_wait(Db) end),
- U({"in_process", I});
- stop ->
- receive {'DOWN', _, process, _, _} -> ok end,
- ok
- end
- end,
- Updater({"before", 1})
- end),
-
- Callback = fun
- (start, Acc) ->
- {ok, Acc};
- (waiting_for_updates, Acc) ->
- Ref = make_ref(),
- UpdaterPid ! {get_state, {self(), Ref}},
- receive {state, Ref, {State, _}} -> ok end,
- case {State, length(Acc)} of
- {"before", N} when N < 5 ->
- UpdaterPid ! add,
- {ok, Acc};
- {"before", _} ->
- UpdaterPid ! split,
- {ok, Acc};
- {"in_process", N} when N < 10 ->
- UpdaterPid ! add,
- {ok, Acc};
- {"in_process", _} ->
- {ok, Acc}
- end;
- (timeout, Acc) ->
- {ok, Acc};
- ({change, {Change}}, Acc) ->
- CM = maps:from_list(Change),
- {ok, [CM | Acc]};
- ({stop, EndSeq, _Pending}, Acc) ->
- % Notice updater is still running
- {stop, EndSeq, Acc}
- end,
-
- BaseArgs = #changes_args{
- feed = "continuous",
- heartbeat = 100,
- timeout = 1000
- },
- StopResult = get_changes_feed(Db, BaseArgs, Callback),
-
- % Changes feed stopped when source shard was deleted
- ?assertMatch({stop, _, _}, StopResult),
- {stop, StopEndSeq, StopChanges} = StopResult,
-
- % Add 5 extra docs to the db right after changes feed was stopped
- [UpdaterPid ! add || _ <- lists:seq(1, 5)],
-
- % The number of documents that the updater had added
- Ref = make_ref(),
- UpdaterPid ! {get_state, {self(), Ref}},
- DocCount = receive {state, Ref, {_, I}} -> I - 1 end,
-
- UpdaterPid ! stop,
- receive
- {'DOWN', UpdaterRef, process, UpdaterPid, normal} ->
- ok;
- {'DOWN', UpdaterRef, process, UpdaterPid, Error} ->
- erlang:error({test_context_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {value, Error},
- {reason, "Updater died"}]})
- end,
-
- AfterArgs = #changes_args{feed = "normal", since = StopEndSeq},
- {ok, AfterChanges, _} = get_changes_feed(Db, AfterArgs),
- DocIDs = [Id || #{id := Id} <- StopChanges ++ AfterChanges],
- ExpectedDocIDs = [doc_id(<<>>, N) || N <- lists:seq(1, DocCount)],
- ?assertEqual(ExpectedDocIDs, lists:usort(DocIDs))
- end)}.
-
-
-split_and_wait(Db) ->
- [#shard{name = Shard}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId} = mem3_reshard:start_split_job(Shard),
- wait_state(JobId, completed),
- ResultShards = lists:sort(mem3:local_shards(Db)),
- ?assertEqual(2, length(ResultShards)).
-
-
-wait_state(JobId, State) ->
- test_util:wait(fun() ->
- case mem3_reshard:job(JobId) of
- {ok, {Props}} ->
- case couch_util:get_value(job_state, Props) of
- State -> ok;
- _ -> timer:sleep(100), wait
- end;
- {error, not_found} -> timer:sleep(100), wait
- end
- end, 30000).
-
-
-get_changes_feed(Db, Args) ->
- get_changes_feed(Db, Args, fun changes_callback/2).
-
-
-get_changes_feed(Db, Args, Callback) ->
- with_proc(fun() ->
- fabric:changes(Db, Callback, [], Args)
- end).
-
-
-changes_callback(start, Acc) ->
- {ok, Acc};
-changes_callback({change, {Change}}, Acc) ->
- CM = maps:from_list(Change),
- {ok, [CM | Acc]};
-changes_callback({stop, EndSeq, _Pending}, Acc) ->
- {ok, Acc, EndSeq}.
-
-
-%% common helpers from here
-
-
-create_db(DbName, Opts) ->
- GL = erlang:group_leader(),
- with_proc(fun() -> fabric:create_db(DbName, Opts) end, GL).
-
-
-delete_db(DbName) ->
- GL = erlang:group_leader(),
- with_proc(fun() -> fabric:delete_db(DbName, [?ADMIN_CTX]) end, GL).
-
-
-with_proc(Fun) ->
- with_proc(Fun, undefined, 30000).
-
-
-with_proc(Fun, GroupLeader) ->
- with_proc(Fun, GroupLeader, 30000).
-
-
-with_proc(Fun, GroupLeader, Timeout) ->
- {Pid, Ref} = spawn_monitor(fun() ->
- case GroupLeader of
- undefined -> ok;
- _ -> erlang:group_leader(GroupLeader, self())
- end,
- exit({with_proc_res, Fun()})
- end),
- receive
- {'DOWN', Ref, process, Pid, {with_proc_res, Res}} ->
- Res;
- {'DOWN', Ref, process, Pid, Error} ->
- error(Error)
- after Timeout ->
- erlang:demonitor(Ref, [flush]),
- exit(Pid, kill),
- error({with_proc_timeout, Fun, Timeout})
- end.
-
-
-add_test_docs(DbName, #{} = DocSpec) ->
- Docs = docs(maps:get(docs, DocSpec, [])),
- Res = update_docs(DbName, Docs),
- Docs1 = lists:map(fun({Doc, {ok, {RevPos, Rev}}}) ->
- Doc#doc{revs = {RevPos, [Rev]}}
- end, lists:zip(Docs, Res)),
- case delete_docs(maps:get(delete, DocSpec, []), Docs1) of
- [] -> ok;
- [_ | _] = Deleted -> update_docs(DbName, Deleted)
- end,
- ok.
-
-
-update_docs(DbName, Docs) ->
- with_proc(fun() ->
- case fabric:update_docs(DbName, Docs, [?ADMIN_CTX]) of
- {accepted, Res} -> Res;
- {ok, Res} -> Res
- end
- end).
-
-
-delete_docs([S, E], Docs) when E >= S ->
- ToDelete = [doc_id(<<"">>, I) || I <- lists:seq(S, E)],
- lists:filtermap(fun(#doc{id = Id} = Doc) ->
- case lists:member(Id, ToDelete) of
- true -> {true, Doc#doc{deleted = true}};
- false -> false
- end
- end, Docs);
-delete_docs(_, _) ->
- [].
-
-
-docs([S, E]) when E >= S ->
- [doc(<<"">>, I) || I <- lists:seq(S, E)];
-docs(_) ->
- [].
-
-
-doc(Pref, Id) ->
- Body = [{<<"a">>, <<"b">>}],
- doc(Pref, Id, Body, 42).
-
-
-doc(Pref, Id, BodyProps, AttSize) ->
- #doc{
- id = doc_id(Pref, Id),
- body = {BodyProps},
- atts = atts(AttSize)
- }.
-
-
-doc_id(Pref, Id) ->
- IdBin = iolist_to_binary(io_lib:format("~5..0B", [Id])),
- <<Pref/binary, IdBin/binary>>.
-
-
-atts(0) ->
- [];
-
-atts(Size) when is_integer(Size), Size >= 1 ->
- Data = << <<"x">> || _ <- lists:seq(1, Size) >>,
- [couch_att:new([
- {name, <<"att">>},
- {type, <<"app/binary">>},
- {att_len, Size},
- {data, Data}
- ])].
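
The continuous-feed test above leans on the fabric:changes/4 callback protocol: the fun is invoked with start, waiting_for_updates, timeout, {change, ...} and {stop, EndSeq, Pending} events, and its final return value becomes the result of fabric:changes/4. A stripped-down consumer sketch following the same protocol as used above (DbName is a placeholder; #changes_args{} requires including couch/include/couch_db.hrl):

    %% Accumulates each change as a map; stops when the feed ends.
    Args = #changes_args{feed = "continuous", heartbeat = 100, timeout = 1000},
    Callback = fun
        (start, Acc) -> {ok, Acc};
        (waiting_for_updates, Acc) -> {ok, Acc};
        (timeout, Acc) -> {ok, Acc};
        ({change, {Change}}, Acc) -> {ok, [maps:from_list(Change) | Acc]};
        ({stop, EndSeq, _Pending}, Acc) -> {stop, EndSeq, Acc}
    end,
    {stop, _EndSeq, Changes} = fabric:changes(DbName, Callback, [], Args).
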
diff --git a/src/mem3/test/eunit/mem3_reshard_test.erl b/src/mem3/test/eunit/mem3_reshard_test.erl
deleted file mode 100644
index 7cd6b1fe6..000000000
--- a/src/mem3/test/eunit/mem3_reshard_test.erl
+++ /dev/null
@@ -1,834 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard_test).
-
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/src/mem3_reshard.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl"). % for all_docs function
-
--define(ID, <<"_id">>).
--define(TIMEOUT, 60).
-
-setup() ->
- HaveDreyfus = code:lib_dir(dreyfus) /= {error, bad_name},
- case HaveDreyfus of false -> ok; true ->
- mock_dreyfus_indices()
- end,
-
- HaveHastings = code:lib_dir(hastings) /= {error, bad_name},
- case HaveHastings of false -> ok; true ->
- mock_hastings_indices()
- end,
- {Db1, Db2} = {?tempdb(), ?tempdb()},
- create_db(Db1, [{q, 1}, {n, 1}]),
- PartProps = [{partitioned, true}, {hash, [couch_partition, hash, []]}],
- create_db(Db2, [{q, 1}, {n, 1}, {props, PartProps}]),
- config:set("reshard", "retry_interval_sec", "0", _Persist=false),
- #{db1 => Db1, db2 => Db2}.
-
-
-teardown(#{} = Dbs) ->
- mem3_reshard:reset_state(),
- maps:map(fun(_, Db) -> delete_db(Db) end, Dbs),
- config:delete("reshard", "retry_interval_sec", _Persist=false),
- meck:unload().
-
-
-start_couch() ->
- test_util:start_couch(?CONFIG_CHAIN, [mem3, fabric]).
-
-
-stop_couch(Ctx) ->
- test_util:stop_couch(Ctx).
-
-
-mem3_reshard_db_test_() ->
- {
- "mem3 shard split db tests",
- {
- setup,
- fun start_couch/0, fun stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun split_one_shard/1,
- fun update_docs_before_topoff1/1,
- fun indices_are_built/1,
- fun split_partitioned_db/1,
- fun split_twice/1,
- fun couch_events_are_emitted/1,
- fun retries_work/1,
- fun target_reset_in_initial_copy/1,
- fun split_an_incomplete_shard_map/1,
- fun target_shards_are_locked/1
- ]
- }
- }
- }.
-
-
-% This is a basic test to check that shard splitting preserves documents, and
-% db meta props like revs limits and security.
-split_one_shard(#{db1 := Db}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- DocSpec = #{docs => 10, delete => [5, 9], mrview => 1, local => 1},
- add_test_docs(Db, DocSpec),
-
- % Save documents before the split
- Docs0 = get_all_docs(Db),
- Local0 = get_local_docs(Db),
-
- % Set some custom metadata properties
- set_revs_limit(Db, 942),
- set_purge_infos_limit(Db, 943),
- SecObj = {[{<<"foo">>, <<"bar">>}]},
- set_security(Db, SecObj),
-
- % DbInfo is saved after setting metadata bits
- % as those could bump the update sequence
- DbInfo0 = get_db_info(Db),
-
- % Split the one shard
- [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId} = mem3_reshard:start_split_job(Shard),
- wait_state(JobId, completed),
-
- % Perform some basic checks that the shard was split
- Shards1 = lists:sort(mem3:local_shards(Db)),
- ?assertEqual(2, length(Shards1)),
- [#shard{range = R1}, #shard{range = R2}] = Shards1,
- ?assertEqual([16#00000000, 16#7fffffff], R1),
- ?assertEqual([16#80000000, 16#ffffffff], R2),
-
- % Check metadata bits after the split
- ?assertEqual(942, get_revs_limit(Db)),
- ?assertEqual(943, get_purge_infos_limit(Db)),
- ?assertEqual(SecObj, get_security(Db)),
-
- DbInfo1 = get_db_info(Db),
- Docs1 = get_all_docs(Db),
- Local1 = get_local_docs(Db),
-
- % When comparing db infos, ignore update sequences; they won't be the
- % same since there are more shards involved after the split
- ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)),
-
- % Update seq prefix number is a sum of all shard update sequences
- #{<<"update_seq">> := UpdateSeq0} = update_seq_to_num(DbInfo0),
- #{<<"update_seq">> := UpdateSeq1} = update_seq_to_num(DbInfo1),
- ?assertEqual(UpdateSeq0 * 2, UpdateSeq1),
-
- % Finally compare that the documents are still there after the split
- ?assertEqual(Docs0, Docs1),
-
- % Don't forget about the local docs, but don't include internal checkpoints
- % as some of those are munged and transformed during the split
- ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1))
- end)}.
-
-
-% This test checks that documents added while the shard is being split are not
-% lost. The topoff1 state happens before indices are built
-update_docs_before_topoff1(#{db1 := Db}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- add_test_docs(Db, #{docs => 10}),
-
- intercept_state(topoff1),
-
- [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId} = mem3_reshard:start_split_job(Shard),
-
- receive {JobPid, topoff1} -> ok end,
- add_test_docs(Db, #{docs => [10, 19], local => 1}),
- Docs0 = get_all_docs(Db),
- Local0 = get_local_docs(Db),
- DbInfo0 = get_db_info(Db),
- JobPid ! continue,
-
- wait_state(JobId, completed),
-
- % Perform some basic checks that the shard was split
- Shards1 = lists:sort(mem3:local_shards(Db)),
- ?assertEqual(2, length(Shards1)),
-
- DbInfo1 = get_db_info(Db),
- Docs1 = get_all_docs(Db),
- Local1 = get_local_docs(Db),
-
- ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)),
-
- % Update sequence after initial copy with 10 docs would be 10 on each
- % target shard (to match the source) and the total update sequence
- % would have been 20. But then 10 more docs were added (3 might have
- % ended up on one target and 7 on another) so the final update sequence
- % would then be 20 + 10 = 30.
- ?assertMatch(#{<<"update_seq">> := 30}, update_seq_to_num(DbInfo1)),
-
- ?assertEqual(Docs0, Docs1),
- ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1))
- end)}.
-
-
-% This tests that indices are built during shard splitting.
-indices_are_built(#{db1 := Db}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- HaveDreyfus = code:lib_dir(dreyfus) /= {error, bad_name},
- HaveHastings = code:lib_dir(hastings) /= {error, bad_name},
-
- add_test_docs(Db, #{docs => 10, mrview => 2, search => 2, geo => 2}),
- [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId} = mem3_reshard:start_split_job(Shard),
- wait_state(JobId, completed),
- Shards1 = lists:sort(mem3:local_shards(Db)),
- ?assertEqual(2, length(Shards1)),
- MRViewGroupInfo = get_group_info(Db, <<"_design/mrview00000">>),
- ?assertMatch(#{<<"update_seq">> := 32}, MRViewGroupInfo),
-
- HaveDreyfus = code:lib_dir(dreyfus) /= {error, bad_name},
- case HaveDreyfus of false -> ok; true ->
- % 4 because there are 2 indices and 2 target shards
- ?assertEqual(4, meck:num_calls(dreyfus_index, await, 2))
- end,
-
- HaveHastings = code:lib_dir(hastings) /= {error, bad_name},
- case HaveHastings of false -> ok; true ->
- % 4 because there are 2 indices and 2 target shards
- ?assertEqual(4, meck:num_calls(hastings_index, await, 2))
- end
- end)}.
-
-
-mock_dreyfus_indices() ->
- meck:expect(dreyfus_index, design_doc_to_indexes, fun(Doc) ->
- #doc{body = {BodyProps}} = Doc,
- case couch_util:get_value(<<"indexes">>, BodyProps) of
- undefined ->
- [];
- {[_]} ->
- [{dreyfus, <<"db">>, dreyfus_index1}]
- end
- end),
- meck:expect(dreyfus_index_manager, get_index, fun(_, _) -> {ok, pid} end),
- meck:expect(dreyfus_index, await, fun(_, _) -> ok end).
-
-
-mock_hastings_indices() ->
- meck:expect(hastings_index, design_doc_to_indexes, fun(Doc) ->
- #doc{body = {BodyProps}} = Doc,
- case couch_util:get_value(<<"st_indexes">>, BodyProps) of
- undefined ->
- [];
- {[_]} ->
- [{hastings, <<"db">>, hastings_index1}]
- end
- end),
- meck:expect(hastings_index_manager, get_index, fun(_, _) -> {ok, pid} end),
- meck:expect(hastings_index, await, fun(_, _) -> ok end).
-
-% Split partitioned database
-split_partitioned_db(#{db2 := Db}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- DocSpec = #{
- pdocs => #{
- <<"PX">> => 5,
- <<"PY">> => 5
- },
- mrview => 1,
- local => 1
- },
- add_test_docs(Db, DocSpec),
-
- % Save documents before the split
- Docs0 = get_all_docs(Db),
- Local0 = get_local_docs(Db),
-
- % Set some custom metadata properties
- set_revs_limit(Db, 942),
- set_purge_infos_limit(Db, 943),
- SecObj = {[{<<"foo">>, <<"bar">>}]},
- set_security(Db, SecObj),
-
- % DbInfo is saved after setting metadata bits
- % as those could bump the update sequence
- DbInfo0 = get_db_info(Db),
- PX0 = get_partition_info(Db, <<"PX">>),
- PY0 = get_partition_info(Db, <<"PY">>),
-
- % Split the one shard
- [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId} = mem3_reshard:start_split_job(Shard),
- wait_state(JobId, completed),
-
- % Perform some basic checks that the shard was split
- Shards1 = lists:sort(mem3:local_shards(Db)),
- ?assertEqual(2, length(Shards1)),
- [#shard{range = R1}, #shard{range = R2}] = Shards1,
- ?assertEqual([16#00000000, 16#7fffffff], R1),
- ?assertEqual([16#80000000, 16#ffffffff], R2),
-
- % Check metadata bits after the split
- ?assertEqual(942, get_revs_limit(Db)),
- ?assertEqual(943, get_purge_infos_limit(Db)),
- ?assertEqual(SecObj, get_security(Db)),
-
- DbInfo1 = get_db_info(Db),
- Docs1 = get_all_docs(Db),
- Local1 = get_local_docs(Db),
-
- % When comparing db infos, ignore update sequences; they won't be the
- % same since there are more shards involved after the split
- ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)),
-
- % Update seq prefix number is a sum of all shard update sequences
- #{<<"update_seq">> := UpdateSeq0} = update_seq_to_num(DbInfo0),
- #{<<"update_seq">> := UpdateSeq1} = update_seq_to_num(DbInfo1),
- ?assertEqual(UpdateSeq0 * 2, UpdateSeq1),
-
- % Finally compare that documents are still there after the split
- ?assertEqual(Docs0, Docs1),
-
- ?assertEqual(PX0, get_partition_info(Db, <<"PX">>)),
- ?assertEqual(PY0, get_partition_info(Db, <<"PY">>)),
-
-        % Don't forget about the local docs, but don't include internal
-        % checkpoints as some of those are munged and transformed during the split
- ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1))
- end)}.
-
-
-% Make sure a shard can be split again after it was split once. This guards
-% against too many documents having been added to one range, such that on the
-% next split they would fail to fit into any of the new target ranges.
-split_twice(#{db1 := Db}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- DocSpec = #{docs => 100, delete => [80, 99], mrview => 2, local => 100},
- add_test_docs(Db, DocSpec),
-
- % Save documents before the split
- Docs0 = get_all_docs(Db),
- Local0 = get_local_docs(Db),
-
- % Set some custom metadata properties
- set_revs_limit(Db, 942),
- set_purge_infos_limit(Db, 943),
- SecObj = {[{<<"foo">>, <<"bar">>}]},
- set_security(Db, SecObj),
-
- % DbInfo is saved after setting metadata bits
- % as those could bump the update sequence
- DbInfo0 = get_db_info(Db),
-
- % Split the one shard
- [#shard{name=Shard1}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId1} = mem3_reshard:start_split_job(Shard1),
- wait_state(JobId1, completed),
-
- % Perform some basic checks that the shard was split
- Shards1 = lists:sort(mem3:local_shards(Db)),
- ?assertEqual(2, length(Shards1)),
- [#shard{range = R1}, #shard{range = R2}] = Shards1,
- ?assertEqual([16#00000000, 16#7fffffff], R1),
- ?assertEqual([16#80000000, 16#ffffffff], R2),
-
- % Check metadata bits after the split
- ?assertEqual(942, get_revs_limit(Db)),
- ?assertEqual(943, get_purge_infos_limit(Db)),
- ?assertEqual(SecObj, get_security(Db)),
-
- DbInfo1 = get_db_info(Db),
- Docs1 = get_all_docs(Db),
- Local1 = get_local_docs(Db),
-
-        % When comparing db infos, ignore update sequences; they won't be the
-        % same since there are more shards involved after the split
- ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)),
-
-        % The update seq prefix number is the sum of all shard update sequences
- #{<<"update_seq">> := UpdateSeq0} = update_seq_to_num(DbInfo0),
- #{<<"update_seq">> := UpdateSeq1} = update_seq_to_num(DbInfo1),
- ?assertEqual(UpdateSeq0 * 2, UpdateSeq1),
-
- ?assertEqual(Docs0, Docs1),
- ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1)),
-
- % Split the first range again
- [#shard{name=Shard2}, _] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId2} = mem3_reshard:start_split_job(Shard2),
- wait_state(JobId2, completed),
-
- Shards2 = lists:sort(mem3:local_shards(Db)),
- ?assertEqual(3, length(Shards2)),
- [R3, R4, R5] = [R || #shard{range = R} <- Shards2],
- ?assertEqual([16#00000000, 16#3fffffff], R3),
- ?assertEqual([16#40000000, 16#7fffffff], R4),
- ?assertEqual([16#80000000, 16#ffffffff], R5),
-
- % Check metadata bits after the second split
- ?assertEqual(942, get_revs_limit(Db)),
- ?assertEqual(943, get_purge_infos_limit(Db)),
- ?assertEqual(SecObj, get_security(Db)),
-
- DbInfo2 = get_db_info(Db),
- Docs2 = get_all_docs(Db),
- Local2 = get_local_docs(Db),
-
- ?assertEqual(without_seqs(DbInfo1), without_seqs(DbInfo2)),
-        % The update seq prefix number is the sum of all shard update sequences,
-        % but only 1 of the 2 shards was split
- #{<<"update_seq">> := UpdateSeq2} = update_seq_to_num(DbInfo2),
- ?assertEqual(trunc(UpdateSeq1 * 1.5), UpdateSeq2),
- ?assertEqual(Docs1, Docs2),
- ?assertEqual(without_meta_locals(Local1), without_meta_locals(Local2))
- end)}.
-
-
-couch_events_are_emitted(#{db1 := Db}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- couch_event:register_all(self()),
-
- % Split the one shard
- [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId} = mem3_reshard:start_split_job(Shard),
- wait_state(JobId, completed),
-
- % Perform some basic checks that the shard was split
- Shards1 = lists:sort(mem3:local_shards(Db)),
- ?assertEqual(2, length(Shards1)),
- [#shard{range = R1}, #shard{range = R2}] = Shards1,
- ?assertEqual([16#00000000, 16#7fffffff], R1),
- ?assertEqual([16#80000000, 16#ffffffff], R2),
-
- Flush = fun F(Events) ->
- receive
- {'$couch_event', DbName, Event} when Event =:= deleted
- orelse Event =:= updated ->
- case binary:match(DbName, Db) of
- nomatch -> F(Events);
- {_, _} -> F([Event | Events])
- end
- after 0 ->
- lists:reverse(Events)
- end
- end,
- Events = Flush([]),
- StartAtDeleted = lists:dropwhile(fun(E) -> E =/= deleted end, Events),
- ?assertMatch([deleted, deleted, updated, updated | _], StartAtDeleted),
- couch_event:unregister(self())
- end)}.
-
-
-retries_work(#{db1 := Db}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- meck:expect(couch_db_split, split, fun(_, _, _) ->
- error(kapow)
- end),
-
- [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId} = mem3_reshard:start_split_job(Shard),
-
- wait_state(JobId, failed),
- ?assertEqual(3, meck:num_calls(couch_db_split, split, 3))
- end)}.
-
-
-target_reset_in_initial_copy(#{db1 := Db}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- [#shard{} = Src] = lists:sort(mem3:local_shards(Db)),
- Job = #job{
- source = Src,
- target = [#shard{name= <<"t1">>}, #shard{name = <<"t2">>}],
- job_state = running,
- split_state = initial_copy
- },
- meck:expect(couch_db_split, cleanup_target, 2, ok),
- meck:expect(couch_server, exists, fun
- (<<"t1">>) -> true;
- (<<"t2">>) -> true;
- (DbName) -> meck:passthrough([DbName])
- end),
- JobPid = spawn(fun() -> mem3_reshard_job:initial_copy_impl(Job) end),
- meck:wait(2, couch_db_split, cleanup_target, ['_', '_'], 5000),
- exit(JobPid, kill),
- ?assertEqual(2, meck:num_calls(couch_db_split, cleanup_target, 2))
- end)}.
-
-
-split_an_incomplete_shard_map(#{db1 := Db}) ->
- {timeout, ?TIMEOUT, ?_test(begin
-        [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
- meck:expect(mem3_util, calculate_max_n, 1, 0),
- ?assertMatch({error, {not_enough_shard_copies, _}},
- mem3_reshard:start_split_job(Shard))
- end)}.
-
-
-% Opening a target db in the initial copy phase will throw an error
-target_shards_are_locked(#{db1 := Db}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- add_test_docs(Db, #{docs => 10}),
-
-        % Make the job stop right when it is about to copy the docs
- TestPid = self(),
- meck:new(couch_db, [passthrough]),
- meck:expect(couch_db, start_link, fun(Engine, TName, FilePath, Opts) ->
- TestPid ! {start_link, self(), TName},
- receive
- continue ->
- meck:passthrough([Engine, TName, FilePath, Opts])
- end
- end),
-
- [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId} = mem3_reshard:start_split_job(Shard),
- {Target0, JobPid} = receive
- {start_link, Pid, TName} -> {TName, Pid}
- end,
- ?assertEqual({error, {locked, <<"shard splitting">>}},
- couch_db:open_int(Target0, [])),
-
- % Send two continues for two targets
- JobPid ! continue,
- JobPid ! continue,
-
- wait_state(JobId, completed)
- end)}.
-
-
-intercept_state(State) ->
- TestPid = self(),
- meck:new(mem3_reshard_job, [passthrough]),
- meck:expect(mem3_reshard_job, checkpoint_done, fun(Job) ->
- case Job#job.split_state of
- State ->
- TestPid ! {self(), State},
- receive
- continue -> meck:passthrough([Job]);
- cancel -> ok
- end;
- _ ->
- meck:passthrough([Job])
- end
- end).
-
-
-wait_state(JobId, State) ->
- test_util:wait(fun() ->
- case mem3_reshard:job(JobId) of
- {ok, {Props}} ->
- case couch_util:get_value(job_state, Props) of
- State -> ok;
- _ -> timer:sleep(100), wait
- end;
- {error, not_found} -> timer:sleep(100), wait
- end
- end, 30000).
-
-
-set_revs_limit(DbName, Limit) ->
- with_proc(fun() -> fabric:set_revs_limit(DbName, Limit, [?ADMIN_CTX]) end).
-
-
-get_revs_limit(DbName) ->
- with_proc(fun() -> fabric:get_revs_limit(DbName) end).
-
-
-get_purge_infos_limit(DbName) ->
- with_proc(fun() -> fabric:get_purge_infos_limit(DbName) end).
-
-
-set_purge_infos_limit(DbName, Limit) ->
- with_proc(fun() ->
- fabric:set_purge_infos_limit(DbName, Limit, [?ADMIN_CTX])
- end).
-
-
-set_security(DbName, SecObj) ->
- with_proc(fun() -> fabric:set_security(DbName, SecObj) end).
-
-
-get_security(DbName) ->
- with_proc(fun() -> fabric:get_security(DbName, [?ADMIN_CTX]) end).
-
-
-get_db_info(DbName) ->
- with_proc(fun() ->
- {ok, Info} = fabric:get_db_info(DbName),
- maps:with([
- <<"db_name">>, <<"doc_count">>, <<"props">>, <<"doc_del_count">>,
- <<"update_seq">>, <<"purge_seq">>, <<"disk_format_version">>
- ], to_map(Info))
- end).
-
-
-get_group_info(DbName, DesignId) ->
- with_proc(fun() ->
- {ok, GInfo} = fabric:get_view_group_info(DbName, DesignId),
- maps:with([
- <<"language">>, <<"purge_seq">>, <<"signature">>, <<"update_seq">>
- ], to_map(GInfo))
- end).
-
-
-get_partition_info(DbName, Partition) ->
- with_proc(fun() ->
- {ok, PInfo} = fabric:get_partition_info(DbName, Partition),
- maps:with([
- <<"db_name">>, <<"doc_count">>, <<"doc_del_count">>, <<"partition">>
- ], to_map(PInfo))
- end).
-
-
-get_all_docs(DbName) ->
- get_all_docs(DbName, #mrargs{}).
-
-
-get_all_docs(DbName, #mrargs{} = QArgs0) ->
- GL = erlang:group_leader(),
- with_proc(fun() ->
- Cb = fun
- ({row, Props}, Acc) ->
- Doc = to_map(couch_util:get_value(doc, Props)),
- #{?ID := Id} = Doc,
- {ok, Acc#{Id => Doc}};
- ({meta, _}, Acc) -> {ok, Acc};
- (complete, Acc) -> {ok, Acc}
- end,
- QArgs = QArgs0#mrargs{include_docs = true},
- {ok, Docs} = fabric:all_docs(DbName, Cb, #{}, QArgs),
- Docs
- end, GL).
-
-
-get_local_docs(DbName) ->
- LocalNS = {namespace, <<"_local">>},
- maps:map(fun(_, Doc) ->
- maps:without([<<"_rev">>], Doc)
- end, get_all_docs(DbName, #mrargs{extra = [LocalNS]})).
-
-
-without_seqs(#{} = InfoMap) ->
- maps:without([<<"update_seq">>, <<"purge_seq">>], InfoMap).
-
-
-without_meta_locals(#{} = Local) ->
- maps:filter(fun
- (<<"_local/purge-mrview-", _/binary>>, _) -> false;
- (<<"_local/shard-sync-", _/binary>>, _) -> false;
- (_, _) -> true
- end, Local).
-
-
-update_seq_to_num(#{} = InfoMap) ->
- maps:map(fun
- (<<"update_seq">>, Seq) -> seq_to_num(Seq);
- (<<"purge_seq">>, PSeq) -> seq_to_num(PSeq);
- (_, V) -> V
- end, InfoMap).
-
-
-seq_to_num(Seq) ->
- [SeqNum, _] = binary:split(Seq, <<"-">>),
- binary_to_integer(SeqNum).
-
-
-to_map([_ | _] = Props) ->
- to_map({Props});
-
-to_map({[_ | _]} = EJson) ->
- jiffy:decode(jiffy:encode(EJson), [return_maps]).
-
-
-create_db(DbName, Opts) ->
- GL = erlang:group_leader(),
- with_proc(fun() -> fabric:create_db(DbName, Opts) end, GL).
-
-
-delete_db(DbName) ->
- GL = erlang:group_leader(),
- with_proc(fun() -> fabric:delete_db(DbName, [?ADMIN_CTX]) end, GL).
-
-
-with_proc(Fun) ->
- with_proc(Fun, undefined, 30000).
-
-
-with_proc(Fun, GroupLeader) ->
- with_proc(Fun, GroupLeader, 30000).
-
-
-with_proc(Fun, GroupLeader, Timeout) ->
- {Pid, Ref} = spawn_monitor(fun() ->
- case GroupLeader of
- undefined -> ok;
- _ -> erlang:group_leader(GroupLeader, self())
- end,
- exit({with_proc_res, Fun()})
- end),
- receive
- {'DOWN', Ref, process, Pid, {with_proc_res, Res}} ->
- Res;
- {'DOWN', Ref, process, Pid, Error} ->
- error(Error)
- after Timeout ->
- erlang:demonitor(Ref, [flush]),
- exit(Pid, kill),
- error({with_proc_timeout, Fun, Timeout})
- end.
-
-
-add_test_docs(DbName, #{} = DocSpec) ->
- Docs = docs(maps:get(docs, DocSpec, []))
- ++ pdocs(maps:get(pdocs, DocSpec, #{}))
- ++ ddocs(mrview, maps:get(mrview, DocSpec, []))
- ++ ddocs(search, maps:get(search, DocSpec, []))
- ++ ddocs(geo, maps:get(geo, DocSpec, []))
- ++ ldocs(maps:get(local, DocSpec, [])),
- Res = update_docs(DbName, Docs),
- Docs1 = lists:map(fun({Doc, {ok, {RevPos, Rev}}}) ->
- Doc#doc{revs = {RevPos, [Rev]}}
- end, lists:zip(Docs, Res)),
- case delete_docs(maps:get(delete, DocSpec, []), Docs1) of
- [] -> ok;
- [_ | _] = Deleted -> update_docs(DbName, Deleted)
- end,
- ok.
-
-
-update_docs(DbName, Docs) ->
- with_proc(fun() ->
- case fabric:update_docs(DbName, Docs, [?ADMIN_CTX]) of
- {accepted, Res} -> Res;
- {ok, Res} -> Res
- end
- end).
-
-
-delete_docs([S, E], Docs) when E >= S ->
- ToDelete = [doc_id(<<"">>, I) || I <- lists:seq(S, E)],
- lists:filtermap(fun(#doc{id = Id} = Doc) ->
- case lists:member(Id, ToDelete) of
- true -> {true, Doc#doc{deleted = true}};
- false -> false
- end
- end, Docs);
-delete_docs(_, _) ->
- [].
-
-
-pdocs(#{} = PMap) ->
- maps:fold(fun(Part, DocSpec, DocsAcc) ->
- docs(DocSpec, <<Part/binary, ":">>) ++ DocsAcc
- end, [], PMap).
-
-
-docs(DocSpec) ->
- docs(DocSpec, <<"">>).
-
-
-docs(N, Prefix) when is_integer(N), N > 0 ->
- docs([0, N - 1], Prefix);
-docs([S, E], Prefix) when E >= S ->
- [doc(Prefix, I) || I <- lists:seq(S, E)];
-docs(_, _) ->
- [].
-
-ddocs(Type, N) when is_integer(N), N > 0 ->
- ddocs(Type, [0, N - 1]);
-ddocs(Type, [S, E]) when E >= S ->
- Body = ddprop(Type),
- BType = atom_to_binary(Type, utf8),
- [doc(<<"_design/", BType/binary>>, I, Body, 0) || I <- lists:seq(S, E)];
-ddocs(_, _) ->
- [].
-
-
-ldocs(N) when is_integer(N), N > 0 ->
- ldocs([0, N - 1]);
-ldocs([S, E]) when E >= S ->
- [doc(<<"_local/">>, I, bodyprops(), 0) || I <- lists:seq(S, E)];
-ldocs(_) ->
- [].
-
-
-
-doc(Pref, Id) ->
- Body = bodyprops(),
- doc(Pref, Id, Body, 42).
-
-
-doc(Pref, Id, BodyProps, AttSize) ->
- #doc{
- id = doc_id(Pref, Id),
- body = {BodyProps},
- atts = atts(AttSize)
- }.
-
-
-doc_id(Pref, Id) ->
- IdBin = iolist_to_binary(io_lib:format("~5..0B", [Id])),
- <<Pref/binary, IdBin/binary>>.
-
-
-ddprop(mrview) ->
- [
- {<<"views">>, {[
- {<<"v1">>, {[
- {<<"map">>, <<"function(d){emit(d);}">>}
- ]}}
- ]}}
- ];
-
-ddprop(geo) ->
- [
- {<<"st_indexes">>, {[
- {<<"area">>, {[
- {<<"analyzer">>, <<"standard">>},
- {<<"index">>, <<"function(d){if(d.g){st_index(d.g)}}">> }
- ]}}
- ]}}
- ];
-
-ddprop(search) ->
- [
- {<<"indexes">>, {[
- {<<"types">>, {[
- {<<"index">>, <<"function(d){if(d.g){st_index(d.g.type)}}">>}
- ]}}
- ]}}
- ].
-
-
-bodyprops() ->
- [
- {<<"g">>, {[
- {<<"type">>, <<"Polygon">>},
- {<<"coordinates">>, [[[-71.0, 48.4], [-70.0, 48.4], [-71.0, 48.4]]]}
- ]}}
- ].
-
-
-atts(0) ->
- [];
-
-atts(Size) when is_integer(Size), Size >= 1 ->
- Data = << <<"x">> || _ <- lists:seq(1, Size) >>,
- [couch_att:new([
- {name, <<"att">>},
- {type, <<"app/binary">>},
- {att_len, Size},
- {data, Data}
- ])].
diff --git a/src/mem3/test/eunit/mem3_ring_prop_tests.erl b/src/mem3/test/eunit/mem3_ring_prop_tests.erl
deleted file mode 100644
index 51d8f10bf..000000000
--- a/src/mem3/test/eunit/mem3_ring_prop_tests.erl
+++ /dev/null
@@ -1,151 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_ring_prop_tests).
-
-
--ifdef(WITH_PROPER).
-
--include_lib("couch/include/couch_eunit_proper.hrl").
-
-
-property_test_() ->
- ?EUNIT_QUICKCHECK(60).
-
-
-% Properties
-
-prop_get_ring_with_connected_intervals() ->
- ?FORALL({Start, End}, oneof(ranges()),
- ?FORALL(Intervals, g_connected_intervals(Start, End),
- mem3_util:get_ring(Intervals, Start, End) =:= lists:sort(Intervals)
- )
- ).
-
-
-prop_get_ring_connected_plus_random_intervals() ->
- ?FORALL({Intervals, Extra}, {g_connected_intervals(1, 100),
- g_random_intervals(1, 100)},
- ?IMPLIES(sets:is_disjoint(endpoints(Intervals), endpoints(Extra)),
- begin
- AllInts = Intervals ++ Extra,
- Ring = mem3_util:get_ring(AllInts, 1, 100),
- Ring =:= lists:sort(Intervals)
- end
- )
- ).
-
-
-prop_get_ring_connected_with_sub_intervals() ->
- ?FORALL(Intervals, g_connected_intervals(1, 100),
- ?FORALL(SubIntervals, g_subintervals(Intervals),
- begin
- AllInts = Intervals ++ SubIntervals,
- Ring = mem3_util:get_ring(AllInts, 1, 100),
- Ring =:= lists:sort(Intervals)
- end
- )
- ).
-
-
-prop_get_ring_with_disconnected_intervals() ->
- ?FORALL({Start, End}, oneof(ranges()),
- ?FORALL(Intervals, g_disconnected_intervals(Start, End),
- mem3_util:get_ring(Intervals, Start, End) =:= []
- )
- ).
-
-
-% Generators
-
-ranges() ->
- [{1, 10}, {0, 2 bsl 31 - 1}, {2 bsl 31 - 10, 2 bsl 31 - 1}].
-
-
-g_connected_intervals(Begin, End) ->
- ?SIZED(Size, g_connected_intervals(Begin, End, 5 * Size)).
-
-
-g_connected_intervals(Begin, End, Split) when Begin =< End ->
- ?LET(N, choose(0, Split),
- begin
- if
- N == 0 ->
- [{Begin, End}];
- N > 0 ->
- Ns = lists:seq(1, N - 1),
- Bs = lists:usort([rand_range(Begin, End) || _ <- Ns]),
- Es = [B - 1 || B <- Bs],
- shuffle(lists:zip([Begin] ++ Bs, Es ++ [End]))
- end
- end).
-
-
-g_non_trivial_connected_intervals(Begin, End, Split) ->
- ?SUCHTHAT(Connected, g_connected_intervals(Begin, End, Split),
- length(Connected) > 1).
-
-
-g_disconnected_intervals(Begin, End) ->
- ?SIZED(Size, g_disconnected_intervals(Begin, End, Size)).
-
-
-g_disconnected_intervals(Begin, End, Split) when Begin =< End ->
- ?LET(Connected, g_non_trivial_connected_intervals(Begin, End, Split),
- begin
- I = rand:uniform(length(Connected)) - 1,
- {Before, [_ | After]} = lists:split(I, Connected),
- Before ++ After
- end).
-
-
-g_subintervals(Intervals) ->
- lists:foldl(fun(R, Acc) -> split_interval(R) ++ Acc end, [], Intervals).
-
-
-split_interval({B, E}) when E - B >= 2 ->
- E1 = rand_range(B, E) - 1,
- B1 = E1 + 1,
- [{B, E1}, {B1, E}];
-
-split_interval(_Range) ->
- [].
-
-
-g_random_intervals(Start, End) ->
- ?LET(N, choose(1, 10),
- begin
- [begin
- B = rand_range(Start, End),
- E = rand_range(B, End),
- {B, E}
- end || _ <- lists:seq(1, N)]
- end).
-
-
-rand_range(B, B) ->
- B;
-
-rand_range(B, E) ->
- B + rand:uniform(E - B).
-
-
-shuffle(L) ->
- Tagged = [{rand:uniform(), X} || X <- L],
- [X || {_, X} <- lists:sort(Tagged)].
-
-
-endpoints(Ranges) ->
- {Begins, Ends} = lists:unzip(Ranges),
- sets:from_list(Begins ++ Ends).
-
--endif.
diff --git a/src/mem3/test/eunit/mem3_seeds_test.erl b/src/mem3/test/eunit/mem3_seeds_test.erl
deleted file mode 100644
index ba83b66be..000000000
--- a/src/mem3/test/eunit/mem3_seeds_test.erl
+++ /dev/null
@@ -1,69 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_seeds_test).
-
--include_lib("couch/include/couch_eunit.hrl").
-
-a_test_() ->
- Tests = [
- {"empty seedlist should set status ok", fun empty_seedlist_status_ok/0},
- {"all seedlist nodes unreachable keeps status seeding", fun seedlist_misconfiguration/0},
- {"seedlist entries should be present in _nodes", fun check_nodelist/0}
- ],
- {setup, fun setup/0, fun teardown/1, Tests}.
-
-empty_seedlist_status_ok() ->
- ok = application:start(mem3),
- try
- {ok, {Result}} = mem3_seeds:get_status(),
- ?assertEqual({[]}, couch_util:get_value(seeds, Result)),
- ?assertEqual(ok, couch_util:get_value(status, Result))
- after
- cleanup()
- end.
-
-seedlist_misconfiguration() ->
- config:set("cluster", "seedlist", "couchdb@node1.example.com,couchdb@node2.example.com", false),
- ok = application:start(mem3),
- try
- {ok, {Result}} = mem3_seeds:get_status(),
- {Seeds} = couch_util:get_value(seeds, Result),
- ?assertEqual(2, length(Seeds)),
- ?assertMatch({_}, couch_util:get_value('couchdb@node1.example.com', Seeds)),
- ?assertMatch({_}, couch_util:get_value('couchdb@node2.example.com', Seeds)),
- ?assertEqual(seeding, couch_util:get_value(status, Result))
- after
- cleanup()
- end.
-
-check_nodelist() ->
- config:set("cluster", "seedlist", "couchdb@node1.example.com,couchdb@node2.example.com", false),
- ok = application:start(mem3),
- try
- Nodes = mem3:nodes(),
- ?assert(lists:member('couchdb@node1.example.com', Nodes)),
- ?assert(lists:member('couchdb@node2.example.com', Nodes))
- after
- cleanup()
- end.
-
-cleanup() ->
- application:stop(mem3),
- Filename = config:get("mem3", "nodes_db", "_nodes") ++ ".couch",
- file:delete(filename:join([?BUILDDIR(), "tmp", "data", Filename])).
-
-setup() ->
- test_util:start_couch([rexi]).
-
-teardown(Ctx) ->
- test_util:stop_couch(Ctx).
diff --git a/src/mem3/test/eunit/mem3_sync_security_test.erl b/src/mem3/test/eunit/mem3_sync_security_test.erl
deleted file mode 100644
index e67a72017..000000000
--- a/src/mem3/test/eunit/mem3_sync_security_test.erl
+++ /dev/null
@@ -1,54 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_sync_security_test).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include("mem3.hrl").
--include_lib("eunit/include/eunit.hrl").
-
--define(TIMEOUT, 5). % seconds
-
-go_test_() ->
- {
- "security property sync test",
- {
- setup,
- fun start_couch/0, fun stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun sync_security_ok/1
- ]
- }
- }
- }.
-
-start_couch() ->
- test_util:start_couch([fabric, mem3]).
-
-stop_couch(Ctx) ->
- test_util:stop_couch(Ctx).
-
-setup() ->
- ok = meck:new(fabric, [passthrough]),
- meck:expect(fabric, all_dbs, fun() ->
- {ok, [<<"NoExistDb1">>, <<"NoExistDb2">>]}
- end).
-
-teardown(_) ->
- meck:unload().
-
-sync_security_ok(_) ->
- {timeout, ?TIMEOUT, ?_assertEqual(ok, mem3_sync_security:go())}.
diff --git a/src/mem3/test/eunit/mem3_util_test.erl b/src/mem3/test/eunit/mem3_util_test.erl
deleted file mode 100644
index 8b74c4b2b..000000000
--- a/src/mem3/test/eunit/mem3_util_test.erl
+++ /dev/null
@@ -1,130 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_util_test).
-
--include("mem3.hrl").
--include_lib("eunit/include/eunit.hrl").
-
-name_shard_test() ->
- Shard1 = #shard{},
- ?assertError(function_clause, mem3_util:name_shard(Shard1, ".1234")),
-
- Shard2 = #shard{dbname = <<"testdb">>, range = [0,100]},
- #shard{name=Name2} = mem3_util:name_shard(Shard2, ".1234"),
- ?assertEqual(<<"shards/00000000-00000064/testdb.1234">>, Name2),
-
- ok.
-
-create_partition_map_test() ->
- {DbName1, N1, Q1, Nodes1} = {<<"testdb1">>, 3, 4, [a,b,c,d]},
- Map1 = mem3_util:create_partition_map(DbName1, N1, Q1, Nodes1),
- ?assertEqual(12, length(Map1)),
-
- {DbName2, N2, Q2, Nodes2} = {<<"testdb2">>, 1, 1, [a,b,c,d]},
- [#shard{name=Name2,node=Node2}] = Map2 =
- mem3_util:create_partition_map(DbName2, N2, Q2, Nodes2, ".1234"),
- ?assertEqual(1, length(Map2)),
- ?assertEqual(<<"shards/00000000-ffffffff/testdb2.1234">>, Name2),
- ?assertEqual(a, Node2),
- ok.
-
-build_shards_test() ->
- DocProps1 =
- [{<<"changelog">>,
- [[<<"add">>,<<"00000000-1fffffff">>,
- <<"bigcouch@node.local">>],
- [<<"add">>,<<"20000000-3fffffff">>,
- <<"bigcouch@node.local">>],
- [<<"add">>,<<"40000000-5fffffff">>,
- <<"bigcouch@node.local">>],
- [<<"add">>,<<"60000000-7fffffff">>,
- <<"bigcouch@node.local">>],
- [<<"add">>,<<"80000000-9fffffff">>,
- <<"bigcouch@node.local">>],
- [<<"add">>,<<"a0000000-bfffffff">>,
- <<"bigcouch@node.local">>],
- [<<"add">>,<<"c0000000-dfffffff">>,
- <<"bigcouch@node.local">>],
- [<<"add">>,<<"e0000000-ffffffff">>,
- <<"bigcouch@node.local">>]]},
- {<<"by_node">>,
- {[{<<"bigcouch@node.local">>,
- [<<"00000000-1fffffff">>,<<"20000000-3fffffff">>,
- <<"40000000-5fffffff">>,<<"60000000-7fffffff">>,
- <<"80000000-9fffffff">>,<<"a0000000-bfffffff">>,
- <<"c0000000-dfffffff">>,<<"e0000000-ffffffff">>]}]}},
- {<<"by_range">>,
- {[{<<"00000000-1fffffff">>,[<<"bigcouch@node.local">>]},
- {<<"20000000-3fffffff">>,[<<"bigcouch@node.local">>]},
- {<<"40000000-5fffffff">>,[<<"bigcouch@node.local">>]},
- {<<"60000000-7fffffff">>,[<<"bigcouch@node.local">>]},
- {<<"80000000-9fffffff">>,[<<"bigcouch@node.local">>]},
- {<<"a0000000-bfffffff">>,[<<"bigcouch@node.local">>]},
- {<<"c0000000-dfffffff">>,[<<"bigcouch@node.local">>]},
- {<<"e0000000-ffffffff">>,[<<"bigcouch@node.local">>]}]}}],
- Shards1 = mem3_util:build_shards(<<"testdb1">>, DocProps1),
- ExpectedShards1 =
- [{shard,<<"shards/00000000-1fffffff/testdb1">>,
- 'bigcouch@node.local',<<"testdb1">>,
- [0,536870911],
- undefined,[]},
- {shard,<<"shards/20000000-3fffffff/testdb1">>,
- 'bigcouch@node.local',<<"testdb1">>,
- [536870912,1073741823],
- undefined,[]},
- {shard,<<"shards/40000000-5fffffff/testdb1">>,
- 'bigcouch@node.local',<<"testdb1">>,
- [1073741824,1610612735],
- undefined,[]},
- {shard,<<"shards/60000000-7fffffff/testdb1">>,
- 'bigcouch@node.local',<<"testdb1">>,
- [1610612736,2147483647],
- undefined,[]},
- {shard,<<"shards/80000000-9fffffff/testdb1">>,
- 'bigcouch@node.local',<<"testdb1">>,
- [2147483648,2684354559],
- undefined,[]},
- {shard,<<"shards/a0000000-bfffffff/testdb1">>,
- 'bigcouch@node.local',<<"testdb1">>,
- [2684354560,3221225471],
- undefined,[]},
- {shard,<<"shards/c0000000-dfffffff/testdb1">>,
- 'bigcouch@node.local',<<"testdb1">>,
- [3221225472,3758096383],
- undefined,[]},
- {shard,<<"shards/e0000000-ffffffff/testdb1">>,
- 'bigcouch@node.local',<<"testdb1">>,
- [3758096384,4294967295],
- undefined,[]}],
- ?assertEqual(ExpectedShards1, Shards1),
- ok.
-
-
-%% n_val tests
-
-nval_test_() ->
- {
- setup,
- fun() ->
- meck:new([config, couch_log]),
- meck:expect(couch_log, error, 2, ok),
- meck:expect(config, get, 3, "5")
- end,
- fun(_) -> meck:unload() end,
- [
- ?_assertEqual(2, mem3_util:n_val(2, 4)),
- ?_assertEqual(1, mem3_util:n_val(-1, 4)),
- ?_assertEqual(4, mem3_util:n_val(6, 4)),
- ?_assertEqual(5, mem3_util:n_val(undefined, 6))
- ]
- }.
diff --git a/src/rexi/README.md b/src/rexi/README.md
deleted file mode 100644
index b2eeaea2b..000000000
--- a/src/rexi/README.md
+++ /dev/null
@@ -1,23 +0,0 @@
-Rexi is a tailor-made RPC server application for sending [CouchDB][1] operations to nodes in a cluster. It is used in [BigCouch][2] as the remote procedure call vehicle that gets [fabric][6] functions to execute on remote cluster nodes.
-
-Rexi better fits the needs of the BigCouch distributed data store by dropping some unneeded overhead in rex, the RPC server that ships with Erlang/OTP. Rexi is optimized for the case when you need to spawn a bunch of remote processes. Cast messages are sent from the origin to the remote rexi server, and local processes are spawned from there, which is vastly more efficient than spawning remote processes from the origin. You still get monitoring of the remote processes, but the request-handling process doesn't get stuck trying to connect to an overloaded/dead node. 'rexi_DOWN' messages will arrive at the client eventually. This has been an extremely advantageous mix of latency and failure detection, vastly improving the performance of BigCouch.
-
-Rexi is used in conjunction with 'Fabric', which is also an application within BigCouch, but it can be used on a stand-alone basis.
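-
-A minimal usage sketch (illustrative only; `my_worker:do_work/1` is a hypothetical function that reports its result with `rexi:reply/1`, and `Node` and `Arg` are assumed to be bound):
-
-    Ref = rexi:cast(Node, {my_worker, do_work, [Arg]}),
-    receive
-        {Ref, Result} ->
-            Result;
-        {Ref, {rexi_EXIT, Reason}} ->
-            {error, Reason}
-    after 5000 ->
-        {error, timeout}
-    end.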
-
-### Getting Started
-Rexi requires R13B03 or higher and can be built with [rebar][7], which comes bundled in the repository.
-
-### License
-[Apache 2.0][3]
-
-### Contact
- * [http://cloudant.com][4]
- * [info@cloudant.com][5]
-
-[1]: http://couchdb.apache.org
-[2]: http://github.com/cloudant/BigCouch
-[3]: http://www.apache.org/licenses/LICENSE-2.0.html
-[4]: http://cloudant.com
-[5]: mailto:info@cloudant.com
-[6]: http://github.com/cloudant/fabric
-[7]: http://github.com/basho/rebar
diff --git a/src/rexi/priv/stats_descriptions.cfg b/src/rexi/priv/stats_descriptions.cfg
deleted file mode 100644
index 93c29d95a..000000000
--- a/src/rexi/priv/stats_descriptions.cfg
+++ /dev/null
@@ -1,24 +0,0 @@
-{[rexi, buffered], [
- {type, counter},
- {desc, <<"number of rexi messages buffered">>}
-]}.
-{[rexi, down], [
- {type, counter},
- {desc, <<"number of rexi_DOWN messages handled">>}
-]}.
-{[rexi, dropped], [
- {type, counter},
- {desc, <<"number of rexi messages dropped from buffers">>}
-]}.
-{[rexi, streams, timeout, init_stream], [
- {type, counter},
- {desc, <<"number of rexi stream initialization timeouts">>}
-]}.
-{[rexi, streams, timeout, stream], [
- {type, counter},
- {desc, <<"number of rexi stream timeouts">>}
-]}.
-{[rexi, streams, timeout, wait_for_ack], [
- {type, counter},
- {desc, <<"number of rexi stream timeouts while waiting for acks">>}
-]}.
diff --git a/src/rexi/rebar.config b/src/rexi/rebar.config
deleted file mode 100644
index e0d18443b..000000000
--- a/src/rexi/rebar.config
+++ /dev/null
@@ -1,2 +0,0 @@
-{cover_enabled, true}.
-{cover_print_enabled, true}.
diff --git a/src/rexi/src/rexi.app.src b/src/rexi/src/rexi.app.src
deleted file mode 100644
index 400293219..000000000
--- a/src/rexi/src/rexi.app.src
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, rexi, [
- {description, "Lightweight RPC server"},
- {vsn, git},
- {registered, [
- rexi_sup,
- rexi_server
- ]},
- {applications, [
- kernel,
- stdlib,
- couch_log,
- couch_stats,
- config
- ]},
- {mod, {rexi_app,[]}}
-]}.
diff --git a/src/rexi/src/rexi.erl b/src/rexi/src/rexi.erl
deleted file mode 100644
index 170503b7c..000000000
--- a/src/rexi/src/rexi.erl
+++ /dev/null
@@ -1,320 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi).
--export([start/0, stop/0, restart/0]).
--export([cast/2, cast/3, cast/4, kill/2, kill_all/1]).
--export([reply/1, sync_reply/1, sync_reply/2]).
--export([async_server_call/2, async_server_call/3]).
--export([stream_init/0, stream_init/1]).
--export([stream_start/1, stream_cancel/1]).
--export([stream/1, stream/2, stream/3, stream_ack/1, stream_ack/2]).
--export([stream2/1, stream2/2, stream2/3, stream_last/1, stream_last/2]).
--export([ping/0]).
-
--include_lib("rexi/include/rexi.hrl").
-
-start() ->
- application:start(rexi).
-
-stop() ->
- application:stop(rexi).
-
-restart() ->
- stop(), start().
-
-
-%% @equiv cast(Node, self(), MFA)
--spec cast(node(), {atom(), atom(), list()}) -> reference().
-cast(Node, MFA) ->
- cast(Node, self(), MFA).
-
-%% @doc Executes apply(M, F, A) on Node.
-%% You might want to use this instead of rpc:cast/4 for two reasons. First,
-%% the Caller pid and the returned reference are inserted into the remote
-%% process' dictionary as `rexi_from', so it has a way to communicate with you.
-%% Second, the remote process is monitored. If it exits with a Reason other
-%% than normal, Caller will receive a message of the form
-%% `{Ref, {rexi_EXIT, Reason}}' where Ref is the returned reference.
--spec cast(node(), pid(), {atom(), atom(), list()}) -> reference().
-cast(Node, Caller, MFA) ->
- Ref = make_ref(),
- Msg = cast_msg({doit, {Caller, Ref}, get(nonce), MFA}),
- rexi_utils:send(rexi_utils:server_pid(Node), Msg),
- Ref.
-
-%% @doc Executes apply(M, F, A) on Node.
-%% This version accepts a sync option which uses erlang:send/2 directly in
-%% process instead of deferring to a spawned process when erlang:send/2
-%% would block. If the sync option is omitted, this call is identical to
-%% cast/3.
--spec cast(node(), pid(), {atom(), atom(), list()}, [atom()]) -> reference().
-cast(Node, Caller, MFA, Options) ->
- case lists:member(sync, Options) of
- true ->
- Ref = make_ref(),
- Msg = cast_msg({doit, {Caller, Ref}, get(nonce), MFA}),
- erlang:send(rexi_utils:server_pid(Node), Msg),
- Ref;
- false ->
- cast(Node, Caller, MFA)
- end.
-
-%% @doc Sends an async kill signal to the remote process associated with Ref.
-%% No rexi_EXIT message will be sent.
--spec kill(node(), reference()) -> ok.
-kill(Node, Ref) ->
- rexi_utils:send(rexi_utils:server_pid(Node), cast_msg({kill, Ref})),
- ok.
-
-%% @doc Sends an async kill signal to the remote processes associated with Refs.
-%% No rexi_EXIT message will be sent.
--spec kill_all([{node(), reference()}]) -> ok.
-kill_all(NodeRefs) when is_list(NodeRefs) ->
-    %% Upgrade clause. Since kill_all is a new message, nodes in a mixed
-    %% cluster won't know how to process it. In that case, the default is to
-    %% send the individual kill messages. Once all the nodes have been
-    %% upgraded, the cluster can be configured to send kill_all messages.
- case config:get_boolean("rexi", "use_kill_all", false) of
- true ->
- PerNodeMap = lists:foldl(fun({Node, Ref}, Acc) ->
- maps:update_with(Node, fun(Refs) ->
- [Ref | Refs]
- end, [Ref], Acc)
- end, #{}, NodeRefs),
- maps:map(fun(Node, Refs) ->
- ServerPid = rexi_utils:server_pid(Node),
- rexi_utils:send(ServerPid, cast_msg({kill_all, Refs}))
- end, PerNodeMap);
- false ->
- lists:foreach(fun({Node, Ref}) -> kill(Node, Ref) end, NodeRefs)
- end,
- ok.
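-
-%% Example (sketch): once every node in the cluster runs a kill_all-aware
-%% release, the batched code path above can be enabled with:
-%%
-%%     config:set("rexi", "use_kill_all", "true").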
-
-%% @equiv async_server_call(Server, self(), Request)
--spec async_server_call(pid() | {atom(),node()}, any()) -> reference().
-async_server_call(Server, Request) ->
- async_server_call(Server, self(), Request).
-
-%% @doc Sends a properly formatted gen_server:call Request to the Server and
-%% returns the reference which the Server will include in its reply. The
-%% function acts more like cast() than call() in that the server process
-%% is not monitored. Clients who want to know if the server is alive should
-%% monitor it themselves before calling this function.
--spec async_server_call(pid() | {atom(),node()}, pid(), any()) -> reference().
-async_server_call(Server, Caller, Request) ->
- Ref = make_ref(),
- rexi_utils:send(Server, {'$gen_call', {Caller,Ref}, Request}),
- Ref.
-
-%% @doc convenience function to reply to the original rexi Caller.
--spec reply(any()) -> any().
-reply(Reply) ->
- {Caller, Ref} = get(rexi_from),
- erlang:send(Caller, {Ref,Reply}).
-
-%% @equiv sync_reply(Reply, 300000)
-sync_reply(Reply) ->
- sync_reply(Reply, 300000).
-
-%% @doc convenience function to reply to caller and wait for response. Message
-%% is of the form {OriginalRef, {self(),reference()}, Reply}, which enables the
-%% original caller to respond back.
--spec sync_reply(any(), pos_integer() | infinity) -> any().
-sync_reply(Reply, Timeout) ->
- {Caller, Ref} = get(rexi_from),
- Tag = make_ref(),
- erlang:send(Caller, {Ref, {self(),Tag}, Reply}),
- receive {Tag, Response} ->
- Response
- after Timeout ->
- timeout
- end.
-
-%% @equiv stream_init(300000)
-stream_init() ->
- stream_init(300000).
-
-%% @doc Initialize an RPC stream that involves sending multiple
-%% messages back to the coordinator.
-%%
-%% This should be called by rexi workers. It blocks until the
-%% coordinator responds with whether this worker should proceed.
-%% This function will either return with `ok` or call
-%% `erlang:exit/1`.
--spec stream_init(pos_integer()) -> ok.
-stream_init(Timeout) ->
- case sync_reply(rexi_STREAM_INIT, Timeout) of
- rexi_STREAM_START ->
- ok;
- rexi_STREAM_CANCEL ->
- exit(normal);
- timeout ->
- couch_stats:increment_counter(
- [rexi, streams, timeout, init_stream]
- ),
- exit(timeout);
- Else ->
- exit({invalid_stream_message, Else})
- end.
-
-%% @doc Start a worker stream
-%%
-%% If a coordinator wants to continue using a streaming worker it
-%% should use this function to inform the worker to continue
-%% sending messages. The `From` should be the value provided by
-%% the worker in the rexi_STREAM_INIT message.
--spec stream_start({pid(), any()}) -> ok.
-stream_start({Pid, _Tag}=From) when is_pid(Pid) ->
- gen_server:reply(From, rexi_STREAM_START).
-
-%% @doc Cancel a worker stream
-%%
-%% If a coordinator decides that a worker is not going to be part
-%% of the response, it should use this function to cancel the worker.
-%% The `From` should be the value provided by the worker in the
-%% rexi_STREAM_INIT message.
--spec stream_cancel({pid(), any()}) -> ok.
-stream_cancel({Pid, _Tag}=From) when is_pid(Pid) ->
- gen_server:reply(From, rexi_STREAM_CANCEL).
-
-%% @equiv stream(Msg, 10, 300000)
-stream(Msg) ->
- stream(Msg, 10, 300000).
-
-%% @equiv stream(Msg, Limit, 300000)
-stream(Msg, Limit) ->
- stream(Msg, Limit, 300000).
-
-%% @doc convenience function to stream messages to caller while blocking when
-%% a specific number of messages are outstanding. Message is of the form
-%% {OriginalRef, self(), Reply}, which enables the original caller to ack.
--spec stream(any(), integer(), pos_integer() | infinity) -> any().
-stream(Msg, Limit, Timeout) ->
- try maybe_wait(Limit, Timeout) of
- {ok, Count} ->
- put(rexi_unacked, Count+1),
- {Caller, Ref} = get(rexi_from),
- erlang:send(Caller, {Ref, self(), Msg}),
- ok
- catch throw:timeout ->
- couch_stats:increment_counter([rexi, streams, timeout, stream]),
- exit(timeout)
- end.
-
-%% @equiv stream2(Msg, 5, 300000)
-stream2(Msg) ->
- Limit = config:get_integer("rexi", "stream_limit", 5),
- stream2(Msg, Limit).
-
-%% @equiv stream2(Msg, Limit, 300000)
-stream2(Msg, Limit) ->
- stream2(Msg, Limit, 300000).
-
-%% @doc Stream a message back to the coordinator. It limits the
-%% number of unacked messages to Limit and throws a timeout error
-%% if it doesn't receive an ack in Timeout milliseconds. It is
-%% a combination of the old stream_start and stream functions and
-%% automatically performs the stream initialization logic.
--spec stream2(any(), pos_integer(), pos_integer() | infinity) -> any().
-stream2(Msg, Limit, Timeout) ->
- maybe_init_stream(Timeout),
- try maybe_wait(Limit, Timeout) of
- {ok, Count} ->
- put(rexi_unacked, Count+1),
- {Caller, Ref} = get(rexi_from),
- erlang:send(Caller, {Ref, self(), Msg}),
- ok
- catch throw:timeout ->
- couch_stats:increment_counter([rexi, streams, timeout, stream]),
- exit(timeout)
- end.
-
-%% @equiv stream_last(Msg, 300000)
-stream_last(Msg) ->
- stream_last(Msg, 300000).
-
-%% @doc Send the last message in a stream. The difference between
-%% this and stream is that it uses rexi:reply/1, which doesn't include
-%% the worker pid and doesn't wait for a response from the coordinator.
-stream_last(Msg, Timeout) ->
- maybe_init_stream(Timeout),
- rexi:reply(Msg),
- ok.
-
-%% @equiv stream_ack(Client, 1)
-stream_ack(Client) ->
- erlang:send(Client, {rexi_ack, 1}).
-
-%% @doc Ack streamed messages
-stream_ack(Client, N) ->
- erlang:send(Client, {rexi_ack, N}).
-
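-%% Example (illustrative sketch, not part of the API): a worker spawned via
-%% rexi:cast/2,3 streams rows back and finishes with stream_last/1, while the
-%% coordinator answers the rexi_STREAM_INIT message with stream_start/1 (or
-%% stream_cancel/1) and acknowledges each row with stream_ack/1:
-%%
-%%     stream_rows([Row | Rest]) ->
-%%         ok = rexi:stream2({row, Row}),
-%%         stream_rows(Rest);
-%%     stream_rows([]) ->
-%%         rexi:stream_last(complete).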
-
-%% Sends a ping message to the coordinator. This is for long-running
-%% operations on a node that could otherwise exceed the rexi timeout.
-ping() ->
- {Caller, _} = get(rexi_from),
- erlang:send(Caller, {rexi, '$rexi_ping'}).
-
-
-%% internal functions %%
-
-cast_msg(Msg) -> {'$gen_cast', Msg}.
-
-maybe_init_stream(Timeout) ->
- case get(rexi_STREAM_INITED) of
- true ->
- ok;
- _ ->
- init_stream(Timeout)
- end.
-
-init_stream(Timeout) ->
- case sync_reply(rexi_STREAM_INIT, Timeout) of
- rexi_STREAM_START ->
- put(rexi_STREAM_INITED, true),
- ok;
- rexi_STREAM_CANCEL ->
- exit(normal);
- timeout ->
- exit(timeout);
- Else ->
- exit({invalid_stream_message, Else})
- end.
-
-maybe_wait(Limit, Timeout) ->
- case get(rexi_unacked) of
- undefined ->
- {ok, 0};
- Count when Count >= Limit ->
- wait_for_ack(Count, Timeout);
- Count ->
- drain_acks(Count)
- end.
-
-wait_for_ack(Count, Timeout) ->
- receive
- {rexi_ack, N} -> drain_acks(Count-N)
- after Timeout ->
- couch_stats:increment_counter([rexi, streams, timeout, wait_for_ack]),
- throw(timeout)
- end.
-
-drain_acks(Count) when Count < 0 ->
- erlang:error(mismatched_rexi_ack);
-drain_acks(Count) ->
- receive
- {rexi_ack, N} -> drain_acks(Count-N)
- after 0 ->
- {ok, Count}
- end.
diff --git a/src/rexi/src/rexi_app.erl b/src/rexi/src/rexi_app.erl
deleted file mode 100644
index 0f1e892b5..000000000
--- a/src/rexi/src/rexi_app.erl
+++ /dev/null
@@ -1,22 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi_app).
--behaviour(application).
--export([start/2, stop/1]).
-
-
-start(_Type, StartArgs) ->
- rexi_sup:start_link(StartArgs).
-
-stop(_State) ->
- ok.
diff --git a/src/rexi/src/rexi_buffer.erl b/src/rexi/src/rexi_buffer.erl
deleted file mode 100644
index d16dc8ba3..000000000
--- a/src/rexi/src/rexi_buffer.erl
+++ /dev/null
@@ -1,104 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
--module(rexi_buffer).
-
--behaviour(gen_server).
--vsn(1).
-
-% gen_server callbacks
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
-
--export ([
- send/2,
- start_link/1
-]).
-
--record(state, {
- buffer = queue:new(),
- sender = nil,
- count = 0,
- max_count
-}).
-
-start_link(ServerId) ->
- gen_server:start_link({local, ServerId}, ?MODULE, nil, []).
-
-send(Dest, Msg) ->
- Server = list_to_atom(lists:concat([rexi_buffer, "_", get_node(Dest)])),
- gen_server:cast(Server, {deliver, Dest, Msg}).
-
-
-init(_) ->
- %% TODO Leverage os_mon to discover available memory in the system
- Max = list_to_integer(config:get("rexi", "buffer_count", "2000")),
- {ok, #state{max_count = Max}}.
-
-handle_call(erase_buffer, _From, State) ->
- {reply, ok, State#state{buffer = queue:new(), count = 0}, 0};
-
-handle_call(get_buffered_count, _From, State) ->
- {reply, State#state.count, State, 0}.
-
-handle_cast({deliver, Dest, Msg}, #state{buffer = Q, count = C} = State) ->
- couch_stats:increment_counter([rexi, buffered]),
- Q2 = queue:in({Dest, Msg}, Q),
- case should_drop(State) of
- true ->
- couch_stats:increment_counter([rexi, dropped]),
- {noreply, State#state{buffer = queue:drop(Q2)}, 0};
- false ->
- {noreply, State#state{buffer = Q2, count = C+1}, 0}
- end.
-
-handle_info(timeout, #state{sender = nil, buffer = {[],[]}, count = 0}=State) ->
- {noreply, State};
-handle_info(timeout, #state{sender = nil, count = C} = State) when C > 0 ->
- #state{buffer = Q, count = C} = State,
- {{value, {Dest, Msg}}, Q2} = queue:out_r(Q),
- NewState = State#state{buffer = Q2, count = C-1},
- case erlang:send(Dest, Msg, [noconnect, nosuspend]) of
- ok when C =:= 1 ->
-            % We just sent the last queued message; we'll use this opportunity
-            % to hibernate the process and run a garbage collection
- {noreply, NewState, hibernate};
- ok when C > 1 ->
- % Use a zero timeout to recurse into this handler ASAP
- {noreply, NewState, 0};
- _Else ->
- % We're experiencing delays, keep buffering internally
- Sender = spawn_monitor(erlang, send, [Dest, Msg]),
- {noreply, NewState#state{sender = Sender}}
- end;
-handle_info(timeout, State) ->
- % Waiting on a sender to return
- {noreply, State};
-
-handle_info({'DOWN', Ref, _, Pid, _}, #state{sender = {Pid, Ref}} = State) ->
- {noreply, State#state{sender = nil}, 0}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, {state, Buffer, Sender, Count}, _Extra) ->
- Max = list_to_integer(config:get("rexi", "buffer_count", "2000")),
- {ok, #state{buffer=Buffer, sender=Sender, count=Count, max_count=Max}};
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-should_drop(#state{count = Count, max_count = Max}) ->
- Count >= Max.
-
-get_node({_, Node}) when is_atom(Node) ->
- Node;
-get_node(Pid) when is_pid(Pid) ->
- node(Pid).
diff --git a/src/rexi/src/rexi_monitor.erl b/src/rexi/src/rexi_monitor.erl
deleted file mode 100644
index f90ec5160..000000000
--- a/src/rexi/src/rexi_monitor.erl
+++ /dev/null
@@ -1,65 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi_monitor).
--export([start/1, stop/1]).
--export([wait_monitors/1]).
-
-
-%% @doc spawn_links a process which monitors the supplied list of items and
-%% returns the process ID. If a monitored process exits, the caller will
-%% receive a {rexi_DOWN, MonitoringPid, DeadPid, Reason} message.
--spec start([pid() | atom() | {atom(),node()}]) -> pid().
-start(Procs) ->
- Parent = self(),
- Nodes = [node() | nodes()],
- {Mon, Skip} = lists:partition(fun(P) -> should_monitor(P, Nodes) end,
- Procs),
- spawn_link(fun() ->
- [notify_parent(Parent, P, noconnect) || P <- Skip],
- [erlang:monitor(process, P) || P <- Mon],
- wait_monitors(Parent)
- end).
-
-%% @doc Cleanly shut down the monitoring process and flush all rexi_DOWN
-%% messages from our mailbox.
--spec stop(pid()) -> ok.
-stop(MonitoringPid) ->
- MonitoringPid ! {self(), shutdown},
- flush_down_messages().
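-
-%% Example sketch (WorkerPids is a hypothetical list of remote worker pids):
-%%
-%%     MonPid = rexi_monitor:start(WorkerPids),
-%%     %% ... collect replies; a dead worker shows up as
-%%     %% {rexi_DOWN, MonPid, DeadPid, Reason} ...
-%%     rexi_monitor:stop(MonPid).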
-
-%% internal functions %%
-
-notify_parent(Parent, Pid, Reason) ->
- couch_stats:increment_counter([rexi, down]),
- erlang:send(Parent, {rexi_DOWN, self(), Pid, Reason}).
-
-should_monitor(Pid, Nodes) when is_pid(Pid) ->
- lists:member(node(Pid), Nodes);
-should_monitor({_, Node}, Nodes) ->
- lists:member(Node, Nodes).
-
-wait_monitors(Parent) ->
- receive
- {'DOWN', _, process, Pid, Reason} ->
- notify_parent(Parent, Pid, Reason),
- ?MODULE:wait_monitors(Parent);
- {Parent, shutdown} ->
- ok
- end.
-
-flush_down_messages() ->
- receive {rexi_DOWN, _, _, _} ->
- flush_down_messages()
- after 0 ->
- ok
- end.
diff --git a/src/rexi/src/rexi_server.erl b/src/rexi/src/rexi_server.erl
deleted file mode 100644
index fedff69c3..000000000
--- a/src/rexi/src/rexi_server.erl
+++ /dev/null
@@ -1,193 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi_server).
--behaviour(gen_server).
--vsn(1).
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3]).
-
--export([start_link/1, init_p/2, init_p/3]).
-
--include_lib("rexi/include/rexi.hrl").
-
--record(job, {
- client::reference(),
- worker::reference(),
- client_pid::pid(),
- worker_pid::pid()
-}).
-
--record(st, {
- workers = ets:new(workers, [private, {keypos, #job.worker}]),
- clients = ets:new(clients, [private, {keypos, #job.client}]),
- errors = queue:new(),
- error_limit = 0,
- error_count = 0
-}).
-
-start_link(ServerId) ->
- gen_server:start_link({local, ServerId}, ?MODULE, [], []).
-
-init([]) ->
- couch_util:set_mqd_off_heap(?MODULE),
- {ok, #st{}}.
-
-handle_call(get_errors, _From, #st{errors = Errors} = St) ->
- {reply, {ok, lists:reverse(queue:to_list(Errors))}, St};
-
-handle_call(get_last_error, _From, #st{errors = Errors} = St) ->
- try
- {reply, {ok, queue:get_r(Errors)}, St}
- catch error:empty ->
- {reply, {error, empty}, St}
- end;
-
-handle_call({set_error_limit, N}, _From, #st{error_count=Len, errors=Q} = St) ->
- if N < Len ->
- {NewQ, _} = queue:split(N, Q);
- true ->
- NewQ = Q
- end,
- NewLen = queue:len(NewQ),
- {reply, ok, St#st{error_limit=N, error_count=NewLen, errors=NewQ}};
-
-handle_call(_Request, _From, St) ->
- {reply, ignored, St}.
-
-
-handle_cast({doit, From, MFA}, St) ->
- handle_cast({doit, From, undefined, MFA}, St);
-
-handle_cast({doit, {ClientPid, ClientRef} = From, Nonce, MFA}, State) ->
- {LocalPid, Ref} = spawn_monitor(?MODULE, init_p, [From, MFA, Nonce]),
- Job = #job{
- client = ClientRef,
- worker = Ref,
- client_pid = ClientPid,
- worker_pid = LocalPid
- },
- {noreply, add_job(Job, State)};
-
-
-handle_cast({kill, FromRef}, St) ->
- kill_worker(FromRef, St),
- {noreply, St};
-
-handle_cast({kill_all, FromRefs}, St) ->
- lists:foreach(fun(FromRef) -> kill_worker(FromRef, St) end, FromRefs),
- {noreply, St};
-
-handle_cast(_, St) ->
- couch_log:notice("rexi_server ignored_cast", []),
- {noreply, St}.
-
-handle_info({'DOWN', Ref, process, _, normal}, #st{workers=Workers} = St) ->
- case find_worker(Ref, Workers) of
- #job{} = Job ->
- {noreply, remove_job(Job, St)};
- false ->
- {noreply, St}
- end;
-
-handle_info({'DOWN', Ref, process, Pid, Error}, #st{workers=Workers} = St) ->
- case find_worker(Ref, Workers) of
- #job{worker_pid=Pid, worker=Ref, client_pid=CPid, client=CRef} =Job ->
- case Error of #error{reason = {_Class, Reason}, stack = Stack} ->
- notify_caller({CPid, CRef}, {Reason, Stack}),
- St1 = save_error(Error, St),
- {noreply, remove_job(Job, St1)};
- _ ->
- notify_caller({CPid, CRef}, Error),
- {noreply, remove_job(Job, St)}
- end;
- false ->
- {noreply, St}
- end;
-
-handle_info(_Info, St) ->
- {noreply, St}.
-
-terminate(_Reason, St) ->
- ets:foldl(fun(#job{worker_pid=Pid},_) -> exit(Pid,kill) end, nil,
- St#st.workers),
- ok.
-
-code_change(_OldVsn, #st{}=State, _Extra) ->
- {ok, State}.
-
-init_p(From, MFA) ->
- init_p(From, MFA, undefined).
-
-%% @doc initializes a process started by rexi_server.
--spec init_p({pid(), reference()}, {atom(), atom(), list()},
- string() | undefined) -> any().
-init_p(From, {M,F,A}, Nonce) ->
- put(rexi_from, From),
- put('$initial_call', {M,F,length(A)}),
- put(nonce, Nonce),
- try apply(M, F, A) catch exit:normal -> ok; Class:Reason ->
- Stack = clean_stack(),
- {ClientPid, _ClientRef} = From,
- couch_log:error(
- "rexi_server: from: ~s(~p) mfa: ~s:~s/~p ~p:~p ~100p", [
- node(ClientPid), ClientPid, M, F, length(A),
- Class, Reason, Stack]),
- exit(#error{
- timestamp = os:timestamp(),
- reason = {Class, Reason},
- mfa = {M,F,A},
- nonce = Nonce,
- stack = Stack
- })
- end.
-
-%% internal
-
-save_error(_E, #st{error_limit = 0} = St) ->
- St;
-save_error(E, #st{errors=Q, error_limit=L, error_count=C} = St) when C >= L ->
- St#st{errors = queue:in(E, queue:drop(Q))};
-save_error(E, #st{errors=Q, error_count=C} = St) ->
- St#st{errors = queue:in(E, Q), error_count = C+1}.
-
-clean_stack() ->
- lists:map(fun({M,F,A}) when is_list(A) -> {M,F,length(A)}; (X) -> X end,
- erlang:get_stacktrace()).
-
-add_job(Job, #st{workers = Workers, clients = Clients} = State) ->
- ets:insert(Workers, Job),
- ets:insert(Clients, Job),
- State.
-
-remove_job(Job, #st{workers = Workers, clients = Clients} = State) ->
- ets:delete_object(Workers, Job),
- ets:delete_object(Clients, Job),
- State.
-
-find_worker(Ref, Tab) ->
- case ets:lookup(Tab, Ref) of [] -> false; [Worker] -> Worker end.
-
-notify_caller({Caller, Ref}, Reason) ->
- rexi_utils:send(Caller, {Ref, {rexi_EXIT, Reason}}).
-
-
-kill_worker(FromRef, #st{clients = Clients} = St) ->
- case find_worker(FromRef, Clients) of
- #job{worker = KeyRef, worker_pid = Pid} = Job ->
- erlang:demonitor(KeyRef),
- exit(Pid, kill),
- remove_job(Job, St),
- ok;
- false ->
- ok
- end.
diff --git a/src/rexi/src/rexi_server_mon.erl b/src/rexi/src/rexi_server_mon.erl
deleted file mode 100644
index cfe1144ce..000000000
--- a/src/rexi/src/rexi_server_mon.erl
+++ /dev/null
@@ -1,176 +0,0 @@
-% Copyright 2010-2013 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi_server_mon).
--behaviour(gen_server).
--behaviour(mem3_cluster).
--vsn(1).
-
-
--export([
- start_link/1,
- status/0
-]).
-
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3
-]).
-
--export([
- cluster_stable/1,
- cluster_unstable/1
-]).
-
-
--define(CLUSTER_STABILITY_PERIOD_SEC, 15).
-
-
-start_link(ChildMod) ->
- Name = list_to_atom(lists:concat([ChildMod, "_mon"])),
- gen_server:start_link({local, Name}, ?MODULE, ChildMod, []).
-
-
-status() ->
- gen_server:call(?MODULE, status).
-
-
-% Mem3 cluster callbacks
-
-cluster_unstable(Server) ->
- couch_log:notice("~s : cluster unstable", [?MODULE]),
- gen_server:cast(Server, cluster_unstable),
- Server.
-
-cluster_stable(Server) ->
- gen_server:cast(Server, cluster_stable),
- Server.
-
-
-% gen_server callbacks
-
-init(ChildMod) ->
- {ok, _Mem3Cluster} = mem3_cluster:start_link(?MODULE, self(),
- ?CLUSTER_STABILITY_PERIOD_SEC, ?CLUSTER_STABILITY_PERIOD_SEC),
- start_servers(ChildMod),
- couch_log:notice("~s : started servers", [ChildMod]),
- {ok, ChildMod}.
-
-
-terminate(_Reason, _St) ->
- ok.
-
-
-handle_call(status, _From, ChildMod) ->
- case missing_servers(ChildMod) of
- [] ->
- {reply, ok, ChildMod};
- Missing ->
- {reply, {waiting, length(Missing)}, ChildMod}
- end;
-
-handle_call(Msg, _From, St) ->
- couch_log:notice("~s ignored_call ~w", [?MODULE, Msg]),
- {reply, ignored, St}.
-
-% If the cluster is unstable, a node was just added or removed. Check if any
-% servers can be started, but do not immediately stop any; defer that until
-% the cluster has stabilized.
-handle_cast(cluster_unstable, ChildMod) ->
- couch_log:notice("~s : cluster unstable", [ChildMod]),
- start_servers(ChildMod),
- {noreply, ChildMod};
-
-% When the cluster is stable, start any servers for new nodes and stop servers
-% for the ones that disconnected.
-handle_cast(cluster_stable, ChildMod) ->
- couch_log:notice("~s : cluster stable", [ChildMod]),
- start_servers(ChildMod),
- stop_servers(ChildMod),
- {noreply, ChildMod};
-
-handle_cast(Msg, St) ->
- couch_log:notice("~s ignored_cast ~w", [?MODULE, Msg]),
- {noreply, St}.
-
-
-handle_info(Msg, St) ->
- couch_log:notice("~s ignored_info ~w", [?MODULE, Msg]),
- {noreply, St}.
-
-
-code_change(_OldVsn, nil, _Extra) ->
- {ok, rexi_server};
-code_change(_OldVsn, St, _Extra) ->
- {ok, St}.
-
-
-start_servers(ChildMod) ->
- lists:foreach(fun(Id) ->
- {ok, _} = start_server(ChildMod, Id)
- end, missing_servers(ChildMod)).
-
-stop_servers(ChildMod) ->
- lists:foreach(fun(Id) ->
- ok = stop_server(ChildMod, Id)
- end, extra_servers(ChildMod)).
-
-
-server_ids(ChildMod) ->
- Nodes = [node() | nodes()],
- [list_to_atom(lists:concat([ChildMod, "_", Node])) || Node <- Nodes].
-
-
-running_servers(ChildMod) ->
- [Id || {Id, _, _, _} <- supervisor:which_children(sup_module(ChildMod))].
-
-
-missing_servers(ChildMod) ->
- server_ids(ChildMod) -- running_servers(ChildMod).
-
-
-extra_servers(ChildMod) ->
- running_servers(ChildMod) -- server_ids(ChildMod).
-
-
-start_server(ChildMod, ChildId) ->
- ChildSpec = {
- ChildId,
- {ChildMod, start_link, [ChildId]},
- permanent,
- brutal_kill,
- worker,
- [ChildMod]
- },
- case supervisor:start_child(sup_module(ChildMod), ChildSpec) of
- {ok, Pid} ->
- {ok, Pid};
- Else ->
- erlang:error(Else)
- end.
-
-
-stop_server(ChildMod, ChildId) ->
- SupMod = sup_module(ChildMod),
- ok = supervisor:terminate_child(SupMod, ChildId),
- ok = supervisor:delete_child(SupMod, ChildId).
-
-
-sup_module(ChildMod) ->
- list_to_atom(lists:concat([ChildMod, "_sup"])).
diff --git a/src/rexi/src/rexi_server_sup.erl b/src/rexi/src/rexi_server_sup.erl
deleted file mode 100644
index 29c6ad60c..000000000
--- a/src/rexi/src/rexi_server_sup.erl
+++ /dev/null
@@ -1,29 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi_server_sup).
--behaviour(supervisor).
-
-
--export([init/1]).
-
--export([start_link/1]).
-
-
-start_link(Name) ->
- supervisor:start_link({local, Name}, ?MODULE, []).
-
-
-init([]) ->
- {ok, {{one_for_one, 1, 1}, []}}.
diff --git a/src/rexi/src/rexi_sup.erl b/src/rexi/src/rexi_sup.erl
deleted file mode 100644
index 3d9aa2a16..000000000
--- a/src/rexi/src/rexi_sup.erl
+++ /dev/null
@@ -1,64 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi_sup).
--behaviour(supervisor).
-
--export([start_link/1]).
--export([init/1]).
-
-start_link(Args) ->
- supervisor:start_link({local,?MODULE}, ?MODULE, Args).
-
-init([]) ->
- {ok, {{rest_for_one, 3, 10}, [
- {
- rexi_server,
- {rexi_server, start_link, [rexi_server]},
- permanent,
- 100,
- worker,
- [rexi_server]
- },
- {
- rexi_server_sup,
- {rexi_server_sup, start_link, [rexi_server_sup]},
- permanent,
- 100,
- supervisor,
- [rexi_server_sup]
- },
- {
- rexi_server_mon,
- {rexi_server_mon, start_link, [rexi_server]},
- permanent,
- 100,
- worker,
- [rexi_server_mon]
- },
- {
- rexi_buffer_sup,
- {rexi_server_sup, start_link, [rexi_buffer_sup]},
- permanent,
- 100,
- supervisor,
- [rexi_server_sup]
- },
- {
- rexi_buffer_mon,
- {rexi_server_mon, start_link, [rexi_buffer]},
- permanent,
- 100,
- worker,
- [rexi_server_mon]
- }
- ]}}.
diff --git a/src/rexi/src/rexi_utils.erl b/src/rexi/src/rexi_utils.erl
deleted file mode 100644
index 960318418..000000000
--- a/src/rexi/src/rexi_utils.erl
+++ /dev/null
@@ -1,105 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi_utils).
-
--export([server_id/1, server_pid/1, send/2, recv/6]).
-
-%% @doc Return a rexi_server id for the given node.
-server_id(Node) ->
- case config:get_boolean("rexi", "server_per_node", true) of
- true ->
- list_to_atom("rexi_server_" ++ atom_to_list(Node));
- _ ->
- rexi_server
- end.
-
-%% @doc Return a {server_id(node()), Node} Pid name for the given Node.
-server_pid(Node) ->
- {server_id(node()), Node}.
-
-%% @doc send a message as quickly as possible
-send(Dest, Msg) ->
- case erlang:send(Dest, Msg, [noconnect, nosuspend]) of
- ok ->
- ok;
- _ ->
- % treat nosuspend and noconnect the same
- rexi_buffer:send(Dest, Msg)
- end.
-
-%% @doc set up the receive loop with an overall timeout
--spec recv([any()], integer(), function(), any(), timeout(), timeout()) ->
- {ok, any()} | {timeout, any()} | {error, atom()} | {error, atom(), any()}.
-recv(Refs, Keypos, Fun, Acc0, infinity, PerMsgTO) ->
- process_mailbox(Refs, Keypos, Fun, Acc0, nil, PerMsgTO);
-recv(Refs, Keypos, Fun, Acc0, GlobalTimeout, PerMsgTO) ->
- TimeoutRef = erlang:make_ref(),
- TRef = erlang:send_after(GlobalTimeout, self(), {timeout, TimeoutRef}),
- try
- process_mailbox(Refs, Keypos, Fun, Acc0, TimeoutRef, PerMsgTO)
- after
- erlang:cancel_timer(TRef)
- end.
-
-process_mailbox(RefList, Keypos, Fun, Acc0, TimeoutRef, PerMsgTO) ->
- case process_message(RefList, Keypos, Fun, Acc0, TimeoutRef, PerMsgTO) of
- {ok, Acc} ->
- process_mailbox(RefList, Keypos, Fun, Acc, TimeoutRef, PerMsgTO);
- {new_refs, NewRefList, Acc} ->
- process_mailbox(NewRefList, Keypos, Fun, Acc, TimeoutRef, PerMsgTO);
- {stop, Acc} ->
- {ok, Acc};
- Error ->
- Error
- end.
-
-process_message(RefList, Keypos, Fun, Acc0, TimeoutRef, PerMsgTO) ->
- receive
- {timeout, TimeoutRef} ->
- {timeout, Acc0};
- {rexi, Ref, Msg} ->
- case lists:keyfind(Ref, Keypos, RefList) of
- false ->
- {ok, Acc0};
- Worker ->
- Fun(Msg, Worker, Acc0)
- end;
- {rexi, Ref, From, Msg} ->
- case lists:keyfind(Ref, Keypos, RefList) of
- false ->
- {ok, Acc0};
- Worker ->
- Fun(Msg, {Worker, From}, Acc0)
- end;
- {rexi, '$rexi_ping'} ->
- {ok, Acc0};
- {Ref, Msg} ->
- case lists:keyfind(Ref, Keypos, RefList) of
- false ->
- % this was some non-matching message which we will ignore
- {ok, Acc0};
- Worker ->
- Fun(Msg, Worker, Acc0)
- end;
- {Ref, From, Msg} ->
- case lists:keyfind(Ref, Keypos, RefList) of
- false ->
- {ok, Acc0};
- Worker ->
- Fun(Msg, {Worker, From}, Acc0)
- end;
- {rexi_DOWN, _, _, _} = Msg ->
- Fun(Msg, nil, Acc0)
- after PerMsgTO ->
- {timeout, Acc0}
- end.
diff --git a/src/setup/.gitignore b/src/setup/.gitignore
deleted file mode 100644
index f84f14c93..000000000
--- a/src/setup/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-ebin
-.rebar
-*~
-*.swp
diff --git a/src/setup/LICENSE b/src/setup/LICENSE
deleted file mode 100644
index 94ad231b8..000000000
--- a/src/setup/LICENSE
+++ /dev/null
@@ -1,203 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
diff --git a/src/setup/README.md b/src/setup/README.md
deleted file mode 100644
index 8a76d9dc5..000000000
--- a/src/setup/README.md
+++ /dev/null
@@ -1,210 +0,0 @@
-This module implements /_cluster_setup and manages the setup of a CouchDB cluster.
-
-### Testing
-
-```bash
-git clone https://git-wip-us.apache.org/repos/asf/couchdb.git
-cd couchdb
-git checkout setup
-./configure
-make
-dev/run --no-join -n 2 --admin a:b
-```
-
-Then, in a new terminal:
-
- $ src/setup/test/t.sh
-
-Before running the next test, kill the `dev/run` script, then reset the
-CouchDB instances with:
-
- $ rm -rf dev/lib/ dev/logs/
- $ dev/run --no-join -n 2 --admin a:b
-
-The Plan:
-
-N. End User Action
-- What happens behind the scenes.
-
-
-1. Launch CouchDB with `$ couchdb`, or init.d, or any other way, exactly
-like it is done in 1.x.x.
-- CouchDB launches and listens on 127.0.0.1:5984
-
-From here on, there are two paths: one via Fauxton (a), the other via an
-HTTP endpoint (b). Fauxton just uses the HTTP endpoint in (b), and (b) can
-be used to set up a cluster programmatically.
-
-When using (b), you POST HTTP requests with a JSON request body (the request content type has to be set to application/json).
-
-If you have already set up a server admin account, you might need to pass the credentials to the HTTP calls using HTTP basic authentication.
-Alternatively, if you use cURL, you can add the username and password inline, like so:
-
-```
-curl -X PUT "http://admin:password@127.0.0.1:5984/mydb"
-```
-
-2.a. Go to Fauxton. There is a “Cluster Setup” tab in the sidebar. Open
-the tab and you are presented with a form that asks for an admin
-username, an admin password, and optionally a bind_address and port to
-bind to publicly. Submit the form with the [Enable Cluster] button.
-
-If this is a single node install that already has an admin set up, there
-is no need to ask for admin credentials here. If the bind_address is !=
-127.0.0.1, we can skip this entirely and Fauxton can show the add_node
-UI right away.
-
-- POST a JSON entity to /_cluster_setup, the entity looks like:
-```
-{
- "action":"enable_cluster",
- "username":"username",
- "password":"password",
- "bind_address":"0.0.0.0",
- "port": 5984
-}
-```
-
-This sets up the admin user on the current node and binds to 0.0.0.0:5984
-or the specified ip:port. Logs admin user into Fauxton automatically.
-
-2.b. POST to /_cluster_setup as shown above.
-
-Repeat on all nodes.
-- keep the same username/password everywhere.
-
-
-3. Pick any one node, for simplicity use the first one, to be the
-“setup coordination node”.
-- this is a “master” node that manages the setup and requires all
- other nodes to be able to see it and vice versa. Setup won’t work
- with unavailable nodes. The notion of “master” will be gone
- once the setup is finished. At that point, the system has no
- master node. Forget that “master” was ever mentioned.
-
-a. Go to Fauxton / Cluster Setup. Once the cluster has been enabled, the
-UI shows an “Add Node” interface with the fields admin and node:
-- POST a JSON entity to /_cluster_setup, the entity looks like:
-```
-{
- "action":"add_node",
- "username":"username",
- "password":"password",
- "host":"192.168.1.100",
- ["port": 5984],
- "name": "node1" // as in “node1@hostname”, same as in vm.args
-}
-```
-
-In the example above, this adds the node with IP address 192.168.1.100 to the cluster.
-
-b. As in a, but without the Fauxton bits; just POST to /_cluster_setup.
-- this request does the following:
- - on the “setup coordination node”:
- - check if we have an Erlang Cookie Secret. If not, generate
- a UUID and set the Erlang cookie to that UUID.
- - store the cookie in config.ini, re-set_cookie() on startup.
- - make a POST request to the node specified in the body above
- using the admin credentials in the body above:
- POST to http://username:password@node_b:5984/_cluster_setup with:
-```
- {
- "action": "receive_cookie",
- "cookie": "<secretcookie>",
- }
-```
-
- - when the request to node B returns, we know the Erlang-level
- inter-cluster communication is enabled and we can start adding
- the node on the CouchDB level. To do that, the “setup
- coordination node” does this to its own HTTP endpoint:
- PUT /nodes/node_b:5984 or the same thing with internal APIs.
-
-- Repeat for all nodes.
-- Fauxton keeps a list of all set up nodes for users to see.
-
-
-4.a. When all nodes are added, click the [Finish Cluster Setup] button
-in Fauxton.
-- this does POST /_cluster_setup
-```
- {
- "action": "finish_cluster"
- }
-```
-
-b. Same as in a.
-
-- this manages the final setup bits, like creating the _users,
- _replicator, _metadata, and _db_updates endpoints, and
- whatever else is needed. // TBD: collect what else is needed.
-
-## Single node auto setup
-
-Setting the option `single_node` to `true` in the `[couchdb]` configuration section runs the single-node setup on startup, so the node is ready for use immediately.
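-
-A minimal config override might look like the following (the exact ini file
-to edit depends on the installation; `local.ini` is a common place for local
-overrides):
-
-```
-[couchdb]
-single_node = true
-```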
-
-### Testing single_node auto setup
-
-Pass `--config-overrides single_node=true` and `-n 1` to `dev/run`:
-
-
- $ dev/run --no-join -n 1 --admin a:b --config-overrides single_node=true
-
-
-Then, in a new terminal:
-
- $ src/setup/test/t-single_node.sh
-
-The script should show that single node is enabled.
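-
-For example, the state check used by the script should return something like:
-
-```
-GET /_cluster_setup
-{"state": "single_node_enabled"}
-```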
-
-## The Setup Endpoint
-
-This is not a REST-y endpoint; it is a simple state machine operated
-by HTTP POST with JSON bodies that have an `action` field.
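-
-For example, against the dev cluster from the Testing section above (admin
-`a`, password `b`; the port shown is the one `dev/run` assigns to the first
-node):
-
-```
-# inspect the current setup state
-curl a:b@127.0.0.1:15984/_cluster_setup
-
-# drive a state transition by POSTing an action
-curl a:b@127.0.0.1:15984/_cluster_setup -d '{"action":"finish_cluster"}' -HContent-Type:application/json
-```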
-
-### State 1: No Cluster Enabled
-
-This is right after starting a node for the first time, and any time
-before the cluster is enabled as outlined above.
-
-```
-GET /_cluster_setup
-{"state": "cluster_disabled"}
-
-POST /_cluster_setup {"action":"enable_cluster"...} -> Transition to State 2
-POST /_cluster_setup {"action":"enable_cluster"...} with empty admin user/pass or invalid host/port or host/port not available -> Error
-POST /_cluster_setup {"action":"anything_but_enable_cluster"...} -> Error
-```
-
-### State 2: Cluster enabled, admin user set, waiting for nodes to be added.
-
-```
-GET /_cluster_setup
-{"state":"cluster_enabled","nodes":[]}
-
-POST /_cluster_setup {"action":"enable_cluster"...} -> Error
-POST /_cluster_setup {"action":"add_node"...} -> Stay in State 2, but return "nodes":["node B"] on GET
-POST /_cluster_setup {"action":"add_node"...} -> if target node not available, Error
-POST /_cluster_setup {"action":"finish_cluster"} with no nodes set up -> Error
-POST /_cluster_setup {"action":"finish_cluster"} -> Transition to State 3
-POST /_cluster_setup {"action":"delete_node"...} -> Stay in State 2, but delete node from /nodes, reflect the change in GET /_cluster_setup
-POST /_cluster_setup {"action":"delete_node","node":"unknown"} -> Error Unknown Node
-```
-
-### State 3: Cluster set up, all nodes operational
-
-```
-GET /_cluster_setup
-{"state":"cluster_finished","nodes":["node a", "node b", ...]}
-
-POST /_cluster_setup {"action":"enable_cluster"...} -> Error
-POST /_cluster_setup {"action":"finish_cluster"...} -> Stay in State 3, do nothing
-POST /_cluster_setup {"action":"add_node"...} -> Error
-POST /_cluster_setup?i_know_what_i_am_doing=true {"action":"add_node"...} -> Add node, stay in State 3.
-POST /_cluster_setup {"action":"delete_node"...} -> Stay in State 3, but delete node from /nodes, reflect the change in GET /_cluster_setup
-POST /_cluster_setup {"action":"delete_node","node":"unknown"} -> Error Unknown Node
-```
-
-// TBD: we need to persist the setup state somewhere.
diff --git a/src/setup/src/setup.app.src b/src/setup/src/setup.app.src
deleted file mode 100644
index ae685c971..000000000
--- a/src/setup/src/setup.app.src
+++ /dev/null
@@ -1,27 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, setup,
- [
- {description, ""},
- {vsn, "1"},
- {registered, []},
- {applications, [
- kernel,
- stdlib,
- couch_epi,
- chttpd,
- couch_log
- ]},
- {mod, { setup_app, []}},
- {env, []}
- ]}.
diff --git a/src/setup/src/setup.erl b/src/setup/src/setup.erl
deleted file mode 100644
index 5129765da..000000000
--- a/src/setup/src/setup.erl
+++ /dev/null
@@ -1,386 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(setup).
-
--export([enable_cluster/1, finish_cluster/1, add_node/1, receive_cookie/1]).
--export([is_cluster_enabled/0, has_cluster_system_dbs/1, cluster_system_dbs/0]).
--export([enable_single_node/1, is_single_node_enabled/1]).
-
--include_lib("../couch/include/couch_db.hrl").
-
-
-require_admins(undefined, {undefined, undefined}) ->
- % no admin in CouchDB, no admin in request
- throw({error, "Cluster setup requires admin account to be configured"});
-require_admins(_,_) ->
- ok.
-
-require_node_count(undefined) ->
- throw({error, "Cluster setup requires node_count to be configured"});
-require_node_count(_) ->
- ok.
-
-error_local_bind_address() ->
- throw({error, "Cluster setup requires a remote bind_address (not 127.0.0.1 nor ::1)"}).
-
-error_invalid_bind_address(InvalidBindAddress) ->
- % io_lib:format/2 returns the message string (io:format/2 would return ok)
- throw({error, io_lib:format("Setup requires a valid IP bind_address. " ++
- "~p is invalid.", [InvalidBindAddress])}).
-
-require_remote_bind_address(OldBindAddress, NewBindAddress) ->
- case {OldBindAddress, NewBindAddress} of
- {"127.0.0.1", undefined} -> error_local_bind_address();
- {_, <<"127.0.0.1">>} -> error_local_bind_address();
- {"::1", undefined} -> error_local_bind_address();
- {_, <<"::1">>} -> error_local_bind_address();
- {_, undefined} -> ok;
- {_, PresentNewBindAddress} -> require_valid_bind_address(PresentNewBindAddress)
- end.
-
-require_valid_bind_address(BindAddress) ->
- ListBindAddress = binary_to_list(BindAddress),
- case inet_parse:address(ListBindAddress) of
- {ok, _} -> ok;
- {error, _} -> error_invalid_bind_address(ListBindAddress)
- end.
-
-is_cluster_enabled() ->
- % bind_address != 127.0.0.1 AND admins != empty
- BindAddress = config:get("chttpd", "bind_address"),
- Admins = config:get("admins"),
- case {BindAddress, Admins} of
- {"127.0.0.1", _} -> false;
- {_,[]} -> false;
- {_,_} -> true
- end.
-
-is_single_node_enabled(Dbs) ->
- % admins != empty AND dbs exist
- Admins = config:get("admins"),
- HasDbs = has_cluster_system_dbs(Dbs),
- case {Admins, HasDbs} of
- {[], _} -> false;
- {_, false} -> false;
- {_,_} -> true
- end.
-
-cluster_system_dbs() ->
- ["_users", "_replicator"].
-
-
-has_cluster_system_dbs([]) ->
- true;
-has_cluster_system_dbs([Db|Dbs]) ->
- case catch fabric:get_db_info(Db) of
- {ok, _} -> has_cluster_system_dbs(Dbs);
- _ -> false
- end.
-
-enable_cluster(Options) ->
-
- case couch_util:get_value(remote_node, Options, undefined) of
- undefined ->
- enable_cluster_int(Options, is_cluster_enabled());
- _ ->
- enable_cluster_http(Options)
- end.
-
-get_remote_request_options(Options) ->
- case couch_util:get_value(remote_current_user, Options, undefined) of
- undefined ->
- [];
- _ ->
- [
- {basic_auth, {
- binary_to_list(couch_util:get_value(remote_current_user, Options)),
- binary_to_list(couch_util:get_value(remote_current_password, Options))
- }}
- ]
- end.
-
-enable_cluster_http(Options) ->
- % POST to nodeB/_setup
- RequestOptions = get_remote_request_options(Options),
- AdminUsername = couch_util:get_value(username, Options),
- AdminPasswordHash = config:get("admins", binary_to_list(AdminUsername)),
-
- Body = ?JSON_ENCODE({[
- {<<"action">>, <<"enable_cluster">>},
- {<<"username">>, AdminUsername},
- {<<"password_hash">>, ?l2b(AdminPasswordHash)},
- {<<"bind_address">>, couch_util:get_value(bind_address, Options)},
- {<<"port">>, couch_util:get_value(port, Options)},
- {<<"node_count">>, couch_util:get_value(node_count, Options)}
- ]}),
-
- Headers = [
- {"Content-Type","application/json"}
- ],
-
- RemoteNode = couch_util:get_value(remote_node, Options),
- Port = get_port(couch_util:get_value(port, Options, 5984)),
-
- Url = binary_to_list(<<"http://", RemoteNode/binary, ":", Port/binary, "/_cluster_setup">>),
-
- case ibrowse:send_req(Url, Headers, post, Body, RequestOptions) of
- {ok, "201", _, _} ->
- ok;
- Else ->
- {error, Else}
- end.
-
-enable_cluster_int(_Options, true) ->
- {error, cluster_enabled};
-enable_cluster_int(Options, false) ->
-
- % if no admin in config and no admin in req -> error
- CurrentAdmins = config:get("admins"),
- NewCredentials = {
- proplists:get_value(username, Options),
- case proplists:get_value(password_hash, Options) of
- undefined -> proplists:get_value(password, Options);
- Pw -> Pw
- end
- },
- ok = require_admins(CurrentAdmins, NewCredentials),
- % if bind_address == 127.0.0.1 and no bind_address in req -> error
- CurrentBindAddress = config:get("chttpd","bind_address"),
- NewBindAddress = proplists:get_value(bind_address, Options),
- ok = require_remote_bind_address(CurrentBindAddress, NewBindAddress),
- NodeCount = couch_util:get_value(node_count, Options),
- ok = require_node_count(NodeCount),
- Port = proplists:get_value(port, Options),
-
- setup_node(NewCredentials, NewBindAddress, NodeCount, Port),
- couch_log:debug("Enable Cluster: ~p~n", [Options]).
-
-set_admin(Username, Password) ->
- config:set("admins", binary_to_list(Username), binary_to_list(Password), #{sensitive => true}).
-
-setup_node(NewCredentials, NewBindAddress, NodeCount, Port) ->
- case NewCredentials of
- {undefined, undefined} ->
- ok;
- {Username, Password} ->
- set_admin(Username, Password)
- end,
-
- ok = require_valid_bind_address(NewBindAddress),
- case NewBindAddress of
- undefined ->
- config:set("chttpd", "bind_address", "0.0.0.0");
- NewBindAddress ->
- config:set("chttpd", "bind_address", binary_to_list(NewBindAddress))
- end,
-
- % for single node setups, set n=1, for larger setups, don’t
- % exceed n=3 as a default
- config:set_integer("cluster", "n", min(NodeCount, 3)),
-
- case Port of
- undefined ->
- ok;
- Port when is_binary(Port) ->
- config:set("chttpd", "port", binary_to_list(Port));
- Port when is_integer(Port) ->
- config:set_integer("chttpd", "port", Port)
- end.
-
-
-finish_cluster(Options) ->
- % ensure that uuid is set
- couch_server:get_uuid(),
-
- ok = wait_connected(),
- ok = sync_admins(),
- ok = sync_uuid(),
- ok = sync_auth_secret(),
- Dbs = proplists:get_value(ensure_dbs_exist, Options, cluster_system_dbs()),
- finish_cluster_int(Dbs, has_cluster_system_dbs(Dbs)).
-
-
-wait_connected() ->
- Nodes = other_nodes(),
- Result = test_util:wait(fun() ->
- case disconnected(Nodes) of
- [] -> ok;
- _ -> wait
- end
- end),
- case Result of
- timeout ->
- Reason = "Cluster setup timed out waiting for nodes to connect",
- throw({setup_error, Reason});
- ok ->
- ok
- end.
-
-
-other_nodes() ->
- mem3:nodes() -- [node()].
-
-
-disconnected(Nodes) ->
- lists:filter(fun(Node) ->
- case net_adm:ping(Node) of
- pong -> false;
- pang -> true
- end
- end, Nodes).
-
-
-sync_admins() ->
- ok = lists:foreach(fun({User, Pass}) ->
- sync_admin(User, Pass)
- end, config:get("admins")).
-
-
-sync_admin(User, Pass) ->
- sync_config("admins", User, Pass).
-
-
-sync_uuid() ->
- Uuid = config:get("couchdb", "uuid"),
- sync_config("couchdb", "uuid", Uuid).
-
-sync_auth_secret() ->
- Secret = config:get("couch_httpd_auth", "secret"),
- sync_config("couch_httpd_auth", "secret", Secret).
-
-
-sync_config(Section, Key, Value) ->
- {Results, Errors} = rpc:multicall(other_nodes(), config, set,
- [Section, Key, Value]),
- case validate_multicall(Results, Errors) of
- ok ->
- ok;
- error ->
- couch_log:error("~p sync_admin results ~p errors ~p",
- [?MODULE, Results, Errors]),
- Reason = "Cluster setup unable to sync admin passwords",
- throw({setup_error, Reason})
- end.
-
-
-validate_multicall(Results, Errors) ->
- AllOk = lists:all(fun
- (ok) -> true;
- (_) -> false
- end, Results),
- case AllOk andalso Errors == [] of
- true ->
- ok;
- false ->
- error
- end.
-
-
-finish_cluster_int(_Dbs, true) ->
- {error, cluster_finished};
-finish_cluster_int(Dbs, false) ->
- lists:foreach(fun fabric:create_db/1, Dbs).
-
-
-enable_single_node(Options) ->
- % if no admin in config and no admin in req -> error
- CurrentAdmins = config:get("admins"),
- NewCredentials = {
- proplists:get_value(username, Options),
- case proplists:get_value(password_hash, Options) of
- undefined -> proplists:get_value(password, Options);
- Pw -> Pw
- end
- },
- ok = require_admins(CurrentAdmins, NewCredentials),
- % skip bind_address validation, anything is fine
- NewBindAddress = proplists:get_value(bind_address, Options),
- Port = proplists:get_value(port, Options),
-
- setup_node(NewCredentials, NewBindAddress, 1, Port),
- Dbs = proplists:get_value(ensure_dbs_exist, Options, cluster_system_dbs()),
- finish_cluster_int(Dbs, has_cluster_system_dbs(Dbs)),
- couch_log:debug("Enable Single Node: ~p~n", [Options]).
-
-
-add_node(Options) ->
- add_node_int(Options, is_cluster_enabled()).
-
-add_node_int(_Options, false) ->
- {error, cluster_not_enabled};
-add_node_int(Options, true) ->
- couch_log:debug("add node_int: ~p~n", [Options]),
- ErlangCookie = erlang:get_cookie(),
-
- % POST to nodeB/_setup
- RequestOptions = [
- {basic_auth, {
- binary_to_list(proplists:get_value(username, Options)),
- binary_to_list(proplists:get_value(password, Options))
- }}
- ],
-
- Body = ?JSON_ENCODE({[
- {<<"action">>, <<"receive_cookie">>},
- {<<"cookie">>, atom_to_binary(ErlangCookie, utf8)}
- ]}),
-
- Headers = [
- {"Content-Type","application/json"}
- ],
-
- Host = proplists:get_value(host, Options),
- Port = get_port(proplists:get_value(port, Options, 5984)),
- Name = proplists:get_value(name, Options, get_default_name(Port)),
-
- Url = binary_to_list(<<"http://", Host/binary, ":", Port/binary, "/_cluster_setup">>),
-
- case ibrowse:send_req(Url, Headers, post, Body, RequestOptions) of
- {ok, "201", _, _} ->
- % when done, PUT :5986/nodes/nodeB
- create_node_doc(Host, Name);
- Else ->
- Else
- end.
-
-get_port(Port) when is_integer(Port) ->
- list_to_binary(integer_to_list(Port));
-get_port(Port) when is_list(Port) ->
- list_to_binary(Port);
-get_port(Port) when is_binary(Port) ->
- Port.
-
-create_node_doc(Host, Name) ->
- {ok, Db} = couch_db:open_int(<<"_nodes">>, []),
- Doc = {[{<<"_id">>, <<Name/binary, "@", Host/binary>>}]},
- Options = [],
- CouchDoc = couch_doc:from_json_obj(Doc),
-
- couch_db:update_doc(Db, CouchDoc, Options).
-
-get_default_name(Port) ->
- case Port of
- % shortcut for easier development
- <<"15984">> ->
- <<"node1">>;
- <<"25984">> ->
- <<"node2">>;
- <<"35984">> ->
- <<"node3">>;
- % by default, all nodes have the name `couchdb`
- _ ->
- <<"couchdb">>
- end.
-
-receive_cookie(Options) ->
- Cookie = proplists:get_value(cookie, Options),
- erlang:set_cookie(node(), binary_to_atom(Cookie, latin1)).
diff --git a/src/setup/src/setup_app.erl b/src/setup/src/setup_app.erl
deleted file mode 100644
index 330450131..000000000
--- a/src/setup/src/setup_app.erl
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(setup_app).
-
--behaviour(application).
-
-%% Application callbacks
--export([start/2, stop/1]).
-
-%% ===================================================================
-%% Application callbacks
-%% ===================================================================
-
-start(_StartType, _StartArgs) ->
- setup_sup:start_link().
-
-stop(_State) ->
- ok.
diff --git a/src/setup/src/setup_epi.erl b/src/setup/src/setup_epi.erl
deleted file mode 100644
index c3f2636f0..000000000
--- a/src/setup/src/setup_epi.erl
+++ /dev/null
@@ -1,49 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
--module(setup_epi).
-
--behaviour(couch_epi_plugin).
-
--export([
- app/0,
- providers/0,
- services/0,
- data_subscriptions/0,
- data_providers/0,
- processes/0,
- notify/3
-]).
-
-app() ->
- setup.
-
-providers() ->
- [
- {chttpd_handlers, setup_httpd_handlers}
- ].
-
-services() ->
- [].
-
-data_subscriptions() ->
- [].
-
-data_providers() ->
- [].
-
-processes() ->
- [].
-
-notify(_Key, _Old, _New) ->
- ok.
diff --git a/src/setup/src/setup_httpd.erl b/src/setup/src/setup_httpd.erl
deleted file mode 100644
index 48b1b2a5a..000000000
--- a/src/setup/src/setup_httpd.erl
+++ /dev/null
@@ -1,180 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(setup_httpd).
--include_lib("couch/include/couch_db.hrl").
-
--export([handle_setup_req/1]).
-
-handle_setup_req(#httpd{method='POST'}=Req) ->
- ok = chttpd:verify_is_server_admin(Req),
- couch_httpd:validate_ctype(Req, "application/json"),
- Setup = get_body(Req),
- couch_log:notice("Setup: ~p~n", [remove_sensitive(Setup)]),
- Action = binary_to_list(couch_util:get_value(<<"action">>, Setup, <<"missing">>)),
- case handle_action(Action, Setup) of
- ok ->
- chttpd:send_json(Req, 201, {[{ok, true}]});
- {error, Message} ->
- couch_httpd:send_error(Req, 400, <<"bad_request">>, Message)
- end;
-handle_setup_req(#httpd{method='GET'}=Req) ->
- ok = chttpd:verify_is_server_admin(Req),
- Dbs = chttpd:qs_json_value(Req, "ensure_dbs_exist", setup:cluster_system_dbs()),
- couch_log:notice("Dbs: ~p~n", [Dbs]),
- SingleNodeConfig = config:get_boolean("couchdb", "single_node", false),
- case SingleNodeConfig of
- true ->
- chttpd:send_json(Req, 200, {[{state, single_node_enabled}]});
- _ ->
- case config:get("cluster", "n", undefined) of
- "1" ->
- case setup:is_single_node_enabled(Dbs) of
- false ->
- chttpd:send_json(Req, 200, {[{state, single_node_disabled}]});
- true ->
- chttpd:send_json(Req, 200, {[{state, single_node_enabled}]})
- end;
- _ ->
- case setup:is_cluster_enabled() of
- false ->
- chttpd:send_json(Req, 200, {[{state, cluster_disabled}]});
- true ->
- case setup:has_cluster_system_dbs(Dbs) of
- false ->
- chttpd:send_json(Req, 200, {[{state, cluster_enabled}]});
- true ->
- chttpd:send_json(Req, 200, {[{state, cluster_finished}]})
- end
- end
- end
- end;
-handle_setup_req(#httpd{}=Req) ->
- chttpd:send_method_not_allowed(Req, "GET,POST").
-
-
-get_options(Options, Setup) ->
- ExtractValues = fun({Tag, Option}, OptionsAcc) ->
- case couch_util:get_value(Option, Setup) of
- undefined -> OptionsAcc;
- Value -> [{Tag, Value} | OptionsAcc]
- end
- end,
- lists:foldl(ExtractValues, [], Options).
-
-handle_action("enable_cluster", Setup) ->
- Options = get_options([
- {username, <<"username">>},
- {password, <<"password">>},
- {password_hash, <<"password_hash">>},
- {bind_address, <<"bind_address">>},
- {port, <<"port">>},
- {remote_node, <<"remote_node">>},
- {remote_current_user, <<"remote_current_user">>},
- {remote_current_password, <<"remote_current_password">>},
- {node_count, <<"node_count">>}
- ], Setup),
- case setup:enable_cluster(Options) of
- {error, cluster_enabled} ->
- {error, <<"Cluster is already enabled">>};
- _ -> ok
- end;
-
-
-handle_action("finish_cluster", Setup) ->
- couch_log:notice("finish_cluster: ~p~n", [remove_sensitive(Setup)]),
-
- Options = get_options([
- {ensure_dbs_exist, <<"ensure_dbs_exist">>}
- ], Setup),
- case setup:finish_cluster(Options) of
- {error, cluster_finished} ->
- {error, <<"Cluster is already finished">>};
- Else ->
- couch_log:notice("finish_cluster: ~p~n", [Else]),
- ok
- end;
-
-handle_action("enable_single_node", Setup) ->
- couch_log:notice("enable_single_node: ~p~n", [remove_sensitive(Setup)]),
-
- Options = get_options([
- {ensure_dbs_exist, <<"ensure_dbs_exist">>},
- {username, <<"username">>},
- {password, <<"password">>},
- {password_hash, <<"password_hash">>},
- {bind_address, <<"bind_address">>},
- {port, <<"port">>}
- ], Setup),
- case setup:enable_single_node(Options) of
- {error, cluster_finished} ->
- {error, <<"Cluster is already finished">>};
- Else ->
- couch_log:notice("Else: ~p~n", [Else]),
- ok
- end;
-
-
-handle_action("add_node", Setup) ->
- couch_log:notice("add_node: ~p~n", [remove_sensitive(Setup)]),
-
- Options = get_options([
- {username, <<"username">>},
- {password, <<"password">>},
- {host, <<"host">>},
- {port, <<"port">>},
- {name, <<"name">>}
- ], Setup),
- case setup:add_node(Options) of
- {error, cluster_not_enabled} ->
- {error, <<"Cluster is not enabled.">>};
- {error, {conn_failed, {error, econnrefused}}} ->
- {error, <<"Add node failed. Invalid Host and/or Port.">>};
- {error, wrong_credentials} ->
- {error, <<"Add node failed. Invalid admin credentials,">>};
- {error, Message} ->
- {error, Message};
- _ -> ok
- end;
-
-handle_action("remove_node", Setup) ->
- couch_log:notice("remove_node: ~p~n", [remove_sensitive(Setup)]);
-
-handle_action("receive_cookie", Setup) ->
- couch_log:notice("receive_cookie: ~p~n", [remove_sensitive(Setup)]),
- Options = get_options([
- {cookie, <<"cookie">>}
- ], Setup),
- case setup:receive_cookie(Options) of
- {error, Error} ->
- {error, Error};
- _ -> ok
- end;
-
-handle_action(_, _) ->
- couch_log:notice("invalid_action: ~n", []),
- {error, <<"Invalid Action'">>}.
-
-
-get_body(Req) ->
- case catch couch_httpd:json_body_obj(Req) of
- {Body} ->
- Body;
- Else ->
- couch_log:notice("Body Fail: ~p~n", [Else]),
- couch_httpd:send_error(Req, 400, <<"bad_request">>, <<"Missing JSON body">>)
- end.
-
-remove_sensitive(KVList0) ->
- KVList1 = lists:keyreplace(<<"username">>, 1, KVList0, {<<"username">>, <<"****">>}),
- KVList2 = lists:keyreplace(<<"password">>, 1, KVList1, {<<"password">>, <<"****">>}),
- KVList2.
\ No newline at end of file
diff --git a/src/setup/src/setup_httpd_handlers.erl b/src/setup/src/setup_httpd_handlers.erl
deleted file mode 100644
index e26fbc3c4..000000000
--- a/src/setup/src/setup_httpd_handlers.erl
+++ /dev/null
@@ -1,32 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(setup_httpd_handlers).
-
--export([url_handler/1, db_handler/1, design_handler/1, handler_info/3]).
-
-url_handler(<<"_cluster_setup">>) -> fun setup_httpd:handle_setup_req/1;
-url_handler(_) -> no_match.
-
-db_handler(_) -> no_match.
-
-design_handler(_) -> no_match.
-
-
-handler_info('GET', [<<"_cluster_setup">>], _) ->
- {'cluster_setup.read', #{}};
-
-handler_info('POST', [<<"_cluster_setup">>], _) ->
- {'cluster_setup.write', #{}};
-
-handler_info(_, _, _) ->
- no_match.
\ No newline at end of file
diff --git a/src/setup/src/setup_sup.erl b/src/setup/src/setup_sup.erl
deleted file mode 100644
index 4670a0a59..000000000
--- a/src/setup/src/setup_sup.erl
+++ /dev/null
@@ -1,44 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(setup_sup).
-
--behaviour(supervisor).
-
-%% API
--export([start_link/0]).
-
-%% Supervisor callbacks
--export([init/1]).
-
-%% Helper macro for declaring children of supervisor
--define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}).
-
-%% ===================================================================
-%% API functions
-%% ===================================================================
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-%% ===================================================================
-%% Supervisor callbacks
-%% ===================================================================
-
-init([]) ->
- case config:get_boolean("couchdb", "single_node", false) of
- true ->
- setup:finish_cluster([]);
- false ->
- ok
- end,
- {ok, {{one_for_one, 5, 10}, couch_epi:register_service(setup_epi, [])}}.
diff --git a/src/setup/test/t-frontend-setup.sh b/src/setup/test/t-frontend-setup.sh
deleted file mode 100755
index e025cfba2..000000000
--- a/src/setup/test/t-frontend-setup.sh
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/bin/sh -ex
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-echo "To test, comment out the fake_uuid line in dev/run"
-
-HEADERS="-HContent-Type:application/json"
-# show cluster state:
-curl a:b@127.0.0.1:15986/_nodes/_all_docs
-
-# Enable Cluster on node A
-curl a:b@127.0.0.1:15984/_cluster_setup -d '{"action":"enable_cluster","username":"foo","password":"baz","bind_address":"0.0.0.0","node_count":2}' $HEADERS
-
-# Enable Cluster on node B
-curl a:b@127.0.0.1:15984/_cluster_setup -d '{"action":"enable_cluster","remote_node":"127.0.0.1","port":"25984","remote_current_user":"a","remote_current_password":"b","username":"foo","password":"baz","bind_address":"0.0.0.0","node_count":2}' $HEADERS
-
-# Add node B on node A
-curl a:b@127.0.0.1:15984/_cluster_setup -d '{"action":"add_node","username":"foo","password":"baz","host":"127.0.0.1","port":25984,"name":"node2"}' $HEADERS
-
-# Show cluster state:
-curl a:b@127.0.0.1:15986/_nodes/_all_docs
-
-# Show db doesn’t exist on node A
-curl a:b@127.0.0.1:15984/foo
-
-# Show db doesn’t exist on node B
-curl a:b@127.0.0.1:25984/foo
-
-# Create database (on node A)
-curl -X PUT a:b@127.0.0.1:15984/foo
-
-# Show db does exist on node A
-curl a:b@127.0.0.1:15984/foo
-
-# Show db does exist on node B
-curl a:b@127.0.0.1:25984/foo
-
-# Finish cluster
-curl a:b@127.0.0.1:15984/_cluster_setup -d '{"action":"finish_cluster"}' $HEADERS
-
-# Show system dbs exist on node A
-curl a:b@127.0.0.1:15984/_users
-curl a:b@127.0.0.1:15984/_replicator
-curl a:b@127.0.0.1:15984/_global_changes
-
-# Show system dbs exist on node B
-curl a:b@127.0.0.1:25984/_users
-curl a:b@127.0.0.1:25984/_replicator
-curl a:b@127.0.0.1:25984/_global_changes
-
-# Number of nodes is set to 2
-curl a:b@127.0.0.1:25984/_node/node2@127.0.0.1/_config/cluster/n
-
-# uuid and auth secret are the same
-curl a:b@127.0.0.1:15984/_node/node1@127.0.0.1/_config/couchdb/uuid
-curl a:b@127.0.0.1:15984/_node/node2@127.0.0.1/_config/couchdb/uuid
-
-curl a:b@127.0.0.1:15984/_node/node1@127.0.0.1/_config/couch_httpd_auth/secret
-curl a:b@127.0.0.1:15984/_node/node2@127.0.0.1/_config/couch_httpd_auth/secret
-
-
-echo "YAY ALL GOOD"
diff --git a/src/setup/test/t-single-node-auto-setup.sh b/src/setup/test/t-single-node-auto-setup.sh
deleted file mode 100755
index 0276990f5..000000000
--- a/src/setup/test/t-single-node-auto-setup.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/sh -ex
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-HEADERS="-HContent-Type:application/json"
-
-# Show cluster state:
-curl a:b@127.0.0.1:15986/_nodes/_all_docs
-curl a:b@127.0.0.1:15984/_all_dbs
-curl a:b@127.0.0.1:15984/_cluster_setup
-
-# Change the check
-curl -g 'a:b@127.0.0.1:15984/_cluster_setup?ensure_dbs_exist=["_replicator","_users"]'
-
-echo "YAY ALL GOOD"
diff --git a/src/setup/test/t-single-node.sh b/src/setup/test/t-single-node.sh
deleted file mode 100755
index d49043773..000000000
--- a/src/setup/test/t-single-node.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/sh -ex
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-HEADERS="-HContent-Type:application/json"
-# show cluster state:
-curl a:b@127.0.0.1:15986/_nodes/_all_docs
-curl a:b@127.0.0.1:15984/_cluster_setup
-
-# Enable Cluster on single node
-curl a:b@127.0.0.1:15984/_cluster_setup -d '{"action":"enable_single_node","username":"foo","password":"baz","bind_address":"127.0.0.1"}' $HEADERS
-
-# Show cluster state:
-curl a:b@127.0.0.1:15986/_nodes/_all_docs
-curl a:b@127.0.0.1:15984/_all_dbs
-curl a:b@127.0.0.1:15984/_cluster_setup
-
-# Delete a database
-curl -X DELETE a:b@127.0.0.1:15984/_global_changes
-
-# Should show single_node_disabled
-curl a:b@127.0.0.1:15984/_cluster_setup
-
-# Change the check
-curl -g 'a:b@127.0.0.1:15984/_cluster_setup?ensure_dbs_exist=["_replicator","_users"]'
-
-# delete all the things
-curl -X DELETE a:b@127.0.0.1:15984/_replicator
-curl -X DELETE a:b@127.0.0.1:15984/_users
-
-# setup only creating _users
-curl -g a:b@127.0.0.1:15984/_cluster_setup -d '{"action":"enable_single_node","username":"foo","password":"baz","bind_address":"127.0.0.1","ensure_dbs_exist":["_users"]}' $HEADERS
-
-# check it
-curl -g 'a:b@127.0.0.1:15984/_cluster_setup?ensure_dbs_exist=["_users"]'
-
-echo "YAY ALL GOOD"
diff --git a/src/setup/test/t.sh b/src/setup/test/t.sh
deleted file mode 100755
index 6bd74cdd7..000000000
--- a/src/setup/test/t.sh
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/bin/sh -ex
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-HEADERS="-HContent-Type:application/json"
-# show cluster state:
-curl a:b@127.0.0.1:15986/_nodes/_all_docs
-
-# Enable Cluster on node A
-curl a:b@127.0.0.1:15984/_cluster_setup -d '{"action":"enable_cluster","username":"foo","password":"baz","bind_address":"0.0.0.0","node_count":2}' $HEADERS
-
-# Enable Cluster on node B
-curl a:b@127.0.0.1:25984/_cluster_setup -d '{"action":"enable_cluster","username":"foo","password":"baz","bind_address":"0.0.0.0","node_count":2}' $HEADERS
-
-# Add node B on node A
-curl a:b@127.0.0.1:15984/_cluster_setup -d '{"action":"add_node","username":"foo","password":"baz","host":"127.0.0.1","port":25984,"name":"node2"}' $HEADERS
-
-# Show cluster state:
-curl a:b@127.0.0.1:15986/_nodes/_all_docs
-
-# Show db doesn’t exist on node A
-curl a:b@127.0.0.1:15984/foo
-
-# Show db doesn’t exist on node B
-curl a:b@127.0.0.1:25984/foo
-
-# Create database (on node A)
-curl -X PUT a:b@127.0.0.1:15984/foo
-
-# Show db does exist on node A
-curl a:b@127.0.0.1:15984/foo
-
-# Show db does exist on node B
-curl a:b@127.0.0.1:25984/foo
-
-# Finish cluster
-curl a:b@127.0.0.1:15984/_cluster_setup -d '{"action":"finish_cluster"}' $HEADERS
-
-# Show system dbs exist on node A
-curl a:b@127.0.0.1:15984/_users
-curl a:b@127.0.0.1:15984/_replicator
-curl a:b@127.0.0.1:15984/_metadata
-curl a:b@127.0.0.1:15984/_global_changes
-
-# Show system dbs exist on node B
-curl a:b@127.0.0.1:25984/_users
-curl a:b@127.0.0.1:25984/_replicator
-curl a:b@127.0.0.1:25984/_metadata
-curl a:b@127.0.0.1:25984/_global_changes
-
-# Number of nodes is set to 2
-curl a:b@127.0.0.1:25984/_node/node2@127.0.0.1/_config/cluster/n
-
-echo "YAY ALL GOOD"
diff --git a/src/smoosh/README.md b/src/smoosh/README.md
deleted file mode 100644
index 9f9a48074..000000000
--- a/src/smoosh/README.md
+++ /dev/null
@@ -1,140 +0,0 @@
-Smoosh
-======
-
-Smoosh is CouchDB's auto-compaction daemon. It is notified when
-databases and views are updated and may then elect to enqueue them for
-compaction.
-
-API
----
-
-All API functions are in smoosh.erl, and only the functions exported from
-that module should be called from outside of the smoosh application.
-
-Additionally, smoosh responds to config changes dynamically and these
-changes are the principal means of interacting with smoosh.
-
-Top-Level Settings
-------------------
-
-The main settings one interacts with are:
-
-<dl>
-<dt>db_channels<dd>A comma-separated list of channel names for
-databases.
-<dt>view_channels<dd>A comma-separated list of channel names for
-views.
-<dt>staleness<dd>The number of minutes that the (expensive) priority
-calculation can remain stale before it is recalculated. Defaults to 5.
-</dl>
-
-Sometimes it's necessary to use the following:
-
-<dl>
-<dt>cleanup_index_files</dt><dd>Whether smoosh cleans up the files
-for indexes that have been deleted. Defaults to false and probably
-shouldn't be changed unless the cluster is running low on disk space,
-and only after considering the ramifications.</dd>
-<dt>wait_secs</dt><dd>The time a channel waits before starting compactions
-to allow time to observe the system and make a smarter decision about what
-to compact first. Hardly ever changed from the default. Default 30 (seconds).
-</dd>
-</dl>
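-
-For example, to widen the staleness window to 10 minutes on the local node
-(the value here is purely illustrative):
-
-    config:set("smoosh", "staleness", "10").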
-
-Channel Settings
-----------------
-
-A channel has several important settings that control runtime
-behavior.
-
-<dl>
-<dt>capacity<dd>The maximum number of items the channel can hold (lowest priority item is removed to make room for new items). Defaults to 9999.
-<dt>concurrency<dd>The maximum number of jobs that can run concurrently. Defaults to 1.
-<dt>max_priority<dd>The item must have a priority lower than this to be enqueued. Defaults to infinity.
-<dt>max_size<dd>The item must be no larger than this many bytes in length to be enqueued. Defaults to infinity.
-<dt>min_priority<dd>The item must have a priority at least this high to be enqueued. Defaults to 5.0 for ratio and 16 MB for slack.
-<dt>min_changes<dd>The minimum number of changes since last compaction before the item will be enqueued. Defaults to 0. Currently only works for databases.
-<dt>min_size<dd>The item must be at least this many bytes in length to be enqueued. Defaults to 1 MB (1048576 bytes).
-<dt>priority<dd>The method used to calculate priority. Can be ratio (calculated as disk_size/data_size) or slack (calculated as disk_size-data_size). Defaults to ratio.
-</dl>
-
-Structure
----------
-
-Smoosh consists of a central gen_server (smoosh_server) which manages
-a number of subordinate smoosh_channel gen_servers. This is not
-properly managed by OTP yet.
-
-Compaction Scheduling Algorithm
--------------------------------
-
-Smoosh decides whether to compact a database or view by evaluating the
-item against the selection criteria of each _channel_ in the order
-they are configured. By default there are two channels for databases
-("ratio_dbs" and "slack_dbs"), and two channels for views ("ratio_views"
-and "slack_views")
-
-Smoosh will enqueue the new item to the first channel that accepts
-it. If none accept it, the item is not enqueued for compaction.
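-
-A minimal sketch of that selection loop (this is not the actual smoosh_server
-code; `accepts/2` here is a hypothetical stand-in for the per-channel checks
-described under Channel Settings above):
-
-    %% Return the first channel whose acceptance check passes for the item,
-    %% or none if every channel rejects it.
-    select_channel(_Item, []) ->
-        none;
-    select_channel(Item, [Channel | Rest]) ->
-        case accepts(Channel, Item) of
-            {ok, Priority} -> {Channel, Priority};
-            false -> select_channel(Item, Rest)
-        end.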
-
-Notes on the data_size value
-----------------------------
-
-Every database and view shard has a data_size value. In CouchDB this
-accurately reflects the post-compaction file size. In DbCore, it is
-the size of the file that we bill for. It excludes the b+tree and
-database footer overhead. We also bill customers for the uncompressed
-size of their documents, though we store them compressed on disk.
-These two systems were developed independently (ours predates
-CouchDB's) and DbCore only calculates the billing size value.
-
-Because of the way our data_size is currently calculated, it can
-sometimes be necessary to enqueue databases and views with very low
-ratios. Due to this, it is also currently impossible to tell how
-optimally compacted a cluster is.
-
-Example config commands
------------------------
-
-Change the set of database channels:
-
- config:set("smoosh", "db_channels", "small_dbs,medium_dbs,large_dbs").
-
-Change the set of database channels on all live nodes in the cluster:
-
- rpc:multicall(config, set, ["smoosh", "db_channels", "small_dbs,medium_dbs,large_dbs"]).
-
-Change the concurrency of the ratio_dbs database channel to 2:
-
- config:set("smoosh.ratio_dbs", "concurrency", "2").
-
-Change it on all live nodes in the cluster:
-
- rpc:multicall(config, set, ["smoosh.ratio_dbs", "concurrency", "2"]).
-
-Example API commands
---------------------
-
-smoosh:status()
-
-This prints the state of each channel: how many jobs it is
-currently running and how many jobs are enqueued (as well as the
-lowest and highest priority of those enqueued items). The idea is to
-provide, at a glance, sufficient insight into smoosh that an operator
-can assess whether smoosh is adequately targeting the reclaimable
-space in the cluster. In general, a healthy status output will have
-items in the ratio_dbs and ratio_views channels. Owing to the default
-settings, the slack_dbs and slack_views will almost certainly have
-items in them. Historically, we've not found that the slack channels,
-on their own, are particularly adept at keeping things well compacted.
-
-smoosh:enqueue_all_dbs(), smoosh:enqueue_all_views()
-
-These functions do just what they say, but they should not generally
-need to be called; smoosh is supposed to be autonomous. Call them if
-you get alerted to a disk space issue, as they might well help. If they
-do, that indicates a bug in smoosh, as it should already have enqueued
-eligible shards once they met the configured settings.
-
-
-
diff --git a/src/smoosh/operator_guide.md b/src/smoosh/operator_guide.md
deleted file mode 100644
index a0c981086..000000000
--- a/src/smoosh/operator_guide.md
+++ /dev/null
@@ -1,396 +0,0 @@
-# An operator's guide to smoosh
-
-Smoosh is the auto-compactor for the databases. It automatically selects and
-processes the compacting of database shards on each node.
-
-## Smoosh Channels
-
-Smoosh works using the concept of channels. A channel is essentially a queue of pending
-compactions. There are separate sets of channels for database and view compactions. Each
-channel is assigned a configuration which defines whether a compaction ends up in
-the channel's queue and how compactions are prioritised within that queue.
-
-Smoosh takes each channel and works through the compactions queued in each in priority
-order. Each channel is processed concurrently, so the priority levels only matter within
-a given channel.
-
-Finally, each channel has an assigned number of active compactions, which defines how
-many compactions happen for that channel in parallel. For example, a cluster with
-a lot of database churn but few views might require more active compactions
-for the database channel(s).
-
-It's important to remember that a channel is local to a dbcore node; that is,
-each node maintains and processes an independent set of compactions.
-
-### Channel configuration options
-
-#### Channel types
-
-Each channel has a basic type for the algorithm it uses to select pending
-compactions for its queue and how it prioritises them.
-
-The two queue types are:
-
-* **ratio**: this uses the ratio `total_bytes / user_bytes` as its driving
-calculation. The result _X_ must be greater than some configurable value _Y_ for a
-compaction to be added to the queue. Compactions are then prioritised for
-higher values of _X_.
-
-* **slack**: this uses `total_bytes - user_bytes` as its driving calculation.
-The result _X_ must be greater than some configurable value _Y_ for a compaction
-to be added to the queue. Compactions are prioritised for higher values of _X_.
-
-In both cases, _Y_ is set using the `min_priority` configuration variable. The
-calculation of _X_ is described in [Priority calculation](#priority-calculation), below.
-
-Both algorithms operate on two main measures:
-
-* **user_bytes**: this is the amount of data the user has in the file. It
-doesn't include storage overhead: old revisions, on-disk btree structure and
-so on.
-
-* **total_bytes**: the size of the file on disk.
-
-Channel type is set using the `priority` configuration setting.
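-
-As a worked example (the figures are purely illustrative), consider a shard
-whose file is 5 GiB on disk but holds only 1 GiB of live user data:
-
-```
-> 5368709120 / 1073741824.    % ratio: total_bytes / user_bytes
-5.0
-> 5368709120 - 1073741824.    % slack: total_bytes - user_bytes
-4294967296
-```
-
-With the default thresholds (see the [smoosh readme][srconfig]), a ratio
-channel would not accept this shard, since 5.0 is not strictly greater than
-the default ratio `min_priority` of 5.0, whereas a slack channel would, since
-4 GiB of slack is far above the default 16 MB slack threshold.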
-
-#### Further configuration options
-
-Beyond its basic type, there are several other configuration options which
-can be applied to a queue.
-
-*All options MUST be set as strings.* See the [smoosh readme][srconfig] for
-all settings and their defaults.
-
-#### Priority calculation
-
-The algorithm type and certain configuration options feed into the priority
-calculation.
-
-The priority is calculated when a compaction is enqueued. As each channel
-has a different configuration, each channel will end up with a different
-priority value. The enqueue code checks each channel in turn to see whether the
-compaction passes its configured priority threshold (`min_priority`). Once
-a channel is found that can accept the compaction, the compaction is added
-to that channel's queue and the enqueue process stops. Therefore the
-ordering of channels has a bearing on which channel a compaction ends up in.
-
-If you want to follow this along, the call order is all in `smoosh_server`,
-`enqueue_request -> find_channel -> get_priority`.
-
-The priority calculation is probably the easiest way to understand the effects
-of configuration variables. It's defined in `smoosh_server#get_priority/3`,
-currently [here][ss].
-
-[ss]: https://github.com/apache/couchdb-smoosh/blob/master/src/smoosh_server.erl#L277
-[srconfig]: https://github.com/apache/couchdb-smoosh#channel-settings
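-
-A simplified sketch of the heart of that calculation (the real smoosh_server
-code, linked above, additionally handles upgrade channels, `min_size`,
-`max_size` and `max_priority`):
-
-```
-%% Simplified: the priority a channel of the given type would assign to an
-%% item, or 0 when the item does not clear the channel's min_priority.
-priority(ratio, TotalBytes, UserBytes, MinPriority)
-        when UserBytes > 0, TotalBytes / UserBytes > MinPriority ->
-    TotalBytes / UserBytes;
-priority(slack, TotalBytes, UserBytes, MinPriority)
-        when TotalBytes - UserBytes > MinPriority ->
-    TotalBytes - UserBytes;
-priority(_Type, _TotalBytes, _UserBytes, _MinPriority) ->
-    0.
-```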
-
-#### Background Detail
-
-`user_bytes` is called `data_size` in `db_info` blocks. It is the total of all bytes
-that are used to store docs and their attachments.
-
-Since `.couch` files are append only, every update adds data to the file. When
-you update a btree, a new leaf node is written, along with new copies of all
-the nodes on the path back up to the root. Old data is never overwritten, so
-the superseded parts of the file are no longer live; this includes old btree
-nodes and document bodies.
-Compaction takes this file and writes a new file that only contains live data.
-
-`total_bytes` is the number of bytes in the file as reported by `ls -al filename`.
-
-#### Flaws
-
-An important flaw in this calculation is that `total_bytes` takes into account
-the compression of data on disk, whereas `user_bytes` does not. This can give
-unexpected results to calculations, as the values are not directly comparable.
-
-However, it's the best measure we currently have.
-
-[Even more info](https://github.com/apache/couchdb-smoosh#notes-on-the-data_size-value).
-
-
-### Defining a channel
-
-Defining a channel is done via normal dbcore configuration, with some
-convention as to the parameter names.
-
-Channel configuration is defined using `smoosh.channel_name` top level config
-options. Defining a channel is just setting the various options you want
-for the channel, then bringing it into smoosh's sets of active channels by
-adding it to either `db_channels` or `view_channels`.
-
-This means that smoosh channels can be defined either locally for a single
-node or globally across a cluster, by setting the configuration locally or
-globally. In the example below, we set up a new global channel.
-
-It's important to choose good channel names. There are some conventional ones:
-
-* `ratio_dbs`: a ratio channel for dbs, usually using the default settings.
-* `slack_dbs`: a slack channel for dbs, usually using the default settings.
-* `ratio_views`: a ratio channel for views, usually using the default settings.
-* `slack_views`: a slack channel for views, usually using the default settings.
-
-These four are defined by default if there are no others set ([source][source1]).
-
-[source1]: https://github.com/apache/couchdb-smoosh/blob/master/src/smoosh_server.erl#L75
-
-And some standard names for ones we often have to add:
-
-* `big_dbs`: a ratio channel for only enqueuing large database shards. What
- _large_ means is very workload specific.
-
-Channels have certain defaults for their configuration, defined in the
-[smoosh readme][srconfig]. It's only necessary to set up how this channel
-differs from those defaults. Below, we just need to set the `min_size` and
-`concurrency` settings, and allow the `priority` to default to `ratio`
-along with the other defaults.
-
-```bash
-# Define the new channel
-(couchdb@db1.foo.bar)3> s:set_config("smoosh.big_dbs", "min_size", "20000000000", global).
-{[ok,ok,ok],[]}
-(couchdb@db1.foo.bar)3> s:set_config("smoosh.big_dbs", "concurrency", "2", global).
-{[ok,ok,ok],[]}
-
-# Add the channel to the db_channels set -- note we need to get the original
-# value first so we can add the new one to the existing list!
-(couchdb@db1.foo.bar)5> s:get_config("smoosh", "db_channels", global).
-{[{'couchdb@db1.foo.bar',"ratio_dbs"},
-{'couchdb@db3.foo.bar',"ratio_dbs"},
-{'couchdb@db2.foo.bar',"ratio_dbs"}],
-[]}
-(couchdb@db1.foo.bar)6> s:set_config("smoosh", "db_channels", "ratio_dbs,big_dbs", global).
-{[ok,ok,ok],[]}
-```
-
-### Viewing active channels
-
-```bash
-(couchdb@db3.foo.bar)3> s:get_config("smoosh", "db_channels", global).
-{[{'couchdb@db3.foo.bar',"ratio_dbs,big_dbs"},
- {'couchdb@db1.foo.bar',"ratio_dbs,big_dbs"},
- {'couchdb@db2.foo.bar',"ratio_dbs,big_dbs"}],
- []}
-(couchdb@db3.foo.bar)4> s:get_config("smoosh", "view_channels", global).
-{[{'couchdb@db3.foo.bar',"ratio_views"},
- {'couchdb@db1.foo.bar',"ratio_views"},
- {'couchdb@db2.foo.bar',"ratio_views"}],
- []}
-```
-
-### Removing a channel
-
-```bash
-# Remove it from the active set
-(couchdb@db1.foo.bar)5> s:get_config("smoosh", "db_channels", global).
-{[{'couchdb@db1.foo.bar',"ratio_dbs,big_dbs"},
-{'couchdb@db3.foo.bar',"ratio_dbs,big_dbs"},
-{'couchdb@db2.foo.bar',"ratio_dbs,big_dbs"}],
-[]}
-(couchdb@db1.foo.bar)6> s:set_config("smoosh", "db_channels", "ratio_dbs", global).
-{[ok,ok,ok],[]}
-
-# Delete the config -- you need to delete each value separately
-(couchdb@db1.foo.bar)3> rpc:multicall(config, delete, ["smoosh.big_dbs", "concurrency"]).
-{[ok,ok,ok],[]}
-(couchdb@db1.foo.bar)3> rpc:multicall(config, delete, ["smoosh.big_dbs", "min_size"]).
-{[ok,ok,ok],[]}
-```
-
-### Getting channel configuration
-
-As far as I know, you have to get each setting separately:
-
-```
-(couchdb@db1.foo.bar)1> s:get_config("smoosh.big_dbs", "concurrency", global).
-{[{'couchdb@db3.foo.bar',"2"},
- {'couchdb@db1.foo.bar',"2"},
- {'couchdb@db2.foo.bar',"2"}],
- []}
-
-```
-
-### Setting channel configuration
-
-As when defining a channel, you just need to set the new value:
-
-```
-(couchdb@db1.foo.bar)2> s:set_config("smoosh.ratio_dbs", "concurrency", "1", global).
-{[ok,ok,ok],[]}
-```
-
-It sometimes takes a little while to take effect.
-
-
-
-## Standard operating procedures
-
-There are a few standard things that operators often have to do when responding
-to pages.
-
-In addition to the below, in some circumstances it's useful to define new
-channels with certain properties (`big_dbs` is a common one) if smoosh isn't
-selecting and prioritising compactions that well.
-
-### Checking smoosh's status
-
-You can see the queued items for each channel by going into `remsh` on a node
-and using:
-
-```
-> smoosh:status().
-{ok,[{"ratio_dbs",
- [{active,1},
- {starting,0},
- {waiting,[{size,522},
- {min,{5.001569007970237,{1378,394651,323864}}},
- {max,{981756.5441159063,{1380,370286,655752}}}]}]},
- {"slack_views",
- [{active,1},
- {starting,0},
- {waiting,[{size,819},
- {min,{16839814,{1375,978920,326458}}},
- {max,{1541336279,{1380,370205,709896}}}]}]},
- {"slack_dbs",
- [{active,1},
- {starting,0},
- {waiting,[{size,286},
- {min,{19004944,{1380,295245,887295}}},
- {max,{48770817098,{1380,370185,876596}}}]}]},
- {"ratio_views",
- [{active,1},
- {starting,0},
- {waiting,[{size,639},
- {min,{5.0126340031149335,{1380,186581,445489}}},
- {max,{10275.555632057285,{1380,370411,421477}}}]}]}]}
-```
-
-This gives you the node-local status for each queue.
-
-Under each channel there is some information about the channel:
-
-* `active`: number of current compactions in the channel.
-* `starting`: number of compactions starting-up.
-* `waiting`: number of queued compactions.
- * `min` and `max` give an idea of the queued jobs' effectiveness. The values
- for these are obviously dependent on whether the queue is ratio or slack.
-
-For ratio queues, the default minimum for smoosh to enqueue a compaction is 5. In
-the example above, we can guess that 981,756 is quite high. This could be a
-small database, however, so it doesn't necessarily mean useful compactions
-from the point of view of reclaiming disk space.
-
-For this example, we can see that there are quite a lot of queued compactions,
-but we don't know which would be most effective to run to reclaim disk space.
-It's also worth noting that the waiting queue sizes are only meaningful
-relative to other factors on the cluster (e.g., db number and size).
-
-
-### Smoosh IOQ priority
-
-This is a global setting which affects all channels. Increasing it allows each
-active compaction to (hopefully) proceed faster as the compaction work is of
-a higher priority relative to other jobs. Decreasing it (hopefully) has the
-converse effect.
-
-By this point you'll [know whether smoosh is backing up](#checking-smooshs-status).
-If it's falling behind (big queues), try increasing compaction priority.
-
-Smoosh's IOQ priority is controlled via the `ioq` -> `compaction` queue.
-
-```
-> s:get_config("ioq", "compaction", global).
-{[{'couchdb@db1.foo.bar',undefined},
- {'couchdb@db2.foo.bar',undefined},
- {'couchdb@db3.foo.bar',undefined}],
- []}
-
-```
-
-Priority by convention runs 0 to 1, though the priority can be any positive
-number. The default for compaction is 0.01; pretty low.
-
-If it looks like smoosh has a bunch of work that it's not getting
-through, priority can be increased. However, be careful that this
-doesn't adversely impact the customer experience. If it will, and
-it's urgent, at least drop them a warning.
-
-```
-> s:set_config("ioq", "compaction", "0.5", global).
-{[ok,ok,ok],[]}
-```
-
-In general, this should be a temporary measure. For some clusters,
-a change from the default may be required to help smoosh keep up
-with particular workloads.
-
-### Granting specific channels more workers
-
-Giving smoosh a higher concurrency for a given channel can allow a backlog
-in that channel to catch up.
-
-Again, some clusters run best with specific channels having more workers.
-
-From [assessing disk space](#assess-the-space-on-the-disk), you should
-know whether the biggest offenders are db or view files. From this,
-you can infer whether it's worth giving a specific smoosh channel a
-higher concurrency.
-
-The current setting can be seen for a channel like so:
-
-```
-> s:get_config("smoosh.ratio_dbs", "concurrency", global).
-{[{'couchdb@db1.foo.bar',undefined},
- {'couchdb@db2.foo.bar',undefined},
- {'couchdb@db3.foo.bar',undefined}],
- []}
-```
-
-`undefined` means the default is used.
-
-If we knew that disk space for DBs was the major user of disk space, we might
-want to increase a `_dbs` channel. Experience shows `ratio_dbs` is often best
-but evaluate this based on the current status.
-
-If we want to increase the ratio_dbs setting:
-
-```
-> s:set_config("smoosh.ratio_dbs", "concurrency", "2", global).
-{[ok,ok,ok],[]}
-```
-
-### Suspending smoosh
-
-If smoosh itself is causing issues, it's possible to suspend its operation.
-This differs from either `application:stop(smoosh).` or setting every channel's
-concurrency to zero in that it both pauses ongoing compactions and keeps
-the channel queues intact.
-
-If, for example, a node's compactions are causing disk space issues, smoosh
-could be suspended while working out which channel is causing the problem. A
-big_dbs channel, for instance, might be creating huge compaction-in-progress
-files if there's not much in the shard to compact away.
-
-It's therefore a useful step when testing whether smoosh is causing a
-problem.
-
-```
-# suspend
-smoosh:suspend().
-
-# resume a suspended smoosh
-smoosh:resume().
-```
-
-Suspend is currently pretty literal: `erlang:suspend_process(Pid, [unless_suspending])`
-is called for each compaction process in each channel. `resume_process` is called
-for resume.
-
-### Restarting Smoosh
-
-Restarting Smoosh is a long shot: a brute-force approach in the hope that,
-when Smoosh rescans the DBs, it makes the right decisions. If you need to
-take this step, contact rnewson or davisp so that they can inspect Smoosh
-and find the bug.
-
-```
-> exit(whereis(smoosh_server), kill), smoosh:enqueue_all_dbs(), smoosh:enqueue_all_views().
-```
diff --git a/src/smoosh/src/smoosh.app.src b/src/smoosh/src/smoosh.app.src
deleted file mode 100644
index a6cdb7f5e..000000000
--- a/src/smoosh/src/smoosh.app.src
+++ /dev/null
@@ -1,29 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, smoosh,
- [
- {description, "Auto-compaction daemon"},
- {vsn, git},
- {registered, [smoosh_server]},
- {applications, [
- kernel,
- stdlib,
- couch_log,
- config,
- couch_event,
- couch,
- mem3
- ]},
- {mod, { smoosh_app, []}},
- {env, []}
- ]}.
diff --git a/src/smoosh/src/smoosh.erl b/src/smoosh/src/smoosh.erl
deleted file mode 100644
index 676e7faad..000000000
--- a/src/smoosh/src/smoosh.erl
+++ /dev/null
@@ -1,69 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(smoosh).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--export([suspend/0, resume/0, enqueue/1, status/0]).
--export([enqueue_all_dbs/0, enqueue_all_dbs/1, enqueue_all_views/0]).
-
-suspend() ->
- smoosh_server:suspend().
-
-resume() ->
- smoosh_server:resume().
-
-enqueue(Object) ->
- smoosh_server:enqueue(Object).
-
-sync_enqueue(Object) ->
- smoosh_server:sync_enqueue(Object).
-
-sync_enqueue(Object, Timeout) ->
- smoosh_server:sync_enqueue(Object, Timeout).
-
-status() ->
- smoosh_server:status().
-
-enqueue_all_dbs() ->
- fold_local_shards(fun(#shard{name=Name}, _Acc) ->
- sync_enqueue(Name) end, ok).
-
-enqueue_all_dbs(Timeout) ->
- fold_local_shards(fun(#shard{name=Name}, _Acc) ->
- sync_enqueue(Name, Timeout) end, ok).
-
-enqueue_all_views() ->
- fold_local_shards(fun(#shard{name=Name}, _Acc) ->
- catch enqueue_views(Name) end, ok).
-
-fold_local_shards(Fun, Acc0) ->
- mem3:fold_shards(fun(Shard, Acc1) ->
- case node() == Shard#shard.node of
- true ->
- Fun(Shard, Acc1);
- false ->
- Acc1
- end
- end, Acc0).
-
-enqueue_views(ShardName) ->
- DbName = mem3:dbname(ShardName),
- {ok, DDocs} = fabric:design_docs(DbName),
- [sync_enqueue({ShardName, id(DDoc)}) || DDoc <- DDocs].
-
-id(#doc{id=Id}) ->
- Id;
-id({Props}) ->
- couch_util:get_value(<<"_id">>, Props).
diff --git a/src/smoosh/src/smoosh_app.erl b/src/smoosh/src/smoosh_app.erl
deleted file mode 100644
index eba3579fe..000000000
--- a/src/smoosh/src/smoosh_app.erl
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(smoosh_app).
-
--behaviour(application).
-
-%% Application callbacks
--export([start/2, stop/1]).
-
-%% ===================================================================
-%% Application callbacks
-%% ===================================================================
-
-start(_StartType, _StartArgs) ->
- smoosh_sup:start_link().
-
-stop(_State) ->
- ok.
diff --git a/src/smoosh/src/smoosh_channel.erl b/src/smoosh/src/smoosh_channel.erl
deleted file mode 100644
index 2bc98be9d..000000000
--- a/src/smoosh/src/smoosh_channel.erl
+++ /dev/null
@@ -1,325 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(smoosh_channel).
--behaviour(gen_server).
--vsn(1).
--include_lib("couch/include/couch_db.hrl").
-
-% public api.
--export([start_link/1, close/1, suspend/1, resume/1, get_status/1]).
--export([enqueue/3, last_updated/2, flush/1]).
-
-% gen_server api.
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- code_change/3, terminate/2]).
-
-% records.
-
--record(state, {
- active=[],
- name,
- waiting=smoosh_priority_queue:new(),
- paused=true,
- starting=[]
-}).
-
-% public functions.
-
-start_link(Name) ->
- gen_server:start_link(?MODULE, Name, []).
-
-suspend(ServerRef) ->
- gen_server:call(ServerRef, suspend).
-
-resume(ServerRef) ->
- gen_server:call(ServerRef, resume).
-
-enqueue(ServerRef, Object, Priority) ->
- gen_server:cast(ServerRef, {enqueue, Object, Priority}).
-
-last_updated(ServerRef, Object) ->
- gen_server:call(ServerRef, {last_updated, Object}).
-
-get_status(ServerRef) ->
- gen_server:call(ServerRef, status).
-
-close(ServerRef) ->
- gen_server:call(ServerRef, close).
-
-flush(ServerRef) ->
- gen_server:call(ServerRef, flush).
-
-% gen_server functions.
-
-init(Name) ->
- schedule_unpause(),
- erlang:send_after(60 * 1000, self(), check_window),
- {ok, #state{name=Name}}.
-
-handle_call({last_updated, Object}, _From, State0) ->
- {ok, State} = code_change(nil, State0, nil),
- LastUpdated = smoosh_priority_queue:last_updated(Object, State#state.waiting),
- {reply, LastUpdated, State};
-
-handle_call(suspend, _From, State0) ->
- {ok, State} = code_change(nil, State0, nil),
- #state{active = Active} = State,
- [catch erlang:suspend_process(Pid, [unless_suspending])
- || {_,Pid} <- Active],
- {reply, ok, State#state{paused=true}};
-
-handle_call(resume, _From, State0) ->
- {ok, State} = code_change(nil, State0, nil),
- #state{active = Active} = State,
- [catch erlang:resume_process(Pid) || {_,Pid} <- Active],
- {reply, ok, State#state{paused=false}};
-
-handle_call(status, _From, State0) ->
- {ok, State} = code_change(nil, State0, nil),
- {reply, {ok, [
- {active, length(State#state.active)},
- {starting, length(State#state.starting)},
- {waiting, smoosh_priority_queue:info(State#state.waiting)}
- ]}, State};
-
-handle_call(close, _From, State0) ->
- {ok, State} = code_change(nil, State0, nil),
- {stop, normal, ok, State};
-
-handle_call(flush, _From, State0) ->
- {ok, State} = code_change(nil, State0, nil),
- {reply, ok, State#state{waiting=smoosh_priority_queue:new()}}.
-
-handle_cast({enqueue, _Object, 0}, State0) ->
- {ok, State} = code_change(nil, State0, nil),
- {noreply, State};
-handle_cast({enqueue, Object, Priority}, State0) ->
- {ok, State} = code_change(nil, State0, nil),
- {noreply, maybe_start_compaction(add_to_queue(Object, Priority, State))}.
-
-% We accept noproc here due to possibly having monitored a restarted compaction
-% pid after it finished.
-handle_info({'DOWN', Ref, _, Job, Reason}, State0) when Reason == normal;
- Reason == noproc ->
- {ok, State} = code_change(nil, State0, nil),
- #state{active=Active, starting=Starting} = State,
- {noreply, maybe_start_compaction(
- State#state{active=lists:keydelete(Job, 2, Active),
- starting=lists:keydelete(Ref, 1, Starting)})};
-
-handle_info({'DOWN', Ref, _, Job, Reason}, State0) ->
- {ok, State} = code_change(nil, State0, nil),
- #state{active=Active0, starting=Starting0} = State,
- case lists:keytake(Job, 2, Active0) of
- {value, {Key, _Pid}, Active1} ->
- State1 = maybe_remonitor_cpid(State#state{active=Active1}, Key,
- Reason),
- {noreply, maybe_start_compaction(State1)};
- false ->
- case lists:keytake(Ref, 1, Starting0) of
- {value, {_, Key}, Starting1} ->
- couch_log:warning("failed to start compaction of ~p: ~p", [
- smoosh_utils:stringify(Key), Reason]),
- {ok, _} = timer:apply_after(5000, smoosh_server, enqueue, [Key]),
- {noreply, maybe_start_compaction(State#state{starting=Starting1})};
- false ->
- {noreply, State}
- end
- end;
-
-handle_info({Ref, {ok, Pid}}, State0) when is_reference(Ref) ->
- {ok, State} = code_change(nil, State0, nil),
- case lists:keytake(Ref, 1, State#state.starting) of
- {value, {_, Key}, Starting1} ->
- couch_log:notice("~s: Started compaction for ~s",
- [State#state.name, smoosh_utils:stringify(Key)]),
- erlang:monitor(process, Pid),
- erlang:demonitor(Ref, [flush]),
- {noreply, State#state{active=[{Key, Pid}|State#state.active],
- starting=Starting1}};
- false ->
- {noreply, State}
- end;
-
-handle_info(check_window, State0) ->
- {ok, State} = code_change(nil, State0, nil),
- #state{paused = Paused, name = Name} = State,
- StrictWindow = smoosh_utils:get(Name, "strict_window", "false"),
- FinalState = case {not Paused, smoosh_utils:in_allowed_window(Name)} of
- {false, false} ->
- % already in desired state
- State;
- {true, true} ->
- % already in desired state
- State;
- {false, true} ->
- % resume is always safe even if we did not previously suspend
- {reply, ok, NewState} = handle_call(resume, nil, State),
- NewState;
- {true, false} ->
- if StrictWindow =:= "true" ->
- {reply, ok, NewState} = handle_call(suspend, nil, State),
- NewState;
- true ->
- State#state{paused=true}
- end
- end,
- erlang:send_after(60 * 1000, self(), check_window),
- {noreply, FinalState};
-
-handle_info(pause, State0) ->
- {ok, State} = code_change(nil, State0, nil),
- {noreply, State#state{paused=true}};
-handle_info(unpause, State0) ->
- {ok, State} = code_change(nil, State0, nil),
- {noreply, maybe_start_compaction(State#state{paused=false})}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, #state{}=State, _Extra) ->
- {ok, State}.
-
-% private functions.
-
-add_to_queue(Key, Priority, State) ->
- #state{active=Active,waiting=Q} = State,
- case lists:keymember(Key, 1, Active) of
- true ->
- State;
- false ->
- Capacity = list_to_integer(smoosh_utils:get(State#state.name, "capacity", "9999")),
- couch_log:notice(
- "~s: adding ~p to internal compactor queue with priority ~p",
- [State#state.name, Key, Priority]),
- State#state{
- waiting=smoosh_priority_queue:in(Key, Priority, Priority, Capacity, Q)
- }
- end.
-
-maybe_start_compaction(#state{paused=true}=State) ->
- State;
-maybe_start_compaction(State) ->
- Concurrency = list_to_integer(smoosh_utils:get(State#state.name,
- "concurrency", "1")),
- if length(State#state.active) + length(State#state.starting) < Concurrency ->
- case smoosh_priority_queue:out(State#state.waiting) of
- false ->
- State;
- {Key, Priority, Q} ->
- try
- State2 = case start_compact(State, Key) of
- false ->
- State;
- State1 ->
- couch_log:notice(
- "~s: Starting compaction for ~s (priority ~p)",
- [State#state.name, smoosh_utils:stringify(Key), Priority]),
- State1
- end,
- maybe_start_compaction(State2#state{waiting=Q})
- catch Class:Exception ->
- couch_log:notice("~s: ~p ~p for ~s",
- [State#state.name, Class, Exception,
- smoosh_utils:stringify(Key)]),
- maybe_start_compaction(State#state{waiting=Q})
- end
- end;
- true ->
- State
- end.
-
-start_compact(State, {schema, DbName, GroupId}) ->
- case smoosh_utils:ignore_db({DbName, GroupId}) of
- false ->
- {ok, Pid} = couch_md_index_manager:get_group_pid(DbName,
- GroupId),
- Ref = erlang:monitor(process, Pid),
- Pid ! {'$gen_call', {self(), Ref}, compact},
- State#state{starting=[{Ref, {schema, DbName,
- GroupId}} | State#state.starting]};
- _ ->
- false
- end;
-
-start_compact(State, DbName) when is_list(DbName) ->
- start_compact(State, ?l2b(DbName));
-start_compact(State, DbName) when is_binary(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- try start_compact(State, Db) after couch_db:close(Db) end;
-start_compact(State, {Shard,GroupId}) ->
- case smoosh_utils:ignore_db({Shard, GroupId}) of
- false ->
- DbName = mem3:dbname(Shard),
- {ok, Pid} = couch_index_server:get_index(
- couch_mrview_index, Shard, GroupId),
- spawn(fun() -> cleanup_index_files(DbName, Shard) end),
- Ref = erlang:monitor(process, Pid),
- Pid ! {'$gen_call', {self(), Ref}, compact},
- State#state{starting=[{Ref, {Shard, GroupId}}|State#state.starting]};
- _ ->
- false
- end;
-start_compact(State, Db) ->
- case smoosh_utils:ignore_db(Db) of
- false ->
- DbPid = couch_db:get_pid(Db),
- Key = couch_db:name(Db),
- case couch_db:get_compactor_pid(Db) of
- nil ->
- Ref = erlang:monitor(process, DbPid),
- DbPid ! {'$gen_call', {self(), Ref}, start_compact},
- State#state{starting=[{Ref, Key}|State#state.starting]};
- % Compaction is already running, so monitor existing compaction pid.
- CPid ->
- couch_log:notice("Db ~s continuing compaction",
- [smoosh_utils:stringify(Key)]),
- erlang:monitor(process, CPid),
- State#state{active=[{Key, CPid}|State#state.active]}
- end;
- _ ->
- false
- end.
-
-maybe_remonitor_cpid(State, DbName, Reason) when is_binary(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- case couch_db:get_compactor_pid_sync(Db) of
- nil ->
- couch_log:warning("exit for compaction of ~p: ~p",
- [smoosh_utils:stringify(DbName), Reason]),
- {ok, _} = timer:apply_after(5000, smoosh_server, enqueue, [DbName]),
- State;
- CPid ->
- couch_log:notice("~s compaction already running. Re-monitor Pid ~p",
- [smoosh_utils:stringify(DbName), CPid]),
- erlang:monitor(process, CPid),
- State#state{active=[{DbName, CPid}|State#state.active]}
- end;
-% not a database compaction, so ignore the pid check
-maybe_remonitor_cpid(State, Key, Reason) ->
- couch_log:warning("exit for compaction of ~p: ~p",
- [smoosh_utils:stringify(Key), Reason]),
- {ok, _} = timer:apply_after(5000, smoosh_server, enqueue, [Key]),
- State.
-
-schedule_unpause() ->
- WaitSecs = list_to_integer(config:get("smoosh", "wait_secs", "30")),
- erlang:send_after(WaitSecs * 1000, self(), unpause).
-
-cleanup_index_files(DbName, _Shard) ->
- case config:get("smoosh", "cleanup_index_files", "false") of
- "true" ->
- fabric:cleanup_index_files(DbName);
- _ ->
- ok
- end.
diff --git a/src/smoosh/src/smoosh_priority_queue.erl b/src/smoosh/src/smoosh_priority_queue.erl
deleted file mode 100644
index 6376103d9..000000000
--- a/src/smoosh/src/smoosh_priority_queue.erl
+++ /dev/null
@@ -1,86 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(smoosh_priority_queue).
-
--export([new/0, last_updated/2, is_key/2, in/4, in/5, out/1, size/1, info/1]).
-
--record(priority_queue, {
- dict=dict:new(),
- tree=gb_trees:empty()
-}).
-
-new() ->
- #priority_queue{}.
-
-last_updated(Key, #priority_queue{dict=Dict}) ->
- case dict:find(Key, Dict) of
- {ok, {_Priority, {LastUpdatedMTime, _MInt}}} ->
- LastUpdatedMTime;
- error ->
- false
- end.
-
-is_key(Key, #priority_queue{dict=Dict}) ->
- dict:is_key(Key, Dict).
-
-in(Key, Value, Priority, Q) ->
- in(Key, Value, Priority, infinity, Q).
-
-in(Key, Value, Priority, Capacity, #priority_queue{dict=Dict, tree=Tree}) ->
- Tree1 = case dict:find(Key, Dict) of
- {ok, TreeKey} ->
- gb_trees:delete_any(TreeKey, Tree);
- error ->
- Tree
- end,
- Now = {erlang:monotonic_time(), erlang:unique_integer([monotonic])},
- TreeKey1 = {Priority, Now},
- Tree2 = gb_trees:enter(TreeKey1, {Key, Value}, Tree1),
- Dict1 = dict:store(Key, TreeKey1, Dict),
- truncate(Capacity, #priority_queue{dict=Dict1, tree=Tree2}).
-
-out(#priority_queue{dict=Dict,tree=Tree}) ->
- case gb_trees:is_empty(Tree) of
- true ->
- false;
- false ->
- {_, {Key, Value}, Tree1} = gb_trees:take_largest(Tree),
- Dict1 = dict:erase(Key, Dict),
- {Key, Value, #priority_queue{dict=Dict1, tree=Tree1}}
- end.
-
-size(#priority_queue{tree=Tree}) ->
- gb_trees:size(Tree).
-
-info(#priority_queue{tree=Tree}=Q) ->
- [{size, ?MODULE:size(Q)}|
- case gb_trees:is_empty(Tree) of
- true ->
- [];
- false ->
- {Min, _, _} = gb_trees:take_smallest(Tree),
- {Max, _, _} = gb_trees:take_largest(Tree),
- [{min, Min}, {max, Max}]
- end].
-
-truncate(infinity, Q) ->
- Q;
-truncate(Capacity, Q) when Capacity > 0 ->
- truncate(Capacity, ?MODULE:size(Q), Q).
-
-truncate(Capacity, Size, Q) when Size =< Capacity ->
- Q;
-truncate(Capacity, Size, #priority_queue{dict=Dict, tree=Tree}) when Size > 0 ->
- {_, {Key, _}, Tree1} = gb_trees:take_smallest(Tree),
- Q1 = #priority_queue{dict=dict:erase(Key, Dict), tree=Tree1},
- truncate(Capacity, ?MODULE:size(Q1), Q1).
diff --git a/src/smoosh/src/smoosh_server.erl b/src/smoosh/src/smoosh_server.erl
deleted file mode 100644
index f9c5210db..000000000
--- a/src/smoosh/src/smoosh_server.erl
+++ /dev/null
@@ -1,606 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(smoosh_server).
--behaviour(gen_server).
--vsn(4).
--behaviour(config_listener).
--include_lib("couch/include/couch_db.hrl").
-
-% public api.
--export([
- start_link/0,
- suspend/0,
- resume/0,
- enqueue/1,
- sync_enqueue/1,
- sync_enqueue/2,
- handle_db_event/3,
- status/0
-]).
-
--define(SECONDS_PER_MINUTE, 60).
-
-% gen_server api.
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- code_change/3, terminate/2]).
-
-% config_listener api
--export([handle_config_change/5, handle_config_terminate/3]).
-
-% exported but for internal use.
--export([enqueue_request/2]).
-
--ifdef(TEST).
--define(RELISTEN_DELAY, 50).
--else.
--define(RELISTEN_DELAY, 5000).
--endif.
-
-% private records.
-
--record(state, {
- db_channels=[],
- view_channels=[],
- schema_channels=[],
- tab,
- event_listener,
- waiting=dict:new()
-}).
-
--record(channel, {
- name,
- pid
-}).
-
-% public functions.
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-suspend() ->
- gen_server:call(?MODULE, suspend).
-
-resume() ->
- gen_server:call(?MODULE, resume).
-
-status() ->
- gen_server:call(?MODULE, status).
-
-enqueue(Object) ->
- gen_server:cast(?MODULE, {enqueue, Object}).
-
-sync_enqueue(Object) ->
- gen_server:call(?MODULE, {enqueue, Object}).
-
-sync_enqueue(Object, Timeout) ->
- gen_server:call(?MODULE, {enqueue, Object}, Timeout).
-
-handle_db_event(DbName, local_updated, St) ->
- smoosh_server:enqueue(DbName),
- {ok, St};
-handle_db_event(DbName, updated, St) ->
- smoosh_server:enqueue(DbName),
- {ok, St};
-handle_db_event(DbName, {index_commit, IdxName}, St) ->
- smoosh_server:enqueue({DbName, IdxName}),
- {ok, St};
-handle_db_event(DbName, {schema_updated, DDocId}, St) ->
- smoosh_server:enqueue({schema, DbName, DDocId}),
- {ok, St};
-handle_db_event(_DbName, _Event, St) ->
- {ok, St}.
-
-% gen_server functions.
-
-init([]) ->
- process_flag(trap_exit, true),
- ok = config:listen_for_changes(?MODULE, nil),
- {ok, Pid} = start_event_listener(),
- DbChannels = smoosh_utils:split(
- config:get("smoosh", "db_channels", "upgrade_dbs,ratio_dbs,slack_dbs")),
- ViewChannels = smoosh_utils:split(
- config:get("smoosh", "view_channels", "upgrade_views,ratio_views,slack_views")),
- SchemaChannels = smoosh_utils:split(config:get("smoosh",
- "schema_channels", "ratio_schemas,slack_schemas")),
- Tab = ets:new(channels, [{keypos, #channel.name}]),
- {ok, create_missing_channels(#state{
- db_channels=DbChannels,
- view_channels=ViewChannels,
- schema_channels=SchemaChannels,
- event_listener=Pid,
- tab=Tab
- })}.
-
-handle_config_change("smoosh", "db_channels", L, _, _) ->
- {ok, gen_server:cast(?MODULE, {new_db_channels, smoosh_utils:split(L)})};
-handle_config_change("smoosh", "view_channels", L, _, _) ->
- {ok, gen_server:cast(?MODULE, {new_view_channels, smoosh_utils:split(L)})};
-handle_config_change("smoosh", "schema_channels", L, _, _) ->
- {ok, gen_server:cast(?MODULE, {new_schema_channels, smoosh_utils:split(L)})};
-handle_config_change(_, _, _, _, _) ->
- {ok, nil}.
-
-handle_config_terminate(_Server, stop, _State) ->
- ok;
-handle_config_terminate(_Server, _Reason, _State) ->
- erlang:send_after(?RELISTEN_DELAY,
- whereis(?MODULE), restart_config_listener).
-
-handle_call(status, _From, State) ->
- Acc = ets:foldl(fun get_channel_status/2, [], State#state.tab),
- {reply, {ok, Acc}, State};
-
-handle_call({enqueue, Object}, _From, State) ->
- {noreply, NewState} = handle_cast({enqueue, Object}, State),
- {reply, ok, NewState};
-
-handle_call(suspend, _From, State) ->
- ets:foldl(fun(#channel{name=Name, pid=P}, _) ->
- couch_log:notice("Suspending ~p", [Name]),
- smoosh_channel:suspend(P) end, 0,
- State#state.tab),
- {reply, ok, State};
-
-handle_call(resume, _From, State) ->
- ets:foldl(fun(#channel{name=Name, pid=P}, _) ->
- couch_log:notice("Resuming ~p", [Name]),
- smoosh_channel:resume(P) end, 0,
- State#state.tab),
- {reply, ok, State}.
-
-handle_cast({new_db_channels, Channels}, State) ->
- [smoosh_channel:close(channel_pid(State#state.tab, C)) ||
- C <- State#state.db_channels -- Channels],
- {noreply, create_missing_channels(State#state{db_channels=Channels})};
-
-handle_cast({new_view_channels, Channels}, State) ->
- [smoosh_channel:close(channel_pid(State#state.tab, C)) ||
- C <- State#state.view_channels -- Channels],
- {noreply, create_missing_channels(State#state{view_channels=Channels})};
-
-handle_cast({new_schema_channels, Channels}, State) ->
- [smoosh_channel:close(channel_pid(State#state.tab, C)) ||
- C <- State#state.schema_channels -- Channels],
-    {noreply, create_missing_channels(State#state{schema_channels=Channels})};
-
-handle_cast({enqueue, Object}, State) ->
- #state{waiting=Waiting}=State,
- case dict:is_key(Object, Waiting) of
- true ->
- {noreply, State};
- false ->
- {_Pid, Ref} = spawn_monitor(?MODULE, enqueue_request, [State, Object]),
- {noreply, State#state{waiting=dict:store(Object, Ref, Waiting)}}
- end.
-
-handle_info({'EXIT', Pid, Reason}, #state{event_listener=Pid}=State) ->
- couch_log:notice("update notifier died ~p", [Reason]),
- {ok, Pid1} = start_event_listener(),
- {noreply, State#state{event_listener=Pid1}};
-handle_info({'EXIT', Pid, Reason}, State) ->
- couch_log:notice("~p ~p died ~p", [?MODULE, Pid, Reason]),
- case ets:match_object(State#state.tab, #channel{pid=Pid, _='_'}) of
- [#channel{name=Name}] ->
- ets:delete(State#state.tab, Name);
- _ ->
- ok
- end,
- {noreply, create_missing_channels(State)};
-
-handle_info({'DOWN', Ref, _, _, _}, State) ->
- Waiting = dict:filter(fun(_Key, Value) -> Value =/= Ref end,
- State#state.waiting),
- {noreply, State#state{waiting=Waiting}};
-
-handle_info(restart_config_listener, State) ->
- ok = config:listen_for_changes(?MODULE, nil),
- {noreply, State};
-
-handle_info(_Msg, State) ->
- {noreply, State}.
-
-terminate(_Reason, State) ->
- ets:foldl(fun(#channel{pid=P}, _) -> smoosh_channel:close(P) end, 0,
- State#state.tab),
- ok.
-
-code_change(_OldVsn, {state, DbChannels, ViewChannels, Tab,
- EventListener, Waiting}, _Extra) ->
- {ok, #state{db_channels=DbChannels, view_channels=ViewChannels,
- schema_channels=[], tab=Tab, event_listener = EventListener,
- waiting=Waiting}};
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-% private functions.
-
-get_channel_status(#channel{name=Name, pid=P}, Acc0) when is_pid(P) ->
- try gen_server:call(P, status) of
- {ok, Status} ->
- [{Name, Status} | Acc0];
- _ ->
- Acc0
- catch _:_ ->
- Acc0
- end;
-get_channel_status(_, Acc0) ->
- Acc0.
-
-start_event_listener() ->
- couch_event:link_listener(?MODULE, handle_db_event, nil, [all_dbs]).
-
-enqueue_request(State, Object) ->
- try
- case find_channel(State, Object) of
- false ->
- ok;
- {ok, Pid, Priority} ->
- smoosh_channel:enqueue(Pid, Object, Priority)
- end
- catch Class:Exception ->
- Stack = erlang:get_stacktrace(),
- couch_log:notice("~s: ~p ~p for ~s : ~p",
- [?MODULE, Class, Exception,
- smoosh_utils:stringify(Object), Stack])
- end.
-
-find_channel(#state{}=State, {schema, DbName, GroupId}) ->
- find_channel(State#state.tab, State#state.schema_channels, {schema, DbName, GroupId});
-find_channel(#state{}=State, {Shard, GroupId}) ->
- find_channel(State#state.tab, State#state.view_channels, {Shard, GroupId});
-find_channel(#state{}=State, DbName) ->
- find_channel(State#state.tab, State#state.db_channels, DbName).
-
-find_channel(_Tab, [], _Object) ->
- false;
-find_channel(Tab, [Channel|Rest], Object) ->
- Pid = channel_pid(Tab, Channel),
- LastUpdated = smoosh_channel:last_updated(Pid, Object),
- StalenessInSec = config:get_integer("smoosh", "staleness", 5)
- * ?SECONDS_PER_MINUTE,
- Staleness = erlang:convert_time_unit(StalenessInSec, seconds, native),
- Now = erlang:monotonic_time(),
- case LastUpdated =:= false orelse Now - LastUpdated > Staleness of
- true ->
- case smoosh_utils:ignore_db(Object) of
- true ->
- find_channel(Tab, Rest, Object);
- _ ->
- case get_priority(Channel, Object) of
- 0 ->
- find_channel(Tab, Rest, Object);
- Priority ->
- {ok, Pid, Priority}
- end
- end;
- false ->
- find_channel(Tab, Rest, Object)
- end.
-
-channel_pid(Tab, Channel) ->
- [#channel{pid=Pid}] = ets:lookup(Tab, Channel),
- Pid.
-
-create_missing_channels(State) ->
- create_missing_channels(State#state.tab, State#state.db_channels),
- create_missing_channels(State#state.tab, State#state.view_channels),
- create_missing_channels(State#state.tab, State#state.schema_channels),
- State.
-
-create_missing_channels(_Tab, []) ->
- ok;
-create_missing_channels(Tab, [Channel|Rest]) ->
- case ets:lookup(Tab, Channel) of
- [] ->
- {ok, Pid} = smoosh_channel:start_link(Channel),
- true = ets:insert(Tab, [#channel{name=Channel, pid=Pid}]);
- _ ->
- ok
- end,
- create_missing_channels(Tab, Rest).
-
-get_priority(Channel, {Shard, GroupId}) ->
- case couch_index_server:get_index(couch_mrview_index, Shard, GroupId) of
- {ok, Pid} ->
- try
- {ok, ViewInfo} = couch_index:get_info(Pid),
- {SizeInfo} = couch_util:get_value(sizes, ViewInfo),
- DiskSize = couch_util:get_value(file, SizeInfo),
- ActiveSize = couch_util:get_value(active, SizeInfo),
- NeedsUpgrade = needs_upgrade(ViewInfo),
- get_priority(Channel, DiskSize, ActiveSize, NeedsUpgrade)
- catch
- exit:{timeout, _} ->
- 0
- end;
- {not_found, _Reason} ->
- 0;
- {error, Reason} ->
- couch_log:warning("Failed to get group_pid for ~p ~p ~p: ~p",
- [Channel, Shard, GroupId, Reason]),
- 0
- end;
-
-get_priority(Channel, {schema, DbName, DDocId}) ->
- case couch_md_index_manager:get_group_pid(DbName, DDocId) of
- {ok, Pid} ->
- {ok, SchemaInfo} = couch_md_index:get_info(Pid),
- DiskSize = couch_util:get_value(disk_size, SchemaInfo),
- DataSize = couch_util:get_value(data_size, SchemaInfo),
- get_priority(Channel, DiskSize, DataSize, false);
- {error, Reason} ->
- couch_log:warning("Failed to get group_pid for ~p ~p ~p: ~p",
- [Channel, DbName, DDocId, Reason]),
- 0
- end;
-
-get_priority(Channel, DbName) when is_list(DbName) ->
- get_priority(Channel, ?l2b(DbName));
-get_priority(Channel, DbName) when is_binary(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- try get_priority(Channel, Db) after couch_db:close(Db) end;
-get_priority(Channel, Db) ->
- {ok, DocInfo} = couch_db:get_db_info(Db),
- {SizeInfo} = couch_util:get_value(sizes, DocInfo),
- DiskSize = couch_util:get_value(file, SizeInfo),
- ActiveSize = couch_util:get_value(active, SizeInfo),
- NeedsUpgrade = needs_upgrade(DocInfo),
- case db_changed(Channel, DocInfo) of
- true -> get_priority(Channel, DiskSize, ActiveSize, NeedsUpgrade);
- false -> 0
- end.
-
-get_priority(Channel, DiskSize, DataSize, NeedsUpgrade) ->
- Priority = get_priority(Channel),
- MinSize = to_number(Channel, "min_size", "1048576"),
- MaxSize = to_number(Channel, "max_size", "infinity"),
- DefaultMinPriority = case Priority of "slack" -> "16777216"; _ -> "5.0" end,
- MinPriority = to_number(Channel, "min_priority", DefaultMinPriority),
- MaxPriority = to_number(Channel, "max_priority", "infinity"),
- if Priority =:= "upgrade", NeedsUpgrade ->
- 1;
- DiskSize =< MinSize ->
- 0;
- DiskSize > MaxSize ->
- 0;
- DataSize =:= 0 ->
- MinPriority;
- Priority =:= "ratio", DiskSize/DataSize =< MinPriority ->
- 0;
- Priority =:= "ratio", DiskSize/DataSize > MaxPriority ->
- 0;
- Priority =:= "ratio" ->
- DiskSize/DataSize;
- Priority =:= "slack", DiskSize-DataSize =< MinPriority ->
- 0;
- Priority =:= "slack", DiskSize-DataSize > MaxPriority ->
- 0;
- Priority =:= "slack" ->
- DiskSize-DataSize;
- true ->
- 0
- end.
-
-db_changed(Channel, Info) ->
- case couch_util:get_value(compacted_seq, Info) of
- undefined ->
- true;
- CompactedSeq ->
- MinChanges = list_to_integer(
- smoosh_utils:get(Channel, "min_changes", "0")),
- UpdateSeq = couch_util:get_value(update_seq, Info),
- UpdateSeq - CompactedSeq >= MinChanges
- end.
-
-to_number(Channel, Name, Default) ->
- case smoosh_utils:get(Channel, Name, Default) of
- "infinity" -> infinity;
- Value ->
- try
- list_to_float(Value)
- catch error:badarg ->
- list_to_integer(Value)
- end
- end.
-
-get_priority("ratio_dbs") ->
- "ratio";
-get_priority("ratio_views") ->
- "ratio";
-get_priority("ratio_schemas") ->
- "ratio";
-get_priority("slack_dbs") ->
- "slack";
-get_priority("slack_views") ->
- "slack";
-get_priority("slack_schemas") ->
- "slack";
-get_priority("upgrade_dbs") ->
- "upgrade";
-get_priority("upgrade_views") ->
- "upgrade";
-get_priority(Channel) ->
- smoosh_utils:get(Channel, "priority", "ratio").
-
-needs_upgrade(Props) ->
- DiskVersion = couch_util:get_value(disk_format_version, Props),
- case couch_util:get_value(engine, Props) of
- couch_bt_engine ->
- (couch_bt_engine_header:latest(DiskVersion) =:= false);
- _ ->
- false
- end.
-
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-
-setup_all() ->
- Ctx = test_util:start_couch([couch_log]),
- meck:new([config, couch_index, couch_index_server], [passthrough]),
- Pid = list_to_pid("<0.0.0>"),
- meck:expect(couch_index_server, get_index, 3, {ok, Pid}),
- meck:expect(config, get, fun(_, _, Default) -> Default end),
- Ctx.
-
-teardown_all(Ctx) ->
- meck:unload(),
- test_util:stop_couch(Ctx).
-
-setup() ->
- Shard = <<"shards/00000000-1fffffff/test.1529510412">>,
- GroupId = <<"_design/ddoc">>,
- {ok, Shard, GroupId}.
-
-teardown(_) ->
- ok.
-
-config_change_test_() ->
- {
- "Test config updates",
- {
- foreach,
- fun() -> test_util:start_couch([smoosh]) end,
- fun test_util:stop_couch/1,
- [
- fun t_restart_config_listener/1
- ]
- }
-}.
-
-get_priority_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- fun t_ratio_view/1,
- fun t_slack_view/1,
- fun t_no_data_view/1,
- fun t_below_min_priority_view/1,
- fun t_below_min_size_view/1,
- fun t_timeout_view/1,
- fun t_missing_view/1,
- fun t_invalid_view/1
- ]
- }
- }.
-
-t_restart_config_listener(_) ->
- ?_test(begin
- ConfigMonitor = config_listener_mon(),
- ?assert(is_process_alive(ConfigMonitor)),
- test_util:stop_sync(ConfigMonitor),
- ?assertNot(is_process_alive(ConfigMonitor)),
- NewConfigMonitor = test_util:wait(fun() ->
- case config_listener_mon() of
- undefined -> wait;
- Pid -> Pid
- end
- end),
- ?assert(is_process_alive(NewConfigMonitor))
- end).
-
-t_ratio_view({ok, Shard, GroupId}) ->
- ?_test(begin
- meck:expect(couch_index, get_info, fun(_) ->
- {ok, [{sizes, {[{file, 5242880}, {active, 524288}]}}]}
- end),
- ?assertEqual(10.0, get_priority("ratio_views", {Shard, GroupId})),
- ?assertEqual(0, get_priority("slack_views", {Shard, GroupId})),
- ?assertEqual(0, get_priority("upgrade_views", {Shard, GroupId}))
- end).
-
-t_slack_view({ok, Shard, GroupId}) ->
- ?_test(begin
- meck:expect(couch_index, get_info, fun(_) ->
- {ok, [{sizes, {[{file, 33554432}, {active, 16777215}]}}]}
- end),
- ?assertEqual(0, get_priority("ratio_views", {Shard, GroupId})),
- ?assertEqual(16777217, get_priority("slack_views", {Shard, GroupId})),
- ?assertEqual(0, get_priority("upgrade_views", {Shard, GroupId}))
- end).
-
-t_no_data_view({ok, Shard, GroupId}) ->
- ?_test(begin
- meck:expect(couch_index, get_info, fun(_) ->
- {ok, [{sizes, {[{file, 5242880}, {active, 0}]}}]}
- end),
- ?assertEqual(5.0, get_priority("ratio_views", {Shard, GroupId})),
- ?assertEqual(16777216, get_priority("slack_views", {Shard, GroupId})),
- ?assertEqual(5.0, get_priority("upgrade_views", {Shard, GroupId}))
- end).
-
-t_below_min_priority_view({ok, Shard, GroupId}) ->
- ?_test(begin
- meck:expect(couch_index, get_info, fun(_) ->
- {ok, [{sizes, {[{file, 5242880}, {active, 1048576}]}}]}
- end),
- ?assertEqual(0, get_priority("ratio_views", {Shard, GroupId})),
- ?assertEqual(0, get_priority("slack_views", {Shard, GroupId})),
- ?assertEqual(0, get_priority("upgrade_views", {Shard, GroupId}))
- end).
-
-t_below_min_size_view({ok, Shard, GroupId}) ->
- ?_test(begin
- meck:expect(couch_index, get_info, fun(_) ->
- {ok, [{sizes, {[{file, 1048576}, {active, 512000}]}}]}
- end),
- ?assertEqual(0, get_priority("ratio_views", {Shard, GroupId})),
- ?assertEqual(0, get_priority("slack_views", {Shard, GroupId})),
- ?assertEqual(0, get_priority("upgrade_views", {Shard, GroupId}))
- end).
-
-t_timeout_view({ok, Shard, GroupId}) ->
- ?_test(begin
- meck:expect(couch_index, get_info, fun(_) ->
- exit({timeout, get_info})
- end),
- ?assertEqual(0, get_priority("ratio_views", {Shard, GroupId})),
- ?assertEqual(0, get_priority("slack_views", {Shard, GroupId})),
- ?assertEqual(0, get_priority("upgrade_views", {Shard, GroupId}))
- end).
-
-t_missing_view({ok, Shard, GroupId}) ->
- ?_test(begin
- meck:expect(couch_index_server, get_index, 3, {not_found, missing}),
- ?assertEqual(0, get_priority("ratio_views", {Shard, GroupId})),
- ?assertEqual(0, get_priority("slack_views", {Shard, GroupId})),
- ?assertEqual(0, get_priority("upgrade_views", {Shard, GroupId}))
- end).
-
-t_invalid_view({ok, Shard, GroupId}) ->
- ?_test(begin
- meck:expect(couch_index_server, get_index, 3, {error, undef}),
- ?assertEqual(0, get_priority("ratio_views", {Shard, GroupId})),
- ?assertEqual(0, get_priority("slack_views", {Shard, GroupId})),
- ?assertEqual(0, get_priority("upgrade_views", {Shard, GroupId}))
- end).
-
-config_listener_mon() ->
- IsConfigMonitor = fun(P) ->
- [M | _] = string:tokens(couch_debug:process_name(P), ":"),
- M =:= "config_listener_mon"
- end,
- [{_, MonitoredBy}] = process_info(whereis(?MODULE), [monitored_by]),
- case lists:filter(IsConfigMonitor, MonitoredBy) of
- [Pid] -> Pid;
- [] -> undefined
- end.
-
--endif.
diff --git a/src/smoosh/src/smoosh_sup.erl b/src/smoosh/src/smoosh_sup.erl
deleted file mode 100644
index 158498cd5..000000000
--- a/src/smoosh/src/smoosh_sup.erl
+++ /dev/null
@@ -1,38 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(smoosh_sup).
-
--behaviour(supervisor).
-
-%% API
--export([start_link/0]).
-
-%% Supervisor callbacks
--export([init/1]).
-
-%% Helper macro for declaring children of supervisor
--define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}).
-
-%% ===================================================================
-%% API functions
-%% ===================================================================
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-%% ===================================================================
-%% Supervisor callbacks
-%% ===================================================================
-
-init([]) ->
- {ok, { {one_for_one, 5, 10}, [?CHILD(smoosh_server, worker)]} }.
diff --git a/src/smoosh/src/smoosh_utils.erl b/src/smoosh/src/smoosh_utils.erl
deleted file mode 100644
index fcd0fcd6f..000000000
--- a/src/smoosh/src/smoosh_utils.erl
+++ /dev/null
@@ -1,92 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(smoosh_utils).
--include_lib("couch/include/couch_db.hrl").
-
--export([get/2, get/3, group_pid/1, split/1, stringify/1, ignore_db/1]).
--export([
- in_allowed_window/1
-]).
-
-group_pid({Shard, GroupId}) ->
- case couch_view_group:open_db_group(Shard, GroupId) of
- {ok, Group} ->
- try
- gen_server:call(couch_view, {get_group_server, Shard, Group})
- catch _:Error ->
- {error, Error}
- end;
- Else ->
- Else
- end.
-
-get(Channel, Key) ->
- ?MODULE:get(Channel, Key, undefined).
-
-get(Channel, Key, Default) ->
- config:get("smoosh." ++ Channel, Key, Default).
-
-split(CSV) ->
- re:split(CSV, "\\s*,\\s*", [{return,list}, trim]).
-
-stringify({DbName, GroupId}) ->
- io_lib:format("~s ~s", [DbName, GroupId]);
-stringify({schema, DbName, GroupId}) ->
- io_lib:format("schema: ~s ~s", [DbName, GroupId]);
-stringify(DbName) ->
- io_lib:format("~s", [DbName]).
-
-ignore_db({DbName, _GroupName}) ->
- ignore_db(DbName);
-ignore_db(DbName) when is_binary(DbName)->
- ignore_db(?b2l(DbName));
-ignore_db(DbName) when is_list(DbName) ->
- case config:get("smoosh.ignore", DbName, false) of
- "true" ->
- true;
- _ ->
- false
- end;
-ignore_db(Db) ->
- ignore_db(couch_db:name(Db)).
-
-in_allowed_window(Channel) ->
- From = parse_time(get(Channel, "from"), {00, 00}),
- To = parse_time(get(Channel, "to"), {24, 00}),
- in_allowed_window(From, To).
-
-in_allowed_window(From, To) ->
- {_, {HH, MM, _}} = calendar:universal_time(),
- case From < To of
- true ->
- ({HH, MM} >= From) andalso ({HH, MM} < To);
- false ->
- ({HH, MM} >= From) orelse ({HH, MM} < To)
- end.
-
-
-parse_time(undefined, Default) ->
- Default;
-parse_time(String, Default) ->
- case string:tokens(String, ":") of
- [HH, MM] ->
- try
- {list_to_integer(HH), list_to_integer(MM)}
- catch error:badarg ->
- couch_log:error("Malformed compaction schedule configuration: ~s", [String]),
- Default
- end;
- _Else ->
- couch_log:error("Malformed compaction schedule configuration: ~s", [String]),
- Default
- end.
diff --git a/src/smoosh/test/exunit/scheduling_window_test.exs b/src/smoosh/test/exunit/scheduling_window_test.exs
deleted file mode 100644
index 9da4a3150..000000000
--- a/src/smoosh/test/exunit/scheduling_window_test.exs
+++ /dev/null
@@ -1,79 +0,0 @@
-defmodule SmooshSchedulingWindowTest do
- use Couch.Test.ExUnit.Case
-
- setup_all(context) do
- test_ctx = :test_util.start_couch([])
-
- on_exit(fn ->
- :config.delete('smoosh.test_channel', 'from')
- :config.delete('smoosh.test_channel', 'to')
- :test_util.stop_couch(test_ctx)
- end)
-
- context
- end
-
- test "in_allowed_window returns true by default", _context do
- assert :smoosh_utils.in_allowed_window('nonexistent_channel') == true
- end
-
- test "in_allowed_window ignores bad input", _context do
- :config.set('smoosh.test_channel', 'from', 'midnight', false)
- :config.set('smoosh.test_channel', 'to', 'infinity', false)
- assert :smoosh_utils.in_allowed_window('test_channel') == true
- end
-
- test "in_allowed_window returns false when now < from < to", _context do
- now = DateTime.utc_now()
- from = DateTime.add(now, 18_000)
- to = DateTime.add(now, 36_000)
- :config.set('smoosh.test_channel', 'from', '#{from.hour}:#{from.minute}', false)
- :config.set('smoosh.test_channel', 'to', '#{to.hour}:#{to.minute}', false)
- assert :smoosh_utils.in_allowed_window('test_channel') == false
- end
-
- test "in_allowed_window returns true when from < now < to", _context do
- now = DateTime.utc_now()
- from = DateTime.add(now, -18_000)
- to = DateTime.add(now, 18_000)
- :config.set('smoosh.test_channel', 'from', '#{from.hour}:#{from.minute}', false)
- :config.set('smoosh.test_channel', 'to', '#{to.hour}:#{to.minute}', false)
- assert :smoosh_utils.in_allowed_window('test_channel') == true
- end
-
- test "in_allowed_window returns false when from < to < now", _context do
- now = DateTime.utc_now()
- from = DateTime.add(now, -36_000)
- to = DateTime.add(now, -18_000)
- :config.set('smoosh.test_channel', 'from', '#{from.hour}:#{from.minute}', false)
- :config.set('smoosh.test_channel', 'to', '#{to.hour}:#{to.minute}', false)
- assert :smoosh_utils.in_allowed_window('test_channel') == false
- end
-
- test "in_allowed_window returns true when to < from < now", _context do
- now = DateTime.utc_now()
- from = DateTime.add(now, -18_000)
- to = DateTime.add(now, -36_000)
- :config.set('smoosh.test_channel', 'from', '#{from.hour}:#{from.minute}', false)
- :config.set('smoosh.test_channel', 'to', '#{to.hour}:#{to.minute}', false)
- assert :smoosh_utils.in_allowed_window('test_channel') == true
- end
-
- test "in_allowed_window returns false when to < now < from", _context do
- now = DateTime.utc_now()
- from = DateTime.add(now, 18_000)
- to = DateTime.add(now, -18_000)
- :config.set('smoosh.test_channel', 'from', '#{from.hour}:#{from.minute}', false)
- :config.set('smoosh.test_channel', 'to', '#{to.hour}:#{to.minute}', false)
- assert :smoosh_utils.in_allowed_window('test_channel') == false
- end
-
- test "in_allowed_window returns true when now < to < from", _context do
- now = DateTime.utc_now()
- from = DateTime.add(now, 36_000)
- to = DateTime.add(now, 18_000)
- :config.set('smoosh.test_channel', 'from', '#{from.hour}:#{from.minute}', false)
- :config.set('smoosh.test_channel', 'to', '#{to.hour}:#{to.minute}', false)
- assert :smoosh_utils.in_allowed_window('test_channel') == true
- end
-end
diff --git a/src/smoosh/test/exunit/test_helper.exs b/src/smoosh/test/exunit/test_helper.exs
deleted file mode 100644
index 314050085..000000000
--- a/src/smoosh/test/exunit/test_helper.exs
+++ /dev/null
@@ -1,2 +0,0 @@
-ExUnit.configure(formatters: [JUnitFormatter, ExUnit.CLIFormatter])
-ExUnit.start()
diff --git a/support/build_js.escript b/support/build_js.escript
index 2d9de6112..5f1e92015 100644
--- a/support/build_js.escript
+++ b/support/build_js.escript
@@ -65,13 +65,7 @@ main([]) ->
[
"share/server/rewrite_fun.js"
];
- "60" ->
- [
- "share/server/60/esprima.js",
- "share/server/60/escodegen.js",
- "share/server/60/rewrite_fun.js"
- ];
- "68" ->
+ _ ->
[
"share/server/60/esprima.js",
"share/server/60/escodegen.js",
diff --git a/test/elixir/lib/couch.ex b/test/elixir/lib/couch.ex
index 5928d5dd6..08d03fa21 100644
--- a/test/elixir/lib/couch.ex
+++ b/test/elixir/lib/couch.ex
@@ -79,6 +79,11 @@ defmodule Couch do
# request_timeout largely just so we know which timeout fired.
@request_timeout 60_000
@inactivity_timeout 55_000
+ @max_sessions 1_000
+
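+ # Base URL of the CouchDB instance under test; EX_COUCH_URL overrides the
+ # default local node.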
+ def base_url() do
+ System.get_env("EX_COUCH_URL") || "http://127.0.0.1:15984"
+ end
def process_url("http://" <> _ = url) do
url
@@ -110,13 +115,14 @@ defmodule Couch do
end
def process_options(options) do
- base_url = System.get_env("EX_COUCH_URL") || "http://127.0.0.1:15984"
+ base_url = base_url()
options = Keyword.put_new(options, :base_url, base_url)
options
|> set_auth_options()
|> set_inactivity_timeout()
|> set_request_timeout()
+ |> set_max_sessions()
end
def process_request_body(body) do
@@ -176,6 +182,17 @@ defmodule Couch do
Keyword.put_new(options, :timeout, timeout)
end
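+ # Raise ibrowse's per-host session (connection pool) limit so heavily
+ # parallel tests are not throttled by the default pool size.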
+ def set_max_sessions(options) do
+ Keyword.update(
+ options,
+ :ibrowse,
+ [{:max_sessions, @max_sessions}],
+ fn ibrowse ->
+ Keyword.put_new(ibrowse, :max_sessions, @max_sessions)
+ end
+ )
+ end
+
def login(userinfo) do
[user, pass] = String.split(userinfo, ":", parts: 2)
login(user, pass)
diff --git a/test/elixir/lib/couch/db_test.ex b/test/elixir/lib/couch/db_test.ex
index 652fa6bb6..2aa0880b2 100644
--- a/test/elixir/lib/couch/db_test.ex
+++ b/test/elixir/lib/couch/db_test.ex
@@ -104,16 +104,12 @@ defmodule Couch.DBTest do
end
def set_config_raw(section, key, value) do
- resp = Couch.get("/_membership")
-
- Enum.map(resp.body["all_nodes"], fn node ->
- url = "/_node/#{node}/_config/#{section}/#{key}"
+ url = "/_node/_local/_config/#{section}/#{key}"
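+ # The _local node name resolves to the node serving the request, so no
+ # _membership lookup is needed.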
headers = ["X-Couch-Persist": "false"]
body = :jiffy.encode(value)
resp = Couch.put(url, headers: headers, body: body)
assert resp.status_code == 200
- {node, resp.body}
- end)
+ [{"_local", resp.body}]
end
def prepare_user_doc(user) do
@@ -449,9 +445,7 @@ defmodule Couch.DBTest do
end
def run_on_modified_server(settings, fun) do
- resp = Couch.get("/_membership")
- assert resp.status_code == 200
- nodes = resp.body["all_nodes"]
+ nodes = ["_local"]
prev_settings =
Enum.map(settings, fn setting ->
diff --git a/test/elixir/lib/setup/common.ex b/test/elixir/lib/setup/common.ex
index e81f109c9..65cd04fe3 100644
--- a/test/elixir/lib/setup/common.ex
+++ b/test/elixir/lib/setup/common.ex
@@ -19,9 +19,9 @@ defmodule Couch.Test.Setup.Common do
|> Step.Create.DB.new(:db)
end
- def with_db(setup) do
+ def with_db(setup, apps \\ []) do
setup
- |> Step.Start.new(:start, extra_apps: [:fabric])
+ |> Step.Start.new(:start, extra_apps: [:fabric] ++ apps)
|> Step.Create.DB.new(:db)
end
end
\ No newline at end of file
diff --git a/test/elixir/lib/step/create_db.ex b/test/elixir/lib/step/create_db.ex
index d38e6722f..412c85898 100644
--- a/test/elixir/lib/step/create_db.ex
+++ b/test/elixir/lib/step/create_db.ex
@@ -36,7 +36,7 @@ defmodule Couch.Test.Setup.Step.Create.DB do
def setup(setup, %__MODULE__{name: name} = step) do
assert Setup.completed?(setup, Step.Start), "Require `Start` step"
assert :fabric in Step.Start.apps(), "Fabric is not started"
- res = :fabric.create_db(name, [@admin])
+ res = :fabric2_db.create_db(name, [@admin])
assert res in [:ok, :accepted], "Cannot create `#{name}` database"
step
end
diff --git a/test/elixir/lib/suite.ex b/test/elixir/lib/suite.ex
new file mode 100644
index 000000000..60b7766d0
--- /dev/null
+++ b/test/elixir/lib/suite.ex
@@ -0,0 +1,213 @@
+defmodule Couch.Test.Suite do
+ @moduledoc """
+ Common code to configure the ExUnit runner.
+ In the integration-test `test_helper.exs` it replaces the usual
+ invocation of `ExUnit.start()` with:
+ ```
+ Couch.Test.Suite.start()
+ ```
+ """
+ @doc """
+ This helper function can be used to create `suite.elixir`
+ as follows:
+ ```
+ tests =
+ Couch.Test.Suite.list()
+ |> Enum.sort()
+ |> Couch.Test.Suite.group_by()
+
+ IO.puts(Couch.Test.Suite.pretty_print(tests))
+
+ ```
+ """
+ def list() do
+ test_paths = Keyword.get(Mix.Project.config(), :test_paths, [])
+ Enum.reduce(test_paths, [], fn directory, acc ->
+ list(directory) ++ acc
+ end)
+ end
+
+ @doc """
+ This helper function can be used to create `suite.elixir`
+ as follows:
+ ```
+ tests =
+ Couch.Test.Suite.list(["test/elixir/test"])
+ |> Enum.sort()
+ |> Couch.Test.Suite.group_by()
+
+ IO.puts(Couch.Test.Suite.pretty_print(tests))
+ ```
+ """
+ def list(directory) do
+ ensure_exunit_started()
+ Enum.reduce(test_files(directory), [], fn file_path, acc ->
+ tests_in_file(file_path) ++ acc
+ end)
+ end
+
+ @doc """
+ This helper function is used in a snippet to create `suite.elixir`;
+ see `list/1`.
+ """
+ def group_by(tests) do
+ tests |> Enum.group_by(&module_name/1, &test_name/1)
+ end
+
+ @doc """
+ This helper function is used in a snippet to create `suite.elixir`;
+ see `list/1`.
+ """
+ def pretty_print(tests) do
+ tests = Enum.join(Enum.sort(Enum.map(tests, fn {module_name, test_names} ->
+ test_names = test_names
+ |> Enum.map(fn x -> ~s("#{x}") end) |> Enum.join(",\n ")
+ ~s( "#{module_name}": [\n #{test_names}\n ])
+ end)), ",\n")
+ "%{\n#{tests}\n}"
+ end
+
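+ # Configures ExUnit for the integration suite: always excludes :pending
+ # (plus :skip_on_jenkins when BUILD_NUMBER is set), honors suite/skip
+ # .elixir files passed via --include/--exclude, and then starts ExUnit.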
+ def start(exclude \\ []) do
+ # If a build number is detected, assume we are running on Jenkins
+ # and skip certain tests that fail on Jenkins.
+ default_exclude =
+ case System.get_env("BUILD_NUMBER") !== nil do
+ true -> [:pending, :skip_on_jenkins]
+ false -> [:pending]
+ end
+
+ current_exclude = Keyword.get(ExUnit.configuration(), :exclude, [])
+ {ignores, current_exclude} = from_file(current_exclude)
+
+ current_include = Keyword.get(ExUnit.configuration(), :include, [])
+ {suite, current_include} = from_file(current_include)
+
+ only_test_ids =
+ case suite -- ignores do
+ [] ->
+ nil
+
+ test_ids ->
+ to_tests(test_ids)
+ end
+
+ ExUnit.configure(
+ exclude: Enum.uniq(default_exclude ++ current_exclude ++ exclude),
+ include: current_include,
+ formatters: [JUnitFormatter, ExUnit.CLIFormatter],
+ only_test_ids: only_test_ids
+ )
+
+ ExUnit.start()
+ end
+
+ # Helpers for start/0
+
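+ # --include/--exclude values ending in ".elixir" are treated as files that
+ # enumerate {module, test} pairs; everything else is a regular ExUnit tag.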
+ defp split_files(opts) do
+ {files, opts} =
+ Enum.split_with(opts, fn x ->
+ String.ends_with?(Atom.to_string(x), ".elixir")
+ end)
+
+ {Enum.map(files, &Atom.to_string/1), opts}
+ end
+
+ defp read_from_file(file_name) do
+ {map, _} = Code.eval_file(file_name)
+
+ map
+ |> Enum.reduce([], fn {module, tests}, acc ->
+ Enum.map(tests, &{module, &1}) ++ acc
+ end)
+ end
+
+ defp from_file(opts) do
+ case split_files(opts) do
+ {[], opts} ->
+ {[], opts}
+
+ {[file_name], opts} ->
+ {read_from_file(file_name), opts}
+
+ {_, _} ->
+ throw("Only one file is supported in --exclude or --include")
+ end
+ end
+
+ defp to_tests(ids) do
+ MapSet.new(
+ Enum.map(ids, fn {module_name, test_name} ->
+ {String.to_atom("Elixir.#{module_name}"), String.to_atom("test #{test_name}")}
+ end)
+ )
+ end
+
+ # Helpers for list/0
+
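+ # Start ExUnit (without autorun) at most once; a process dictionary flag
+ # records that it has already been started.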
+ defp ensure_exunit_started() do
+ if not Process.get(EXUNIT_STARTED, false) do
+ started? =
+ Application.started_applications()
+ |> Enum.map(&Kernel.elem(&1, 0))
+ |> Enum.member?(:ex_unit)
+
+ if not started? do
+ ExUnit.start(autorun: false)
+ Process.put(EXUNIT_STARTED, true)
+ end
+ end
+ end
+
+ defp test_files(directory) do
+ files = Path.wildcard(Path.join(directory, "*_test.exs"))
+ Enum.filter(files, &File.regular?/1)
+ end
+
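+ # Compile a test file, collect the tests exposed by each module's
+ # __ex_unit__/0 metadata, then unrequire the file so it can be listed again.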
+ def tests_in_file(file_path) do
+ ensure_exunit_started()
+ Code.compiler_options(ignore_module_conflict: true)
+
+ tests =
+ Enum.reduce(require_file(file_path), [], fn {module_name, _}, acc ->
+ if :erlang.function_exported(module_name, :__ex_unit__, 0) do
+ module_name.__ex_unit__().tests ++ acc
+ else
+ acc
+ end
+ end)
+
+ Code.unrequire_files([file_path])
+ tests
+ end
+
+ def require_file(file_path) do
+ drop_stderr(fn ->
+ Code.require_file(file_path)
+ end)
+ end
+
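+ # Swap :standard_error for a StringIO device while fun runs so compiler
+ # warnings from requiring test files do not clutter the output.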
+ defp drop_stderr(fun) do
+ {:ok, pid} = StringIO.open("")
+ original_pid = Process.whereis(:standard_error)
+
+ try do
+ Process.unregister(:standard_error)
+ Process.register(pid, :standard_error)
+ fun.()
+ after
+ Process.unregister(:standard_error)
+ Process.register(original_pid, :standard_error)
+ StringIO.close(pid)
+ end
+ end
+
+ defp test_name(test) do
+ String.replace_leading(Atom.to_string(test.name), "test ", "")
+ end
+
+ defp module_name(test) do
+ test.module
+ |> Atom.to_string()
+ |> String.replace_leading("Elixir.", "")
+ end
+end
diff --git a/test/elixir/test/all_docs_test.exs b/test/elixir/test/all_docs_test.exs
index 0dff2a445..935859b29 100644
--- a/test/elixir/test/all_docs_test.exs
+++ b/test/elixir/test/all_docs_test.exs
@@ -420,11 +420,29 @@ defmodule AllDocsTest do
resp = Couch.get("/#{db_name}/_all_docs", query: %{:end_key => 0}).body
rows = resp["rows"]
- assert length(rows) === 0
+ assert Enum.empty?(rows)
end
defp get_ids(resp) do
%{"rows" => rows} = resp
Enum.map(rows, fn row -> row["id"] end)
end
+
+ @tag :with_db
+ test "POST boolean", context do
+ db_name = context[:db_name]
+
+ resp = Couch.post("/#{db_name}/_bulk_docs", body: %{docs: create_docs(0..3)})
+ assert resp.status_code in [201, 202]
+
+ resp = Couch.post(
+ "/#{db_name}/_all_docs",
+ body: %{
+ :stable => true,
+ :update => true
+ }
+ )
+
+ assert resp.status_code == 200
+ end
end
diff --git a/test/elixir/test/attachment_names_test.exs b/test/elixir/test/attachment_names_test.exs
index 66596c865..6a824a0a5 100644
--- a/test/elixir/test/attachment_names_test.exs
+++ b/test/elixir/test/attachment_names_test.exs
@@ -94,5 +94,20 @@ defmodule AttachmentNamesTest do
assert resp.body["reason"] ==
"Attachment name '_foo.txt' starts with prohibited character '_'"
+
+ resp =
+ Couch.post(
+ "/#{db_name}",
+ body: @leading_underscores_att
+ )
+
+ assert resp.status_code == 400
+
+ assert resp.body["reason"] ==
+ "Attachment name '_foo.txt' starts with prohibited character '_'"
+
+ resp = Couch.get("/#{db_name}/bin_doc2/_foo.txt")
+
+ assert resp.status_code == 404
end
end
diff --git a/test/elixir/test/basics_test.exs b/test/elixir/test/basics_test.exs
index b9338c63f..21c71f900 100644
--- a/test/elixir/test/basics_test.exs
+++ b/test/elixir/test/basics_test.exs
@@ -12,7 +12,7 @@ defmodule BasicsTest do
test "Session contains adm context" do
user_ctx = Couch.get("/_session").body["userCtx"]
assert user_ctx["name"] == "adm", "Should have adm user context"
- assert user_ctx["roles"] == ["_admin"], "Should have _admin role"
+ assert "_admin" in user_ctx["roles"], "Should have _admin role"
end
test "Welcome endpoint" do
@@ -518,4 +518,14 @@ defmodule BasicsTest do
assert resp.status_code == 200
assert resp.body == 999
end
+
+ @tag :with_db
+ test "Default headers are returned for doc with open_revs=all", context do
+ db_name = context[:db_name]
+ post_response = Couch.post("/#{db_name}", body: %{:foo => :bar})
+ id = post_response.body["id"]
+ head_response = Couch.head("/#{db_name}/#{id}?open_revs=all")
+ assert head_response.headers["X-Couch-Request-ID"]
+ assert head_response.headers["X-CouchDB-Body-Time"]
+ end
end
diff --git a/test/elixir/test/bulk_docs_test.exs b/test/elixir/test/bulk_docs_test.exs
index cbbc53340..2fd1f4830 100644
--- a/test/elixir/test/bulk_docs_test.exs
+++ b/test/elixir/test/bulk_docs_test.exs
@@ -130,6 +130,17 @@ defmodule BulkDocsTest do
assert Enum.at(rows, 2)["error"] == "conflict"
end
+ @tag :with_db
+ test "bulk docs raises error for transaction larger than 10MB", ctx do
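+ # A single ~16MB document fits under the raised max_document_size but still
+ # exceeds the 10MB transaction limit, so the write must fail.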
+ docs = [%{_id: "0", a: random_string(16_000_000)}]
+ old_size = Couch.get("/_node/node1@127.0.0.1/_config/couchdb/max_document_size").body
+ set_config_raw("couchdb", "max_document_size", "67108864") # 64M
+ resp = Couch.post("/#{ctx[:db_name]}/_bulk_docs", body: %{docs: docs})
+ set_config_raw("couchdb", "max_document_size", old_size) # set back
+ assert resp.status_code == 500
+ assert resp.body["reason"] == "code: 2101, desc: Transaction exceeds byte limit"
+ end
+
defp bulk_post(docs, db) do
retry_until(fn ->
resp = Couch.post("/#{db}/_bulk_docs", body: %{docs: docs})
@@ -152,4 +163,11 @@ defmodule BulkDocsTest do
assert resp.body["error"] == "bad_request"
assert resp.body["reason"] == reason
end
+
+ defp random_string(length) do
+ raw = :crypto.strong_rand_bytes(length)
+ raw
+ |> Base.url_encode64
+ |> binary_part(0, length)
+ end
end
diff --git a/test/elixir/test/changes_async_test.exs b/test/elixir/test/changes_async_test.exs
index 001c5d58c..26a0c5037 100644
--- a/test/elixir/test/changes_async_test.exs
+++ b/test/elixir/test/changes_async_test.exs
@@ -34,12 +34,11 @@ defmodule ChangesAsyncTest do
)
changes = process_response(req_id.id, &parse_chunk/1)
- {changes_length, last_seq_prefix} = parse_changes_response(changes)
- assert changes_length == 1, "db should not be empty"
- assert last_seq_prefix == "1-", "seq must start with 1-"
-
+ assert length(changes["results"]) == 1, "db should not be empty"
last_seq = changes["last_seq"]
- {:ok, worker_pid} = HTTPotion.spawn_link_worker_process(Couch.process_url(""))
+ assert last_seq != 0
+
+ {:ok, worker_pid} = HTTPotion.spawn_link_worker_process(Couch.base_url())
req_id =
Couch.get("/#{db_name}/_changes?feed=longpoll&since=#{last_seq}",
@@ -51,13 +50,13 @@ defmodule ChangesAsyncTest do
create_doc_bar(db_name, "bar")
- {changes_length, last_seq_prefix} =
+ changes =
req_id.id
|> process_response(&parse_chunk/1)
- |> parse_changes_response()
- assert changes_length == 1, "should return one change"
- assert last_seq_prefix == "2-", "seq must start with 2-"
+ assert length(changes["results"]) == 1, "should return one change"
+ next_last_seq = changes["last_seq"]
+ assert next_last_seq > last_seq
req_id =
Couch.get("/#{db_name}/_changes?feed=longpoll&since=now",
@@ -70,10 +69,9 @@ defmodule ChangesAsyncTest do
create_doc_bar(db_name, "barzzzz")
changes = process_response(req_id.id, &parse_chunk/1)
- {changes_length, last_seq_prefix} = parse_changes_response(changes)
- assert changes_length == 1, "should return one change"
+ assert length(changes["results"]) == 1, "should return one change"
assert Enum.at(changes["results"], 0)["id"] == "barzzzz"
- assert last_seq_prefix == "3-", "seq must start with 3-"
+ assert changes["last_seq"] > next_last_seq
end
@tag :with_db
@@ -83,10 +81,10 @@ defmodule ChangesAsyncTest do
check_empty_db(db_name)
create_doc(db_name, sample_doc_foo())
- {:ok, worker_pid} = HTTPotion.spawn_link_worker_process(Couch.process_url(""))
+ {:ok, worker_pid} = HTTPotion.spawn_link_worker_process(Couch.base_url())
req_id =
- Rawresp.get("/#{db_name}/_changes?feed=eventsource&timeout=500",
+ Rawresp.get("/#{db_name}/_changes?feed=eventsource&timeout=2000",
stream_to: self(),
direct: worker_pid
)
@@ -95,7 +93,7 @@ defmodule ChangesAsyncTest do
create_doc_bar(db_name, "bar")
- changes = process_response(req_id.id, &parse_event/1)
+ changes = process_response(req_id.id, &parse_event/1, 5000)
assert length(changes) == 2
assert Enum.at(changes, 0)["id"] == "foo"
@@ -108,7 +106,7 @@ defmodule ChangesAsyncTest do
test "eventsource heartbeat", context do
db_name = context[:db_name]
- {:ok, worker_pid} = HTTPotion.spawn_link_worker_process(Couch.process_url(""))
+ {:ok, worker_pid} = HTTPotion.spawn_link_worker_process(Couch.base_url())
req_id =
Rawresp.get("/#{db_name}/_changes?feed=eventsource&heartbeat=10",
@@ -136,13 +134,12 @@ defmodule ChangesAsyncTest do
)
changes = process_response(req_id.id, &parse_chunk/1)
- {changes_length, last_seq_prefix} = parse_changes_response(changes)
- assert changes_length == 1, "db should not be empty"
- assert last_seq_prefix == "3-", "seq must start with 3-"
+ assert length(changes["results"]) == 1, "db should not be empty"
+ assert changes["last_seq"] != 0
last_seq = changes["last_seq"]
# longpoll waits until a matching change before returning
- {:ok, worker_pid} = HTTPotion.spawn_link_worker_process(Couch.process_url(""))
+ {:ok, worker_pid} = HTTPotion.spawn_link_worker_process(Couch.base_url())
req_id =
Couch.get(
@@ -160,9 +157,8 @@ defmodule ChangesAsyncTest do
# Doc matches the filter
create_doc(db_name, %{_id: "bingo", bop: "bingo"})
changes = process_response(req_id.id, &parse_chunk/1)
- {changes_length, last_seq_prefix} = parse_changes_response(changes)
- assert changes_length == 1, "db should not be empty"
- assert last_seq_prefix == "5-", "seq must start with 5-"
+ assert length(changes["results"]) == 1, "db should not be empty"
+ assert changes["last_seq"] > last_seq
assert Enum.at(changes["results"], 0)["id"] == "bingo"
end
@@ -174,11 +170,11 @@ defmodule ChangesAsyncTest do
create_doc(db_name, %{bop: false})
create_doc(db_name, %{_id: "bingo", bop: "bingo"})
- {:ok, worker_pid} = HTTPotion.spawn_link_worker_process(Couch.process_url(""))
+ {:ok, worker_pid} = HTTPotion.spawn_link_worker_process(Couch.base_url())
req_id =
Rawresp.get(
- "/#{db_name}/_changes?feed=continuous&filter=changes_filter/bop&timeout=500",
+ "/#{db_name}/_changes?feed=continuous&filter=changes_filter/bop&timeout=2000",
stream_to: self(),
direct: worker_pid
)
@@ -186,7 +182,7 @@ defmodule ChangesAsyncTest do
:ok = wait_for_headers(req_id.id, 200)
create_doc(db_name, %{_id: "rusty", bop: "plankton"})
- changes = process_response(req_id.id, &parse_changes_line_chunk/1)
+ changes = process_response(req_id.id, &parse_changes_line_chunk/1, 5000)
changes_ids =
changes
@@ -206,11 +202,11 @@ defmodule ChangesAsyncTest do
create_doc(db_name, %{_id: "doc1", value: 1})
create_doc(db_name, %{_id: "doc2", value: 2})
- {:ok, worker_pid} = HTTPotion.spawn_link_worker_process(Couch.process_url(""))
+ {:ok, worker_pid} = HTTPotion.spawn_link_worker_process(Couch.base_url())
req_id =
Rawresp.post(
- "/#{db_name}/_changes?feed=continuous&timeout=500&filter=_doc_ids",
+ "/#{db_name}/_changes?feed=continuous&timeout=2000&filter=_doc_ids",
body: doc_ids,
headers: ["Content-Type": "application/json"],
stream_to: self(),
@@ -220,7 +216,7 @@ defmodule ChangesAsyncTest do
:ok = wait_for_headers(req_id.id, 200)
create_doc(db_name, %{_id: "doc3", value: 3})
- changes = process_response(req_id.id, &parse_changes_line_chunk/1)
+ changes = process_response(req_id.id, &parse_changes_line_chunk/1, 5000)
changes_ids =
changes
@@ -245,7 +241,7 @@ defmodule ChangesAsyncTest do
assert length(resp.body["results"]) == 4
seq = Enum.at(resp.body["results"], 1)["seq"]
- {:ok, worker_pid} = HTTPotion.spawn_link_worker_process(Couch.process_url(""))
+ {:ok, worker_pid} = HTTPotion.spawn_link_worker_process(Couch.base_url())
# simulate an EventSource request with a Last-Event-ID header
req_id =
@@ -287,7 +283,7 @@ defmodule ChangesAsyncTest do
end
end
- defp process_response(id, chunk_parser, timeout \\ 1000) do
+ defp process_response(id, chunk_parser, timeout \\ 2000) do
receive do
%HTTPotion.AsyncChunk{id: ^id} = msg ->
chunk_parser.(msg)
@@ -326,14 +322,10 @@ defmodule ChangesAsyncTest do
end
end
- defp parse_changes_response(changes) do
- {length(changes["results"]), String.slice(changes["last_seq"], 0..1)}
- end
-
defp check_empty_db(db_name) do
resp = Couch.get("/#{db_name}/_changes")
assert resp.body["results"] == [], "db must be empty"
- assert String.at(resp.body["last_seq"], 0) == "0", "seq must start with 0"
+ assert resp.body["last_seq"] == 0
end
defp test_changes(db_name, feed) do
@@ -344,7 +336,7 @@ defmodule ChangesAsyncTest do
# TODO: retry_part
resp = Couch.get("/#{db_name}/_changes")
assert length(resp.body["results"]) == 1, "db must not be empty"
- assert String.at(resp.body["last_seq"], 0) == "1", "seq must start with 1"
+ assert resp.body["last_seq"] != 0
# increase timeout to 100 to have enough time 2 assemble
# (seems like too little timeouts kill
@@ -356,10 +348,10 @@ defmodule ChangesAsyncTest do
# the sequence is not fully ordered and a complex structure now
change = Enum.at(changes, 1)
- assert String.at(change["last_seq"], 0) == "1"
+ assert change["last_seq"] != 0
- # create_doc_bar(db_name,"bar")
- {:ok, worker_pid} = HTTPotion.spawn_worker_process(Couch.process_url(""))
+ create_doc_bar(db_name, "bar1")
+ {:ok, worker_pid} = HTTPotion.spawn_worker_process(Couch.base_url())
%HTTPotion.AsyncResponse{id: req_id} =
Rawresp.get("/#{db_name}/_changes?feed=#{feed}&timeout=500",
@@ -368,7 +360,7 @@ defmodule ChangesAsyncTest do
)
:ok = wait_for_headers(req_id, 200)
- create_doc_bar(db_name, "bar")
+ create_doc_bar(db_name, "bar2")
changes = process_response(req_id, &parse_changes_line_chunk/1)
assert length(changes) == 3
diff --git a/test/elixir/test/compact_test.exs b/test/elixir/test/compact_test.exs
deleted file mode 100644
index 18aeab2de..000000000
--- a/test/elixir/test/compact_test.exs
+++ /dev/null
@@ -1,89 +0,0 @@
-defmodule CompactTest do
- use CouchTestCase
-
- @moduletag :compact
- @moduletag kind: :single_node
-
- @moduledoc """
- Test CouchDB compaction
- This is a port of compact.js
- """
-
- @att_doc_id "att_doc"
- @att_name "foo.txt"
- @att_plaintext "This is plain text"
-
- # Need to investigate why compaction is not compacting (or compactor cannot complete)
- # Refer:- https://github.com/apache/couchdb/pull/2127
- @tag :pending
- @tag :skip_on_jenkins
- @tag :with_db
- test "compaction reduces size of deleted docs", context do
- db = context[:db_name]
- docs = populate(db)
- info = get_info(db)
- orig_data_size = info["sizes"]["active"]
- orig_disk_size = info["sizes"]["file"]
- start_time = info["instance_start_time"]
- assert is_integer(orig_data_size) and is_integer(orig_disk_size)
- assert orig_data_size < orig_disk_size
-
- delete(db, docs)
-
- retry_until(fn ->
- deleted_data_size = get_info(db)["data_size"]
- assert deleted_data_size > orig_data_size
- end)
-
- deleted_data_size = get_info(db)["data_size"]
-
- compact(db)
-
- retry_until(fn ->
- assert get_info(db)["instance_start_time"] == start_time
- assert_attachment_available(db)
- info = get_info(db)
- final_data_size = info["sizes"]["active"]
- final_disk_size = info["sizes"]["file"]
- assert final_data_size < final_disk_size
- assert is_integer(final_data_size) and is_integer(final_disk_size)
- assert final_data_size < deleted_data_size
- end)
- end
-
- defp assert_attachment_available(db) do
- resp = Couch.get("/#{db}/#{@att_doc_id}/#{@att_name}")
- assert resp.body == @att_plaintext
- assert resp.headers["content-type"] == "text/plain"
- assert Couch.get("/#{db}").body["doc_count"] == 1
- end
-
- defp populate(db) do
- docs = create_docs(0..19)
- resp = Couch.post("/#{db}/_bulk_docs", body: %{docs: docs})
- assert resp.status_code in [201, 202]
- docs = rev(docs, resp.body)
-
- doc = %{
- _id: "#{@att_doc_id}",
- _attachments: %{
- "#{@att_name}": %{content_type: "text/plain", data: Base.encode64(@att_plaintext)}
- }
- }
-
- resp = Couch.put("/#{db}/#{doc._id}", body: doc)
- assert resp.status_code in [201, 202]
- docs
- end
-
- defp delete(db, docs) do
- docs = Enum.map(docs, &Map.put(&1, :_deleted, true))
- resp = Couch.post("/#{db}/_bulk_docs", body: %{docs: docs})
- assert resp.status_code in [201, 202]
- assert Couch.post("/#{db}/_ensure_full_commit").body["ok"] == true
- end
-
- defp get_info(db) do
- Couch.get("/#{db}").body
- end
-end
diff --git a/test/elixir/test/concurrent_writes_test.exs b/test/elixir/test/concurrent_writes_test.exs
new file mode 100644
index 000000000..8bc33df6f
--- /dev/null
+++ b/test/elixir/test/concurrent_writes_test.exs
@@ -0,0 +1,151 @@
+defmodule ConcurrentWritesTest do
+ use CouchTestCase
+
+ @moduletag :concurrent_writes
+ @moduletag kind: :single_node
+
+ @moduledoc """
+ Test CouchDB under concurrent write load
+ """
+
+ @tag :with_db
+ test "Primary data tests", context do
+ n = 120
+ db_name = context[:db_name]
+ parent = self()
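+ # Spawn n concurrent writers; each sends :done to the parent after its PUT
+ # is verified, and the parent waits for all n messages below.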
+ Enum.each(1..n,
+ fn x -> spawn fn ->
+ r = Couch.put("/#{db_name}/doc#{x}", body: %{:a => x})
+ assert r.status_code == 201
+ send parent, :done
+ end end)
+ Enum.each(1..n, fn _x -> receive do :done -> :done end end)
+ Enum.each(1..n, fn x ->
+ assert Couch.get("/#{db_name}/doc#{x}").body["a"] == x
+ end)
+ assert Couch.get("/#{db_name}").body["doc_count"] == n
+ end
+
+ @tag :with_db
+ test "Secondary data tests", context do
+ n = 120
+ db_name = context[:db_name]
+ map_fun = "function(doc) { emit(null, doc.a); }"
+ red_fun = "_sum"
+ ddoc_id = "_design/foo"
+ ddoc = %{:views => %{:foo => %{:map => map_fun, :reduce => red_fun}}}
+ Couch.put("/#{db_name}/#{ddoc_id}", body: ddoc)
+ parent = self()
+ Enum.each(1..n,
+ fn x -> spawn fn ->
+ r = Couch.put("/#{db_name}/doc#{x}", body: %{:a => x})
+ assert r.status_code == 201
+ send parent, :done
+ end end)
+ Enum.each(1..n, fn _x -> receive do :done -> :done end end)
+ rows = Couch.get("/#{db_name}/#{ddoc_id}/_view/foo").body["rows"]
+ result = hd(rows)["value"]
+ assert result == Enum.sum(1..n)
+ end
+
+ @tag :with_db
+ test "Secondary data tests with updates", context do
+ n = 120
+ db_name = context[:db_name]
+ map_fun = "function(doc) { emit(null, doc.a); }"
+ red_fun = "_sum"
+ ddoc_id = "_design/foo"
+ ddoc = %{:views => %{:foo => %{:map => map_fun, :reduce => red_fun}}}
+ Couch.put("/#{db_name}/#{ddoc_id}", body: ddoc)
+ parent = self()
+ Enum.each(1..n,
+ fn x -> spawn fn ->
+ r = Couch.put("/#{db_name}/doc#{x}", body: %{:a => x})
+ assert r.status_code == 201
+ rev = r.body["rev"]
+ Couch.put("/#{db_name}/doc#{x}", body: %{:_rev => rev, :a => x + 1})
+ send parent, :done
+ end end)
+ Enum.each(1..n, fn _x -> receive do :done -> :done end end)
+ rows = Couch.get("/#{db_name}/#{ddoc_id}/_view/foo").body["rows"]
+ result = hd(rows)["value"]
+ assert result == Enum.sum(2..n + 1)
+ end
+
+ @tag :with_db
+ test "Secondary data tests with updates and queries", context do
+ n = 120
+ query_every_n = 40
+ db_name = context[:db_name]
+ map_fun = "function(doc) { emit(null, doc.a); }"
+ red_fun = "_sum"
+ ddoc_id = "_design/foo"
+ ddoc = %{:views => %{:foo => %{:map => map_fun, :reduce => red_fun}}}
+ Couch.put("/#{db_name}/#{ddoc_id}", body: ddoc)
+ parent = self()
+ Enum.each(1..n,
+ fn x -> spawn fn ->
+ r = Couch.put("/#{db_name}/doc#{x}", body: %{:a => x})
+ assert r.status_code == 201
+ rev = r.body["rev"]
+ if rem(x, query_every_n) == 0 do
+ r = Couch.get("/#{db_name}/#{ddoc_id}/_view/foo")
+ assert r.status_code == 200
+ end
+ r = Couch.put("/#{db_name}/doc#{x}", body: %{:_rev => rev, :a => x + 1})
+ assert r.status_code == 201
+ send parent, :done
+ end end)
+ Enum.each(1..n, fn _x -> receive do :done -> :done end end)
+ rows = Couch.get("/#{db_name}/#{ddoc_id}/_view/foo").body["rows"]
+ result = hd(rows)["value"]
+ assert result == Enum.sum(2..n + 1)
+ end
+
+ # The following test was specifically crafted to trigger the issue fixed in:
+ # https://github.com/apache/couchdb/commit/ec4b2132918338d893a800a823cf5f12d5b2efd5
+ #
+ @tag :with_db
+ test "Secondary data tests with deletes and queries", context do
+ n = 120
+ query_every_n = 40
+ db_name = context[:db_name]
+ map_fun = "function(doc) { emit(null, doc.a); }"
+ red_fun = "_sum"
+ ddoc_id = "_design/foo"
+ ddoc = %{:views => %{:foo => %{:map => map_fun, :reduce => red_fun}}}
+ Couch.put("/#{db_name}/#{ddoc_id}", body: ddoc)
+ parent = self()
+ Enum.each(1..n,
+ fn x -> spawn fn ->
+ r = Couch.put("/#{db_name}/doc#{x}", body: %{:a => x})
+ assert r.status_code == 201
+ rev = r.body["rev"]
+ :timer.sleep(:rand.uniform(1000))
+ r = Couch.delete("/#{db_name}/doc#{x}?rev=#{rev}")
+ assert r.status_code == 200
+ if rem(x, query_every_n) == 0 do
+ r = Couch.get("/#{db_name}/#{ddoc_id}/_view/foo")
+ assert r.status_code == 200
+ end
+ send parent, :done
+ end end)
+ Enum.each(1..n, fn _x -> receive do :done -> :done end end)
+
+ # Keep trying to query the view for a bit to account for the case when
+ # partial view results can be returned. After the following commit merges,
+ # `retry_until` can be removed:
+ # https://github.com/apache/couchdb/pull/3391/commits/5a82664d1b0b58dd6c9fe6a79faa51e89211969e
+ #
+ try do
+ retry_until(fn ->
+ [] == Couch.get("/#{db_name}/#{ddoc_id}/_view/foo").body["rows"]
+ end, 1000, 5000)
+ rescue
+ RuntimeError -> :ok
+ end
+
+ assert [] == Couch.get("/#{db_name}/#{ddoc_id}/_view/foo").body["rows"]
+ end
+
+end
diff --git a/test/elixir/test/config/skip.elixir b/test/elixir/test/config/skip.elixir
new file mode 100644
index 000000000..5b00b6d21
--- /dev/null
+++ b/test/elixir/test/config/skip.elixir
@@ -0,0 +1,299 @@
+%{
+ "AllDocsTest": [
+ ],
+ "AttachmentMultipartTest": [
+ "manages attachments multipart requests successfully",
+ "manages compressed attachments successfully"
+ ],
+ "AttachmentNamesTest": [
+ "saves attachment names successfully"
+ ],
+ "AttachmentPathsTest": [
+ "manages attachment paths successfully - design docs"
+ ],
+ "AttachmentRangesTest": [
+ ],
+ "AttachmentViewTest": [
+ "manages attachments in views successfully"
+ ],
+ "AttachmentsTest": [
+ "COUCHDB-809 - stubs should only require the 'stub' field",
+ "empty attachments",
+ "etags for attachments",
+ "implicit doc creation allows creating docs with a reserved id. COUCHDB-565",
+ "large attachments COUCHDB-366",
+ "md5 header for attachments",
+ "saves binary",
+ "COUCHDB-497 - empty attachments",
+ "update attachment"
+ ],
+ "AuthCacheTest": [
+ ],
+ "BasicsTest": [
+ ],
+ "BatchSaveTest": [
+ ],
+ "BulkDocsTest": [
+ ],
+ "ChangesAsyncTest": [
+ ],
+ "ChangesTest": [
+ ],
+ "CoffeeTest": [
+ "CoffeeScript basic functionality"
+ ],
+ "ConfigTest": [
+ "Atoms, binaries, and strings suffice as whitelist sections and keys.",
+ "Blacklist is functional",
+ "Keys not in the whitelist may not be modified",
+ "Non-2-tuples in the whitelist are ignored",
+ "Non-list whitelist values allow further modification of the whitelist",
+ "Non-term whitelist values allow further modification of the whitelist",
+ "Reload config",
+ "Server-side password hashing, and raw updates disabling that",
+ "Settings can be altered with undefined whitelist allowing any change"
+ ],
+ "CookieAuthTest": [
+ "cookie auth"
+ ],
+ "CopyDocTest": [
+ "Copy doc tests"
+ ],
+ "DesignDocsQueryTest": [
+ ],
+ "DesignDocsTest": [
+ "circular commonjs dependencies",
+ "commonjs require",
+ "module id values are as expected"
+ ],
+ "DesignOptionsTest": [
+ "design doc options - include_desing=false"
+ ],
+ "DesignPathTest": [
+ "design doc path",
+ "design doc path with slash in db name"
+ ],
+ "ErlangViewsTest": [
+ "Erlang map function",
+ "Erlang reduce function",
+ "Erlang reduce function larger dataset"
+ ],
+ "EtagsHeadTest": [
+ ],
+ "FormSubmitTest": [
+ ],
+ "HelperTest": [
+ ],
+ "HttpTest": [
+ "COUCHDB-708: newlines document names",
+ "location header"
+ ],
+ "InvalidDocIDsTest": [
+ ],
+ "JsonpTest": [
+ ],
+ "JwtAuthTest": [
+ ],
+ "ListViewsTest": [
+ "COUCHDB-1113",
+ "HTTP header response set after getRow() called in _list function",
+ "abort iteration with reduce",
+ "empty list",
+ "extra qs params",
+ "get with query params",
+ "handling _all_docs by _list functions. the result should be equal",
+ "multi-key fetch with GET",
+ "multi-key fetch with POST",
+ "multiple languages in design docs",
+ "no multi-key fetch allowed when group=false",
+ "reduce with 0 rows",
+ "secObj is available",
+ "standard GET",
+ "standard OPTIONS",
+ "stop iteration",
+ "the richness of the arguments",
+ "too many Get Rows",
+ "we can run lists and views from separate docs",
+ "we do multi-key requests on lists and views in separate docs",
+ "when there is a reduce present, and used",
+ "when there is a reduce present, but not used",
+ "with 0 rows",
+ "with accept headers for HTML",
+ "with include_docs and a reference to the doc"
+ ],
+ "LocalDocsTest": [
+ ],
+ "LotsOfDocsTest": [
+ ],
+ "MethodOverrideTest": [
+ ],
+ "MultipleRowsTest": [
+ ],
+ "ProxyAuthTest": [
+ "proxy auth with secret",
+ "proxy auth without secret"
+ ],
+ "PurgeTest": [
+ "COUCHDB-1065",
+ "purge documents"
+ ],
+ "ReaderACLTest": [
+ "can't set non string reader names or roles",
+ "members can query views",
+ "restricted db can be read by authorized users",
+ "unrestricted db can be read",
+ "works with readers (backwards compat with 1.0)"
+ ],
+ "RecreateDocTest": [
+ ],
+ "ReduceBuiltinTest": [
+ ],
+ "ReduceFalseTest": [
+ ],
+ "ReduceTest": [
+ ],
+ "ReplicationBadIdTest": [
+ "replication doc with bad rep id"
+ ],
+ "ReplicationTest": [
+ ],
+ "ReplicatorDBByDocIdTest": [
+ "replicatior db by doc id"
+ ],
+ "RevStemmingTest": [
+ "revs limit is kept after compaction"
+ ],
+ "RevisionTest": [
+ ],
+ "RewriteJSTest": [
+ "Test basic js rewrites on test_rewrite_suite_db",
+ "Test basic js rewrites on test_rewrite_suite_db%2Fwith_slashes",
+ "early response on test_rewrite_suite_db",
+ "early response on test_rewrite_suite_db%2Fwith_slashes",
+ "loop on test_rewrite_suite_db",
+ "loop on test_rewrite_suite_db%2Fwith_slashes",
+ "path relative to server on test_rewrite_suite_db",
+ "path relative to server on test_rewrite_suite_db%2Fwith_slashes",
+ "requests with body preserve the query string rewrite on test_rewrite_suite_db",
+ "requests with body preserve the query string rewrite on test_rewrite_suite_db%2Fwith_slashes"
+ ],
+ "RewriteTest": [
+ "Test basic rewrites on test_rewrite_suite_db",
+ "Test basic rewrites on test_rewrite_suite_db%2Fwith_slashes",
+ "loop detection on test_rewrite_suite_db",
+ "loop detection on test_rewrite_suite_db%2Fwith_slashes",
+ "path relative to server on test_rewrite_suite_db",
+ "path relative to server on test_rewrite_suite_db%2Fwith_slashes",
+ "serial execution is not spuriously counted as loop on test_rewrite_suite_db",
+ "serial execution is not spuriously counted as loop on test_rewrite_suite_db%2Fwith_slashes"
+ ],
+ "SecurityValidationTest": [
+ ],
+ "ShowDocumentsTest": [
+ "JS can't set etag",
+ "accept header switching - different mime has different etag",
+ "deleted docs",
+ "id with slash",
+ "list() compatible API",
+ "list() compatible API with provides function",
+ "missing design doc",
+ "registering types works",
+ "security object",
+ "should keep next result order: chunks + return value + provided chunks + provided return value",
+ "show error",
+ "show fail with non-existing docid",
+ "show query parameters",
+ "show with doc",
+ "show with doc - etags",
+ "show with existing doc",
+ "show with missing doc",
+ "show with non-existing docid",
+ "show without docid",
+ "the provides mime matcher",
+ "the provides mime matcher without a match"
+ ],
+ "UTF8Test": [
+ ],
+ "UUIDsTest": [
+ "sequential uuids are sequential",
+ "utc_id uuids are correct",
+ "utc_random uuids are roughly random"
+ ],
+ "UpdateDocumentsTest": [
+ "COUCHDB-1229 - allow slashes in doc ids for update handlers",
+ "COUCHDB-648 - the code in the JSON response should be honored",
+ "Insert doc with empty id",
+ "base64 response",
+ "bump counter",
+ "doc can be created",
+ "form update via application/x-www-form-urlencoded",
+ "in place update",
+ "update document"
+ ],
+ "UsersDbSecurityTest": [
+ "user db security"
+ ],
+ "UsersDbTest": [
+ "users db",
+ "users password requirements"
+ ],
+ "ViewCollationRawTest": [
+ ],
+ "ViewCollationTest": [
+ ],
+ "ViewCompactionTest": [
+ "view compaction"
+ ],
+ "ViewConflictsTest": [
+ ],
+ "ViewErrorsTest": [
+ "infinite loop",
+ "reduce overflow error",
+ "temporary view should give error message"
+ ],
+ "ViewIncludeDocsTest": [
+ "emitted _rev controls things"
+ ],
+ "ViewMapTest": [
+ ],
+ "ViewMultiKeyAllDocsTest": [
+ ],
+ "ViewMultiKeyDesignTest": [
+ ],
+ "ViewOffsetTest": [
+ "basic view offsets",
+ "repeated view offsets"
+ ],
+ "ViewPaginationTest": [
+ "aliases start_key and start_key_doc_id should work",
+ "basic view pagination",
+ "descending view pagination",
+ "descending=false parameter should just be ignored",
+ "endkey document id",
+ "endkey document id, but with end_key_doc_id alias"
+ ],
+ "ViewSandboxingTest": [
+ ],
+ "ViewTest": [
+ ],
+ "ViewUpdateSeqTest": [
+ "_all_docs update seq",
+ "db info update seq",
+ "view update seq"
+ ],
+ "WithQuorumTest": [
+ "Attachments overriden quorum should return 202-Acepted",
+ "Attachments should return 201-Created",
+ "Bulk docs overriden quorum should return 202-Acepted",
+ "Creating-Updating/Deleting doc with overriden quorum should return 202-Acepted/200-OK",
+ "Creating/Deleting DB should return 201-Created/202-Acepted"
+ ],
+ "WithoutQuorumTest": [
+ "Attachments overriden quorum should return 201-Created",
+ "Attachments should return 202-Acepted",
+ "Bulk docs should return 202-Acepted",
+ "Copy doc should return 202-Acepted",
+ "Creating/Deleting DB should return 202-Acepted",
+ "Creating/Updating/Deleting doc should return 202-Acepted"
+ ]
+}
diff --git a/test/elixir/test/config/suite.elixir b/test/elixir/test/config/suite.elixir
new file mode 100644
index 000000000..7d2fc7966
--- /dev/null
+++ b/test/elixir/test/config/suite.elixir
@@ -0,0 +1,594 @@
+%{
+ "AllDocsTest": [
+ "All Docs tests",
+ "GET with one key",
+ "POST boolean",
+ "POST edge case with colliding parameters - query takes precedence",
+ "POST with empty body",
+ "POST with keys and limit",
+ "POST with missing keys",
+ "POST with query parameter and JSON body",
+ "_local_docs POST with keys and limit",
+ "all_docs ordering"
+ ],
+ "AttachmentMultipartTest": [
+ "manages attachments multipart requests successfully",
+ "manages compressed attachments successfully"
+ ],
+ "AttachmentNamesTest": [
+ "saves attachment names successfully"
+ ],
+ "AttachmentPathsTest": [
+ "manages attachment paths successfully",
+ "manages attachment paths successfully - design docs"
+ ],
+ "AttachmentRangesTest": [
+ "manages attachment range requests successfully"
+ ],
+ "AttachmentViewTest": [
+ "manages attachments in views successfully"
+ ],
+ "AttachmentsTest": [
+ "COUCHDB-809 - stubs should only require the 'stub' field",
+ "attachment via multipart/form-data",
+ "delete attachment",
+ "empty attachments",
+ "errors for bad attachment",
+ "etags for attachments",
+ "implicit doc creation allows creating docs with a reserved id. COUCHDB-565",
+ "large attachments COUCHDB-366",
+ "md5 header for attachments",
+ "reads attachment successfully",
+ "saves attachment successfully",
+ "saves binary",
+ "COUCHDB-497 - empty attachments",
+ "update attachment"
+ ],
+ "AuthCacheTest": [
+ "auth cache management"
+ ],
+ "BasicsTest": [
+ "A document read with etag works",
+ "Can create several documents",
+ "Check _revs_limit",
+ "Check for invalid document members",
+ "Create a document and save it to the database",
+ "Created database has appropriate db info name",
+ "Creating a new DB should return location header",
+ "Creating a new DB with slashes should return Location header (COUCHDB-411)",
+ "DELETE'ing a non-existent doc should 404",
+ "Database should be in _all_dbs",
+ "Default headers are returned for doc with open_revs=all",
+ "Empty database should have zero docs",
+ "Exceeding configured DB name size limit returns an error",
+ "Make sure you can do a seq=true option",
+ "On restart, a request for creating an already existing db can not override",
+ "POST doc response has a Location header",
+ "POST doc with an _id field isn't overwritten by uuid",
+ "PUT doc has a Location header",
+ "PUT error when body not an object",
+ "PUT on existing DB should return 412 instead of 500",
+ "Ready endpoint",
+ "Regression test for COUCHDB-954",
+ "Revs info status is good",
+ "Session contains adm context",
+ "Simple map functions",
+ "Welcome endpoint",
+ "_all_docs POST error when multi-get is not a {'key': [...]} structure",
+ "_all_docs/queries works",
+ "_bulk_docs POST error when body not an object",
+ "_design_docs works",
+ "_local_docs works",
+ "oops, the doc id got lost in code nirwana"
+ ],
+ "BatchSaveTest": [
+ "batch post",
+ "batch put",
+ "batch put with identical doc ids"
+ ],
+ "BulkDocsTest": [
+ "bulk docs can create, update, & delete many docs per request",
+ "bulk docs can detect conflicts",
+ "bulk docs emits conflict error for duplicate doc `_id`s",
+ "bulk docs raises conflict error for combined update & delete",
+ "bulk docs raises error for `all_or_nothing` option",
+ "bulk docs raises error for invlaid `docs` parameter",
+ "bulk docs raises error for invlaid `new_edits` parameter",
+ "bulk docs raises error for missing `docs` parameter",
+ "bulk docs raises error for transaction larger than 10MB",
+ "bulk docs supplies `id` if not provided in doc"
+ ],
+ "ChangesAsyncTest": [
+ "COUCHDB-1852",
+ "continuous changes",
+ "continuous filtered changes",
+ "continuous filtered changes with doc ids",
+ "eventsource changes",
+ "eventsource heartbeat",
+ "live changes",
+ "longpoll changes",
+ "longpoll filtered changes"
+ ],
+ "ChangesTest": [
+ "COUCHDB-1037-empty result for ?limit=1&filter=foo/bar in some cases",
+ "COUCHDB-1256",
+ "COUCHDB-1923",
+ "Changes feed negative heartbeat",
+ "Changes feed non-integer heartbeat",
+ "changes filtering on design docs",
+ "changes filtering on docids",
+ "changes limit",
+ "erlang function filtered changes",
+ "function filtered changes",
+ "map function filtered changes",
+ "non-existing desing doc and funcion for filtered changes",
+ "non-existing desing doc for filtered changes",
+ "non-existing function for filtered changes"
+ ],
+ "CoffeeTest": [
+ "CoffeeScript basic functionality"
+ ],
+ "ConcurrentWritesTest": [
+ "Primary data tests",
+ "Secondary data tests",
+ "Secondary data tests with updates"
+ ],
+ "ConfigTest": [
+ "Atoms, binaries, and strings suffice as whitelist sections and keys.",
+ "Blacklist is functional",
+ "CouchDB respects configured protocols",
+ "Keys not in the whitelist may not be modified",
+ "Non-2-tuples in the whitelist are ignored",
+ "Non-list whitelist values allow further modification of the whitelist",
+ "Non-term whitelist values allow further modification of the whitelist",
+ "PORT `BUGGED` ?raw tests from config.js",
+ "Reload config",
+ "Server-side password hashing, and raw updates disabling that",
+ "Settings can be altered with undefined whitelist allowing any change",
+ "Standard config options are present"
+ ],
+ "CookieAuthTest": [
+ "cookie auth"
+ ],
+ "CopyDocTest": [
+ "Copy doc tests"
+ ],
+ "DesignDocsQueryTest": [
+ "POST edge case with colliding parameters - query takes precedence",
+ "POST with empty body",
+ "POST with keys and limit",
+ "POST with query parameter and JSON body",
+ "query _design_docs (GET with no parameters)",
+ "query _design_docs descending=false",
+ "query _design_docs descending=true",
+ "query _design_docs end_key",
+ "query _design_docs end_key inclusive_end=false",
+ "query _design_docs end_key inclusive_end=false descending",
+ "query _design_docs end_key inclusive_end=true",
+ "query _design_docs end_key limit",
+ "query _design_docs end_key skip",
+ "query _design_docs endkey",
+ "query _design_docs post with keys",
+ "query _design_docs start_key",
+ "query _design_docs startkey",
+ "query _design_docs update_seq",
+ "query _design_docs with multiple key",
+ "query _design_docs with single key"
+ ],
+ "DesignDocsTest": [
+ "_all_docs view returns correctly with keys",
+ "all_docs_twice",
+ "circular commonjs dependencies",
+ "commonjs in map functions",
+ "commonjs require",
+ "consistent _rev for design docs",
+ "design doc deletion",
+ "language not specified, Javascript is implied",
+ "module id values are as expected",
+ "startkey and endkey",
+ "that we get correct design doc info back",
+ "validate doc update"
+ ],
+ "DesignOptionsTest": [
+ "design doc options - include_design default value",
+ "design doc options - include_desing=false",
+ "design doc options - include_desing=true",
+ "design doc options - local_seq=true"
+ ],
+ "DesignPathTest": [
+ "design doc path",
+ "design doc path with slash in db name"
+ ],
+ "ErlangViewsTest": [
+ "Erlang map function",
+ "Erlang reduce function",
+ "Erlang reduce function larger dataset"
+ ],
+ "EtagsHeadTest": [
+ "etag header on creation",
+ "etag header on head",
+ "etag header on retrieval",
+ "etags head"
+ ],
+ "FormSubmitTest": [
+ "form submission gives back invalid content-type"
+ ],
+ "HelperTest": [
+ "retry_until handles assertions",
+ "retry_until handles boolean conditions",
+ "retry_until times out"
+ ],
+ "HttpTest": [
+ "COUCHDB-708: newlines document names",
+ "location header",
+ "location header should include X-Forwarded-Host",
+ "location header should include custom header"
+ ],
+ "InvalidDocIDsTest": [
+ "_local-prefixed ids are illegal",
+ "a PUT request with absent _id is forbidden",
+ "accidental POST to form handling code",
+ "explicit _bulk_docks policy",
+ "invalid _prefix",
+ "using a non-string id is forbidden"
+ ],
+ "JsonpTest": [
+ "jsonp chunked callbacks",
+ "jsonp not configured callbacks",
+ "jsonp unchunked callbacks"
+ ],
+ "JwtAuthTest": [
+ "jwt auth with EC secret",
+ "jwt auth with HMAC secret",
+ "jwt auth with RSA secret",
+ "jwt auth with required iss claim",
+ "jwt auth without secret"
+ ],
+ "ListViewsTest": [
+ "COUCHDB-1113",
+ "HTTP header response set after getRow() called in _list function",
+ "abort iteration with reduce",
+ "empty list",
+ "extra qs params",
+ "get with query params",
+ "handling _all_docs by _list functions. the result should be equal",
+ "multi-key fetch with GET",
+ "multi-key fetch with POST",
+ "multiple languages in design docs",
+ "no multi-key fetch allowed when group=false",
+ "reduce with 0 rows",
+ "secObj is available",
+ "standard GET",
+ "standard OPTIONS",
+ "stop iteration",
+ "the richness of the arguments",
+ "too many Get Rows",
+ "we can run lists and views from separate docs",
+ "we do multi-key requests on lists and views in separate docs",
+ "when there is a reduce present, and used",
+ "when there is a reduce present, but not used",
+ "with 0 rows",
+ "with accept headers for HTML",
+ "with include_docs and a reference to the doc"
+ ],
+ "LocalDocsTest": [
+ "GET with multiple keys",
+ "GET with no parameters",
+ "POST edge case with colliding parameters - query takes precedence",
+ "POST with empty body",
+ "POST with keys and limit",
+ "POST with query parameter and JSON body"
+ ],
+ "LotsOfDocsTest": [
+ "lots of docs with _all_docs",
+ "lots of docs with a regular view"
+ ],
+ "MethodOverrideTest": [
+ "Method Override is ignored when original Method isn't POST",
+ "method override DELETE",
+ "method override PUT"
+ ],
+ "MultipleRowsTest": [
+ "multiple rows"
+ ],
+ "ProxyAuthTest": [
+ "proxy auth with secret",
+ "proxy auth without secret"
+ ],
+ "PurgeTest": [
+ "COUCHDB-1065",
+ "purge documents"
+ ],
+ "ReaderACLTest": [
+ "can't set non string reader names or roles",
+ "members can query views",
+ "restricted db can be read by authorized users",
+ "unrestricted db can be read",
+ "works with readers (backwards compat with 1.0)"
+ ],
+ "RecreateDocTest": [
+ "COUCHDB-1265 - changes feed after we try and break the update_seq tree",
+ "COUCHDB-292 - recreate a deleted document",
+ "Recreate a deleted document with non-exsistant rev",
+ "recreate document"
+ ],
+ "ReduceBuiltinTest": [
+ "Builtin count and sum reduce for key as array",
+ "Builtin reduce functions",
+ "Builtin reduce functions with trailings"
+ ],
+ "ReduceFalseTest": [
+ "Basic reduce functions"
+ ],
+ "ReduceTest": [
+ "Basic reduce functions",
+ "More complex array key view row testing",
+ "More complex reductions that need to use the combine option",
+ "Reduce pagination"
+ ],
+ "ReplicationBadIdTest": [
+ "replication doc with bad rep id"
+ ],
+ "ReplicationTest": [
+ "compressed attachment replication - remote-to-remote",
+ "continuous replication - remote-to-remote",
+ "create_target filter option - remote-to-remote",
+ "filtered replications - remote-to-remote",
+ "non-admin or reader user on source - remote-to-remote",
+ "non-admin user on target - remote-to-remote",
+ "replicate with since_seq - remote-to-remote",
+ "replicating attachment without conflict - COUCHDB-885",
+ "replication by doc ids - remote-to-remote",
+ "replication cancellation",
+ "replication restarts after filter change - COUCHDB-892 - remote-to-remote",
+ "simple remote-to-remote replication - remote-to-remote",
+ "source database not found with host",
+ "unauthorized replication cancellation",
+ "validate_doc_update failure replications - remote-to-remote"
+ ],
+ "ReplicatorDBByDocIdTest": [
+ "replicatior db by doc id"
+ ],
+ "RevStemmingTest": [
+ "revs limit is kept after compaction",
+ "revs limit produces replication conflict ",
+ "revs limit update"
+ ],
+ "RevisionTest": [
+ "`new_edits: false` prevents bulk updates (COUCHDB-1178)",
+ "mismatched rev in body and etag returns error",
+ "mismatched rev in body and query string returns error",
+ "multiple updates with same _rev raise conflict errors"
+ ],
+ "RewriteJSTest": [
+ "Test basic js rewrites on test_rewrite_suite_db",
+ "Test basic js rewrites on test_rewrite_suite_db%2Fwith_slashes",
+ "early response on test_rewrite_suite_db",
+ "early response on test_rewrite_suite_db%2Fwith_slashes",
+ "loop on test_rewrite_suite_db",
+ "loop on test_rewrite_suite_db%2Fwith_slashes",
+ "path relative to server on test_rewrite_suite_db",
+ "path relative to server on test_rewrite_suite_db%2Fwith_slashes",
+ "requests with body preserve the query string rewrite on test_rewrite_suite_db",
+ "requests with body preserve the query string rewrite on test_rewrite_suite_db%2Fwith_slashes"
+ ],
+ "RewriteTest": [
+ "Test basic rewrites on test_rewrite_suite_db",
+ "Test basic rewrites on test_rewrite_suite_db%2Fwith_slashes",
+ "loop detection on test_rewrite_suite_db",
+ "loop detection on test_rewrite_suite_db%2Fwith_slashes",
+ "path relative to server on test_rewrite_suite_db",
+ "path relative to server on test_rewrite_suite_db%2Fwith_slashes",
+ "serial execution is not spuriously counted as loop on test_rewrite_suite_db",
+ "serial execution is not spuriously counted as loop on test_rewrite_suite_db%2Fwith_slashes"
+ ],
+ "SecurityValidationTest": [
+ "Author presence and user security",
+ "Author presence and user security when replicated",
+ "Ddoc writes with admin and replication contexts",
+ "Force basic login",
+ "Jerry can save a document normally",
+ "Non-admin user cannot save a ddoc",
+ "Saving document using the wrong credentials",
+ "_session API",
+ "try to set a wrong value for _security"
+ ],
+ "ShowDocumentsTest": [
+ "JS can't set etag",
+ "accept header switching - different mime has different etag",
+ "deleted docs",
+ "id with slash",
+ "list() compatible API",
+ "list() compatible API with provides function",
+ "missing design doc",
+ "registering types works",
+ "security object",
+ "should keep next result order: chunks + return value + provided chunks + provided return value",
+ "show error",
+ "show fail with non-existing docid",
+ "show query parameters",
+ "show with doc",
+ "show with doc - etags",
+ "show with existing doc",
+ "show with missing doc",
+ "show with non-existing docid",
+ "show without docid",
+ "the provides mime matcher",
+ "the provides mime matcher without a match"
+ ],
+ "UTF8Test": [
+ "UTF8 support"
+ ],
+ "UUIDsTest": [
+ "Bad Request error when exceeding max UUID count",
+ "Method Not Allowed error on POST",
+ "cache busting headers are set",
+ "can return single uuid",
+ "no duplicates in 1,000 UUIDs",
+ "sequential uuids are sequential",
+ "utc_id uuids are correct",
+ "utc_random uuids are roughly random"
+ ],
+ "UpdateDocumentsTest": [
+ "COUCHDB-1229 - allow slashes in doc ids for update handlers",
+ "COUCHDB-648 - the code in the JSON response should be honored",
+ "GET is not allowed",
+ "Insert doc with empty id",
+ "Server provides UUID when POSTing without an ID in the URL",
+ "base64 response",
+ "bump counter",
+ "doc can be created",
+ "form update via application/x-www-form-urlencoded",
+ "in place update",
+ "update document",
+ "update error invalid path"
+ ],
+ "UsersDbSecurityTest": [
+ "user db security"
+ ],
+ "UsersDbTest": [
+ "users db"
+ ],
+ "ViewCollationRawTest": [
+ "ascending collation order",
+ "descending collation order",
+ "inclusive_end=false",
+ "inclusive_end=true",
+ "key query option",
+ "raw semantics in key ranges"
+ ],
+ "ViewCollationTest": [
+ "ascending collation order",
+ "descending collation order",
+ "inclusive_end=false",
+ "inclusive_end=true",
+ "key query option"
+ ],
+ "ViewCompactionTest": [
+ "view compaction"
+ ],
+ "ViewConflictsTest": [
+ "view conflict"
+ ],
+ "ViewErrorsTest": [
+ "emit undefined key results as null",
+ "emit undefined value results as null",
+ "error responses for invalid multi-get bodies",
+ "exception in map function",
+ "infinite loop",
+ "query parse error",
+ "query view with invalid params",
+ "reduce overflow error",
+ "temporary view should give error message"
+ ],
+ "ViewIncludeDocsTest": [
+ "COUCHDB-549 - include_docs=true with conflicts=true",
+ "Not an error with include_docs=false&reduce=true",
+ "Reduce support when reduce=false",
+ "emitted _rev controls things",
+ "include docs in all_docs",
+ "include docs in view",
+ "link to another doc from a value",
+ "no reduce support"
+ ],
+ "ViewMapTest": [
+ "_conflict is supported",
+ "_local_seq is supported",
+ "bad range returns error",
+ "can do design doc swap",
+ "can index design docs",
+ "can use key in query string",
+ "can use keys in query string",
+ "descending=true query with startkey_docid",
+ "inclusive = false",
+ "multiple emits in correct value order",
+ "query returns docs",
+ "supports linked documents",
+ "updated docs rebuilds index"
+ ],
+ "ViewMultiKeyAllDocsTest": [
+ "GET - get invalid rows when the key doesn't exist",
+ "POST - get invalid rows when the key doesn't exist",
+ "empty keys",
+ "keys in GET parameters",
+ "keys in GET parameters (descending)",
+ "keys in GET parameters (descending, skip, limit)",
+ "keys in GET parameters (limit)",
+ "keys in GET parameters (skip)",
+ "keys in POST body",
+ "keys in POST body (descending)",
+ "keys in POST body (descending, skip, limit)",
+ "keys in POST body (limit)",
+ "keys in POST body (skip)"
+ ],
+ "ViewMultiKeyDesignTest": [
+ "GET - invalid parameter combinations get rejected ",
+ "POST - invalid parameter combinations get rejected ",
+ "argument combinations",
+ "dir works",
+ "empty keys",
+ "keys in GET body (group)",
+ "keys in GET parameters",
+ "keys in POST body",
+ "keys in POST body (group)",
+ "limit works",
+ "offset works",
+ "that a map & reduce containing func support keys when reduce=false",
+ "that limiting by startkey_docid and endkey_docid get applied",
+ "that missing keys work too"
+ ],
+ "ViewOffsetTest": [
+ "basic view offsets",
+ "repeated view offsets"
+ ],
+ "ViewPaginationTest": [
+ "aliases start_key and start_key_doc_id should work",
+ "basic view pagination",
+ "descending view pagination",
+ "descending=false parameter should just be ignored",
+ "endkey document id",
+ "endkey document id, but with end_key_doc_id alias"
+ ],
+ "ViewSandboxingTest": [
+ "COUCHDB-925 - altering 'doc' variable in map function affects other map functions",
+ "attempting to change the document has no effect",
+ "runtime code evaluation can be prevented",
+ "view cannot access the map_funs and map_results array",
+ "view cannot invoke interpreter internals"
+ ],
+ "ViewTest": [
+ "GET with multiple keys",
+ "GET with no parameters",
+ "GET with one key",
+ "POST edge case with colliding parameters - query takes precedence",
+ "POST with boolean parameter",
+ "POST with empty body",
+ "POST with keys and limit",
+ "POST with query parameter and JSON body"
+ ],
+ "ViewUpdateSeqTest": [
+ "_all_docs update seq",
+ "db info update seq",
+ "view update seq"
+ ],
+ "WithQuorumTest": [
+ "Attachments overriden quorum should return 202-Acepted",
+ "Attachments should return 201-Created",
+ "Bulk docs overriden quorum should return 202-Acepted",
+ "Bulk docs should return 201-Created",
+ "Copy doc should return 201-Created",
+ "Creating-Updating/Deleting doc should return 201-Created/200-OK",
+ "Creating-Updating/Deleting doc with overriden quorum should return 202-Acepted/200-OK",
+ "Creating/Deleting DB should return 201-Created/202-Acepted"
+ ],
+ "WithoutQuorumTest": [
+ "Attachments overriden quorum should return 201-Created",
+ "Attachments should return 202-Acepted",
+ "Bulk docs overriden quorum should return 201-Created",
+ "Bulk docs should return 202-Acepted",
+ "Copy doc should return 202-Acepted",
+ "Creating-Updating/Deleting doc with overriden quorum should return 201-Created/200-OK",
+ "Creating/Deleting DB should return 202-Acepted",
+ "Creating/Updating/Deleting doc should return 202-Acepted"
+ ]
+}
diff --git a/test/elixir/test/config_test.exs b/test/elixir/test/config_test.exs
index bb89d8683..e49d2aa8f 100644
--- a/test/elixir/test/config_test.exs
+++ b/test/elixir/test/config_test.exs
@@ -10,8 +10,7 @@ defmodule ConfigTest do
"""
setup do
- # TODO: switch this to _local when that's landed
- config_url = "/_node/node1@127.0.0.1/_config"
+ config_url = "/_node/_local/_config"
resp = Couch.get(config_url)
assert resp.status_code == 200
{:ok, config: resp.body, config_url: config_url}
@@ -71,13 +70,9 @@ defmodule ConfigTest do
end
end
- # TODO: port sever_port tests from config.js
- @tag :pending
- test "CouchDB respects configured protocols"
test "Standard config options are present", context do
- assert context[:config]["couchdb"]["database_dir"]
- assert context[:config]["log"]["level"]
+ assert context[:config]["chttpd"]["port"]
end
test "Settings can be altered with undefined whitelist allowing any change", context do
@@ -100,9 +95,6 @@ defmodule ConfigTest do
assert Couch.delete("/_session").body["ok"]
end
- @tag :pending
- test "PORT `BUGGED` ?raw tests from config.js"
-
test "Non-term whitelist values allow further modification of the whitelist", context do
val = "!This is an invalid Erlang term!"
set_config(context, "httpd", "config_whitelist", val)
diff --git a/test/elixir/test/test_helper.exs b/test/elixir/test/test_helper.exs
index 6311fca44..72989e7c5 100644
--- a/test/elixir/test/test_helper.exs
+++ b/test/elixir/test/test_helper.exs
@@ -1,16 +1 @@
-# If build number detected assume we running on Jenkins
-# and skip certain tests that fail on jenkins.
-exclude =
- case System.get_env("BUILD_NUMBER") !== nil do
- true -> [:pending, :skip_on_jenkins]
- false -> [:pending]
- end
-
-current_exclude = Keyword.get(ExUnit.configuration(), :exclude, [])
-
-ExUnit.configure(
- exclude: Enum.uniq(exclude ++ current_exclude),
- formatters: [JUnitFormatter, ExUnit.CLIFormatter]
-)
-
-ExUnit.start()
+Couch.Test.Suite.start()
diff --git a/test/elixir/test/users_db_test.exs b/test/elixir/test/users_db_test.exs
index db86b2739..7c678c4f0 100644
--- a/test/elixir/test/users_db_test.exs
+++ b/test/elixir/test/users_db_test.exs
@@ -299,4 +299,129 @@ defmodule UsersDbTest do
assert resp.body["userCtx"]["name"] == "foo@example.org"
end
+
+ test "users password requirements", _context do
+ set_config({
+ "couch_httpd_auth",
+ "password_regexp",
+ Enum.join(
+ [
+ "[{\".{10,}\"},", # 10 chars
+ "{\"[A-Z]+\", \"Requirement 2.\"},", # a uppercase char
+ "{\"[a-z]+\", \"\"},", # a lowercase char
+ "{\"\\\\d+\", \"Req 4.\"},", # A number
+ "\"[!\.,\(\)]+\"]" # A special char
+ ],
+ " "
+ )
+ })
+
+ session = login("jan", "apple")
+
+ # With a password that doesn't conform to any requirement.
+ # The failing requirement doesn't have a reason text.
+ jchris_user_doc =
+ prepare_user_doc([
+ {:name, "jchris@apache.org"},
+ {:password, "funnybone"}
+ ])
+ save_as(
+ @users_db_name,
+ jchris_user_doc,
+ use_session: session,
+ expect_response: 400,
+ error_message: "bad_request",
+ error_reason: "Password does not conform to requirements."
+ )
+
+ # With a password that matches the first requirement.
+ # The failing requirement does have a reason text.
+ jchris_user_doc2 = Map.put(jchris_user_doc, "password", "funnnnnybone")
+ save_as(
+ @users_db_name,
+ jchris_user_doc2,
+ use_session: session,
+ expect_response: 400,
+ error_message: "bad_request",
+ error_reason: "Password does not conform to requirements. Requirement 2."
+ )
+
+ # With a password that matches the first two requirements.
+ # The failing requirement has an empty string as its reason text.
+ jchris_user_doc3 = Map.put(jchris_user_doc, "password", "FUNNNNNYBONE")
+ save_as(
+ @users_db_name,
+ jchris_user_doc3,
+ use_session: session,
+ expect_response: 400,
+ error_message: "bad_request",
+ error_reason: "Password does not conform to requirements."
+ )
+
+ # With a password that matches the first three requirements.
+ # The failing requirement does have a reason text.
+ jchris_user_doc4 = Map.put(jchris_user_doc, "password", "funnnnnyBONE")
+ save_as(
+ @users_db_name,
+ jchris_user_doc4,
+ use_session: session,
+ expect_response: 400,
+ error_message: "bad_request",
+ error_reason: "Password does not conform to requirements. Req 4."
+ )
+
+ # With a password that matches all but the last requirement.
+ # The failing requirement doesn't have a reason text.
+ jchris_user_doc5 = Map.put(jchris_user_doc, "password", "funnnnnyB0N3")
+ save_as(
+ @users_db_name,
+ jchris_user_doc5,
+ use_session: session,
+ expect_response: 400,
+ error_message: "bad_request",
+ error_reason: "Password does not conform to requirements."
+ )
+
+ # With a password that matches all requirements.
+ jchris_user_doc6 = Map.put(jchris_user_doc, "password", "funnnnnyB0N3!")
+ save_as(@users_db_name, jchris_user_doc6, use_session: session, expect_response: 201)
+
+ # With a non-list value.
+ set_config({
+ "couch_httpd_auth",
+ "password_regexp",
+ "{{\".{10,}\"}}"
+ })
+
+ joe_user_doc =
+ prepare_user_doc([
+ {:name, "joe_erlang"},
+ {:password, "querty"}
+ ])
+
+ save_as(
+ @users_db_name,
+ joe_user_doc,
+ use_session: session,
+ expect_response: 403,
+ error_message: "forbidden",
+ error_reason: "Server cannot hash passwords at this time."
+ )
+
+ # With a syntactically invalid value.
+ set_config({
+ "couch_httpd_auth",
+ "password_regexp",
+ "[{\".{10,}\"]"
+ })
+
+ save_as(
+ @users_db_name,
+ joe_user_doc,
+ use_session: session,
+ expect_response: 403,
+ error_message: "forbidden",
+ error_reason: "Server cannot hash passwords at this time."
+ )
+ end
end
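For reference, the policy the test installs with set_config can also live in the server configuration file. A hypothetical sketch (values invented for illustration, not part of the diff): each list element is either a bare regexp string or a {Regexp, Reason} tuple, matching the forms exercised above.

  ; local.ini
  [couch_httpd_auth]
  password_regexp = [{".{10,}", "Password must be at least 10 characters long."}, {"[A-Z]+", "Requirement 2."}, "[a-z]+"]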
diff --git a/test/elixir/test/view_test.exs b/test/elixir/test/view_test.exs
index f768cef16..bfe28b573 100644
--- a/test/elixir/test/view_test.exs
+++ b/test/elixir/test/view_test.exs
@@ -141,4 +141,16 @@ defmodule ViewTest do
assert resp.status_code == 200
assert length(Map.get(resp, :body)["rows"]) == 1
end
+
+ test "POST with boolean parameter", context do
+ resp = Couch.post(
+ "/#{context[:db_name]}/_design/map/_view/some",
+ body: %{
+ :stable => true,
+ :update => true
+ }
+ )
+
+ assert resp.status_code == 200
+ end
end