-rw-r--r--.credo.exs1
-rw-r--r--.gitignore10
-rw-r--r--FDB_NOTES.md57
-rw-r--r--Makefile28
-rw-r--r--Makefile.win3
-rw-r--r--build-aux/Jenkinsfile.pr2
-rwxr-xr-xconfigure19
-rwxr-xr-xdev/run9
-rw-r--r--mix.exs6
-rw-r--r--mix.lock1
-rw-r--r--rebar.config.script31
-rw-r--r--rel/apps/couch_epi.config2
-rw-r--r--rel/files/eunit.config3
-rw-r--r--rel/files/eunit.ini9
-rw-r--r--rel/overlay/etc/default.ini211
-rw-r--r--rel/reltool.config12
-rw-r--r--src/aegis/rebar.config.script35
-rw-r--r--src/aegis/src/aegis.app.src.script53
-rw-r--r--src/aegis/src/aegis.erl72
-rw-r--r--src/aegis/src/aegis.hrl57
-rw-r--r--src/aegis/src/aegis_app.erl26
-rw-r--r--src/aegis/src/aegis_key_manager.erl22
-rw-r--r--src/aegis/src/aegis_keywrap.erl97
-rw-r--r--src/aegis/src/aegis_noop_key_manager.erl31
-rw-r--r--src/aegis/src/aegis_server.erl421
-rw-r--r--src/aegis/src/aegis_sup.erl46
-rw-r--r--src/aegis/test/aegis_basic_test.erl17
-rw-r--r--src/aegis/test/aegis_server_test.erl314
-rw-r--r--src/chttpd/src/chttpd.app.src1
-rw-r--r--src/chttpd/src/chttpd.erl240
-rw-r--r--src/chttpd/src/chttpd_app.erl4
-rw-r--r--src/chttpd/src/chttpd_auth_cache.erl86
-rw-r--r--src/chttpd/src/chttpd_auth_request.erl8
-rw-r--r--src/chttpd/src/chttpd_changes.erl760
-rw-r--r--src/chttpd/src/chttpd_db.erl809
-rw-r--r--src/chttpd/src/chttpd_external.erl35
-rw-r--r--src/chttpd/src/chttpd_handlers.erl23
-rw-r--r--src/chttpd/src/chttpd_httpd_handlers.erl482
-rw-r--r--src/chttpd/src/chttpd_misc.erl192
-rw-r--r--src/chttpd/src/chttpd_node.erl4
-rw-r--r--src/chttpd/src/chttpd_show.erl5
-rw-r--r--src/chttpd/src/chttpd_stats.erl96
-rw-r--r--src/chttpd/src/chttpd_sup.erl16
-rw-r--r--src/chttpd/src/chttpd_test_util.erl2
-rw-r--r--src/chttpd/src/chttpd_view.erl189
-rw-r--r--src/chttpd/test/eunit/chttpd_db_bulk_get_multipart_test.erl31
-rw-r--r--src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl30
-rw-r--r--src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl44
-rw-r--r--src/chttpd/test/eunit/chttpd_db_test.erl50
-rw-r--r--src/chttpd/test/eunit/chttpd_dbs_info_test.erl13
-rw-r--r--src/chttpd/test/eunit/chttpd_delayed_test.erl73
-rw-r--r--src/chttpd/test/eunit/chttpd_deleted_dbs_test.erl234
-rw-r--r--src/chttpd/test/eunit/chttpd_handlers_tests.erl2
-rw-r--r--src/chttpd/test/eunit/chttpd_open_revs_error_test.erl112
-rw-r--r--src/chttpd/test/eunit/chttpd_purge_tests.erl6
-rw-r--r--src/chttpd/test/eunit/chttpd_security_tests.erl57
-rw-r--r--src/chttpd/test/eunit/chttpd_session_tests.erl74
-rw-r--r--src/chttpd/test/eunit/chttpd_stats_tests.erl77
-rw-r--r--src/chttpd/test/eunit/chttpd_test.hrl35
-rw-r--r--src/chttpd/test/eunit/chttpd_view_test.erl4
-rw-r--r--src/chttpd/test/exunit/pagination_test.exs1393
-rw-r--r--src/chttpd/test/exunit/test_helper.exs2
-rw-r--r--src/chttpd/test/exunit/tracing_test.exs101
-rw-r--r--src/couch/.gitignore2
-rw-r--r--src/couch/include/couch_db.hrl3
-rw-r--r--src/couch/priv/icu_driver/couch_icu_driver.c74
-rw-r--r--src/couch/priv/stats_descriptions.cfg4
-rw-r--r--src/couch/rebar.config.script9
-rw-r--r--src/couch/src/couch_att.erl1077
-rw-r--r--src/couch/src/couch_db.erl2
-rw-r--r--src/couch/src/couch_doc.erl11
-rw-r--r--src/couch/src/couch_httpd_auth.erl3
-rw-r--r--src/couch/src/couch_lru.erl5
-rw-r--r--src/couch/src/couch_multidb_changes.erl14
-rw-r--r--src/couch/src/couch_native_process.erl17
-rw-r--r--src/couch/src/couch_proc_manager.erl17
-rw-r--r--src/couch/src/couch_server.erl12
-rw-r--r--src/couch/src/couch_stream.erl16
-rw-r--r--src/couch/src/couch_util.erl50
-rw-r--r--src/couch/src/couch_work_queue.erl25
-rw-r--r--src/couch/test/eunit/couch_util_tests.erl140
-rw-r--r--src/couch_epi/test/eunit/couch_epi_tests.erl3
-rw-r--r--src/couch_eval/README.md5
-rw-r--r--src/couch_eval/rebar.config (renamed from src/mango/src/mango_idx_view.hrl)5
-rw-r--r--src/couch_eval/src/couch_eval.app.src23
-rw-r--r--src/couch_eval/src/couch_eval.erl100
-rw-r--r--src/couch_expiring_cache/README.md71
-rw-r--r--src/couch_expiring_cache/include/couch_expiring_cache.hrl17
-rw-r--r--src/couch_expiring_cache/rebar.config14
-rw-r--r--src/couch_expiring_cache/src/couch_expiring_cache.app.src27
-rw-r--r--src/couch_expiring_cache/src/couch_expiring_cache.erl56
-rw-r--r--src/couch_expiring_cache/src/couch_expiring_cache_fdb.erl155
-rw-r--r--src/couch_expiring_cache/src/couch_expiring_cache_server.erl123
-rw-r--r--src/couch_expiring_cache/test/couch_expiring_cache_tests.erl147
-rw-r--r--src/couch_index/src/couch_index.erl19
-rw-r--r--src/couch_jobs/.gitignore4
-rw-r--r--src/couch_jobs/README.md62
-rw-r--r--src/couch_jobs/rebar.config14
-rw-r--r--src/couch_jobs/src/couch_jobs.app.src31
-rw-r--r--src/couch_jobs/src/couch_jobs.erl423
-rw-r--r--src/couch_jobs/src/couch_jobs.hrl52
-rw-r--r--src/couch_jobs/src/couch_jobs_activity_monitor.erl140
-rw-r--r--src/couch_jobs/src/couch_jobs_activity_monitor_sup.erl64
-rw-r--r--src/couch_jobs/src/couch_jobs_app.erl26
-rw-r--r--src/couch_jobs/src/couch_jobs_fdb.erl725
-rw-r--r--src/couch_jobs/src/couch_jobs_notifier.erl314
-rw-r--r--src/couch_jobs/src/couch_jobs_notifier_sup.erl64
-rw-r--r--src/couch_jobs/src/couch_jobs_pending.erl143
-rw-r--r--src/couch_jobs/src/couch_jobs_server.erl193
-rw-r--r--src/couch_jobs/src/couch_jobs_sup.erl66
-rw-r--r--src/couch_jobs/src/couch_jobs_type_monitor.erl84
-rw-r--r--src/couch_jobs/test/couch_jobs_tests.erl762
-rw-r--r--src/couch_js/README.md6
-rw-r--r--src/couch_js/src/couch_js.app.src27
-rw-r--r--src/couch_js/src/couch_js.erl51
-rw-r--r--src/couch_js/src/couch_js_app.erl31
-rw-r--r--src/couch_js/src/couch_js_io_logger.erl107
-rw-r--r--src/couch_js/src/couch_js_native_process.erl468
-rw-r--r--src/couch_js/src/couch_js_os_process.erl265
-rw-r--r--src/couch_js/src/couch_js_proc_manager.erl615
-rw-r--r--src/couch_js/src/couch_js_query_servers.erl683
-rw-r--r--src/couch_js/src/couch_js_sup.erl45
-rw-r--r--src/couch_js/test/couch_js_proc_manager_tests.erl373
-rw-r--r--src/couch_js/test/couch_js_query_servers_tests.erl96
-rw-r--r--src/couch_log/src/couch_log_config.erl11
-rw-r--r--src/couch_log/src/couch_log_config_dyn.erl3
-rw-r--r--src/couch_log/src/couch_log_formatter.erl24
-rw-r--r--src/couch_log/src/couch_log_sup.erl2
-rw-r--r--src/couch_log/test/eunit/couch_log_config_test.erl37
-rw-r--r--src/couch_log/test/eunit/couch_log_formatter_test.erl114
-rw-r--r--src/couch_mrview/include/couch_mrview.hrl8
-rw-r--r--src/couch_mrview/src/couch_mrview.erl26
-rw-r--r--src/couch_mrview/src/couch_mrview_http.erl24
-rw-r--r--src/couch_mrview/src/couch_mrview_index.erl12
-rw-r--r--src/couch_mrview/src/couch_mrview_util.erl2
-rw-r--r--src/couch_peruser/src/couch_peruser.erl13
-rw-r--r--src/couch_rate/README.md155
-rw-r--r--src/couch_rate/src/couch_rate.app.src24
-rw-r--r--src/couch_rate/src/couch_rate.erl318
-rw-r--r--src/couch_rate/src/couch_rate.hrl19
-rw-r--r--src/couch_rate/src/couch_rate_app.erl28
-rw-r--r--src/couch_rate/src/couch_rate_config.erl66
-rw-r--r--src/couch_rate/src/couch_rate_ets.erl119
-rw-r--r--src/couch_rate/src/couch_rate_limiter.erl392
-rw-r--r--src/couch_rate/src/couch_rate_pd.erl90
-rw-r--r--src/couch_rate/src/couch_rate_sup.erl36
-rw-r--r--src/couch_rate/test/exunit/couch_rate_config_test.exs88
-rw-r--r--src/couch_rate/test/exunit/couch_rate_limiter_test.exs350
-rw-r--r--src/couch_rate/test/exunit/test_helper.exs14
-rw-r--r--src/couch_replicator/src/couch_replicator_auth_session.erl2
-rw-r--r--src/couch_replicator/src/couch_replicator_httpc_pool.erl14
-rw-r--r--src/couch_replicator/src/couch_replicator_scheduler.erl3
-rw-r--r--src/couch_stats/src/couch_stats_aggregator.erl17
-rw-r--r--src/couch_views/.gitignore19
-rw-r--r--src/couch_views/README.md48
-rw-r--r--src/couch_views/include/couch_views.hrl33
-rw-r--r--src/couch_views/rebar.config14
-rw-r--r--src/couch_views/src/couch_views.app.src34
-rw-r--r--src/couch_views/src/couch_views.erl236
-rw-r--r--src/couch_views/src/couch_views_app.erl31
-rw-r--r--src/couch_views/src/couch_views_ddoc.erl42
-rw-r--r--src/couch_views/src/couch_views_encoding.erl117
-rw-r--r--src/couch_views/src/couch_views_epi.erl60
-rw-r--r--src/couch_views/src/couch_views_fabric2_plugin.erl24
-rw-r--r--src/couch_views/src/couch_views_fdb.erl475
-rw-r--r--src/couch_views/src/couch_views_http.erl359
-rw-r--r--src/couch_views/src/couch_views_indexer.erl575
-rw-r--r--src/couch_views/src/couch_views_jobs.erl163
-rw-r--r--src/couch_views/src/couch_views_plugin.erl40
-rw-r--r--src/couch_views/src/couch_views_reader.erl216
-rw-r--r--src/couch_views/src/couch_views_server.erl176
-rw-r--r--src/couch_views/src/couch_views_sup.erl66
-rw-r--r--src/couch_views/src/couch_views_updater.erl110
-rw-r--r--src/couch_views/src/couch_views_util.erl305
-rw-r--r--src/couch_views/test/couch_views_active_tasks_test.erl168
-rw-r--r--src/couch_views/test/couch_views_cleanup_test.erl411
-rw-r--r--src/couch_views/test/couch_views_encoding_test.erl117
-rw-r--r--src/couch_views/test/couch_views_indexer_test.erl754
-rw-r--r--src/couch_views/test/couch_views_info_test.erl174
-rw-r--r--src/couch_views/test/couch_views_map_test.erl610
-rw-r--r--src/couch_views/test/couch_views_server_test.erl218
-rw-r--r--src/couch_views/test/couch_views_size_test.erl564
-rw-r--r--src/couch_views/test/couch_views_trace_index_test.erl145
-rw-r--r--src/couch_views/test/couch_views_updater_test.erl240
-rw-r--r--src/ctrace/README.md308
-rw-r--r--src/ctrace/rebar.config14
-rw-r--r--src/ctrace/src/ctrace.app.src27
-rw-r--r--src/ctrace/src/ctrace.erl361
-rw-r--r--src/ctrace/src/ctrace.hrl15
-rw-r--r--src/ctrace/src/ctrace_app.erl26
-rw-r--r--src/ctrace/src/ctrace_config.erl153
-rw-r--r--src/ctrace/src/ctrace_dsl.erl106
-rw-r--r--src/ctrace/src/ctrace_sup.erl41
-rw-r--r--src/ctrace/test/ctrace_config_test.erl153
-rw-r--r--src/ctrace/test/ctrace_dsl_test.erl123
-rw-r--r--src/ctrace/test/ctrace_test.erl412
-rw-r--r--src/ddoc_cache/src/ddoc_cache_entry.erl21
-rw-r--r--src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl2
-rw-r--r--src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl2
-rw-r--r--src/dreyfus/src/dreyfus_index.erl26
-rw-r--r--src/ebtree/.gitignore3
-rw-r--r--src/ebtree/README.md19
-rw-r--r--src/ebtree/rebar.config17
-rw-r--r--src/ebtree/src/ebtree.app.src27
-rw-r--r--src/ebtree/src/ebtree.erl1803
-rw-r--r--src/fabric/include/fabric2.hrl87
-rw-r--r--src/fabric/src/fabric.app.src11
-rw-r--r--src/fabric/src/fabric.erl100
-rw-r--r--src/fabric/src/fabric2_active_tasks.erl52
-rw-r--r--src/fabric/src/fabric2_app.erl32
-rw-r--r--src/fabric/src/fabric2_db.erl2354
-rw-r--r--src/fabric/src/fabric2_db_expiration.erl246
-rw-r--r--src/fabric/src/fabric2_db_plugin.erl102
-rw-r--r--src/fabric/src/fabric2_epi.erl48
-rw-r--r--src/fabric/src/fabric2_events.erl102
-rw-r--r--src/fabric/src/fabric2_fdb.erl2082
-rw-r--r--src/fabric/src/fabric2_index.erl241
-rw-r--r--src/fabric/src/fabric2_node_types.erl52
-rw-r--r--src/fabric/src/fabric2_server.erl276
-rw-r--r--src/fabric/src/fabric2_sup.erl69
-rw-r--r--src/fabric/src/fabric2_txids.erl153
-rw-r--r--src/fabric/src/fabric2_users_db.erl144
-rw-r--r--src/fabric/src/fabric2_util.erl405
-rw-r--r--src/fabric/src/fabric_db_create.erl82
-rw-r--r--src/fabric/src/fabric_db_info.erl62
-rw-r--r--src/fabric/src/fabric_doc_open.erl852
-rw-r--r--src/fabric/src/fabric_doc_open_revs.erl968
-rw-r--r--src/fabric/src/fabric_doc_purge.erl690
-rw-r--r--src/fabric/src/fabric_doc_update.erl310
-rw-r--r--src/fabric/src/fabric_rpc.erl38
-rw-r--r--src/fabric/src/fabric_streams.erl157
-rw-r--r--src/fabric/src/fabric_util.erl48
-rw-r--r--src/fabric/src/fabric_view.erl126
-rw-r--r--src/fabric/src/fabric_view_changes.erl362
-rw-r--r--src/fabric/test/eunit/fabric_rpc_purge_tests.erl307
-rw-r--r--src/fabric/test/fabric2_active_tasks_tests.erl120
-rw-r--r--src/fabric/test/fabric2_changes_fold_tests.erl241
-rw-r--r--src/fabric/test/fabric2_db_crud_tests.erl750
-rw-r--r--src/fabric/test/fabric2_db_fold_doc_docids_tests.erl150
-rw-r--r--src/fabric/test/fabric2_db_misc_tests.erl445
-rw-r--r--src/fabric/test/fabric2_db_security_tests.erl219
-rw-r--r--src/fabric/test/fabric2_db_size_tests.erl918
-rw-r--r--src/fabric/test/fabric2_dir_prefix_tests.erl71
-rw-r--r--src/fabric/test/fabric2_doc_att_tests.erl331
-rw-r--r--src/fabric/test/fabric2_doc_count_tests.erl278
-rw-r--r--src/fabric/test/fabric2_doc_crud_tests.erl1018
-rw-r--r--src/fabric/test/fabric2_doc_fold_tests.erl378
-rw-r--r--src/fabric/test/fabric2_doc_size_tests.erl320
-rw-r--r--src/fabric/test/fabric2_fdb_tx_retry_tests.erl176
-rw-r--r--src/fabric/test/fabric2_get_design_docs_tests.erl138
-rw-r--r--src/fabric/test/fabric2_index_tests.erl304
-rw-r--r--src/fabric/test/fabric2_local_doc_fold_tests.erl295
-rw-r--r--src/fabric/test/fabric2_node_types_tests.erl66
-rw-r--r--src/fabric/test/fabric2_rev_stemming.erl205
-rw-r--r--src/fabric/test/fabric2_test.hrl33
-rw-r--r--src/fabric/test/fabric2_test_util.erl76
-rw-r--r--src/fabric/test/fabric2_trace_db_create_tests.erl47
-rw-r--r--src/fabric/test/fabric2_trace_db_delete_tests.erl50
-rw-r--r--src/fabric/test/fabric2_trace_db_open_tests.erl51
-rw-r--r--src/fabric/test/fabric2_trace_doc_create_tests.erl87
-rw-r--r--src/fabric/test/fabric2_tx_options_tests.erl103
-rw-r--r--src/fabric/test/fabric2_update_docs_tests.erl208
-rw-r--r--src/global_changes/src/global_changes_httpd_handlers.erl8
-rw-r--r--src/global_changes/src/global_changes_server.erl11
-rw-r--r--src/ken/src/ken_server.erl16
-rw-r--r--src/mango/src/mango_crud.erl35
-rw-r--r--src/mango/src/mango_cursor.erl10
-rw-r--r--src/mango/src/mango_cursor_text.erl9
-rw-r--r--src/mango/src/mango_cursor_view.erl183
-rw-r--r--src/mango/src/mango_epi.erl4
-rw-r--r--src/mango/src/mango_error.erl14
-rw-r--r--src/mango/src/mango_eval.erl115
-rw-r--r--src/mango/src/mango_execution_stats.erl8
-rw-r--r--src/mango/src/mango_execution_stats.hrl1
-rw-r--r--src/mango/src/mango_httpd.erl166
-rw-r--r--src/mango/src/mango_httpd_handlers.erl31
-rw-r--r--src/mango/src/mango_idx.erl207
-rw-r--r--src/mango/src/mango_idx.hrl5
-rw-r--r--src/mango/src/mango_idx_special.erl4
-rw-r--r--src/mango/src/mango_idx_text.erl1
-rw-r--r--src/mango/src/mango_idx_view.erl28
-rw-r--r--src/mango/src/mango_native_proc.erl378
-rw-r--r--src/mango/src/mango_opts.erl30
-rw-r--r--src/mango/src/mango_plugin.erl46
-rw-r--r--src/mango/src/mango_util.erl58
-rw-r--r--src/mango/test/01-index-crud-test.py15
-rw-r--r--src/mango/test/02-basic-find-test.py15
-rw-r--r--src/mango/test/05-index-selection-test.py7
-rw-r--r--src/mango/test/12-use-correct-index-test.py38
-rw-r--r--src/mango/test/13-stable-update-test.py51
-rw-r--r--src/mango/test/13-users-db-find-test.py7
-rw-r--r--src/mango/test/15-execution-stats-test.py7
-rw-r--r--src/mango/test/16-index-selectors-test.py8
-rw-r--r--src/mango/test/17-multi-type-value-test.py4
-rw-r--r--src/mango/test/19-find-conflicts.py7
-rw-r--r--src/mango/test/20-no-timeout-test.py32
-rw-r--r--src/mango/test/22-build-wait-selected-index.py50
-rw-r--r--src/mango/test/mango.py26
-rw-r--r--src/mango/test/user_docs.py7
-rw-r--r--src/mem3/src/mem3_httpd_handlers.erl38
-rw-r--r--src/mem3/src/mem3_reshard.erl5
-rw-r--r--src/setup/src/setup.erl2
-rw-r--r--src/setup/src/setup_httpd.erl17
-rw-r--r--src/setup/src/setup_httpd_handlers.erl12
-rw-r--r--test/elixir/lib/couch.ex54
-rwxr-xr-xtest/elixir/run-only3
-rw-r--r--test/elixir/test/all_docs_test.exs250
-rw-r--r--test/elixir/test/auth_cache_test.exs2
-rw-r--r--test/elixir/test/basics_test.exs227
-rw-r--r--test/elixir/test/map_test.exs627
-rw-r--r--test/elixir/test/partition_all_docs_test.exs204
-rw-r--r--test/elixir/test/partition_crud_test.exs369
-rw-r--r--test/elixir/test/partition_ddoc_test.exs179
-rw-r--r--test/elixir/test/partition_design_docs_test.exs16
-rw-r--r--test/elixir/test/partition_helpers.exs76
-rw-r--r--test/elixir/test/partition_mango_test.exs683
-rw-r--r--test/elixir/test/partition_size_test.exs361
-rw-r--r--test/elixir/test/partition_view_test.exs374
-rw-r--r--test/elixir/test/partition_view_update_test.exs160
-rw-r--r--test/elixir/test/replication_test.exs151
-rw-r--r--test/elixir/test/reshard_all_docs_test.exs79
-rw-r--r--test/elixir/test/reshard_basic_test.exs174
-rw-r--r--test/elixir/test/reshard_changes_feed.exs81
-rw-r--r--test/elixir/test/reshard_helpers.exs114
-rw-r--r--test/elixir/test/security_validation_test.exs4
-rw-r--r--test/elixir/test/test_helper.exs2
-rw-r--r--test/elixir/test/view_collation_test.exs28
327 files changed, 43069 insertions, 7780 deletions
diff --git a/.credo.exs b/.credo.exs
index 64d281e5e..e27ad06ad 100644
--- a/.credo.exs
+++ b/.credo.exs
@@ -37,6 +37,7 @@
~r"/src/metrics",
~r"/src/minerl",
~r"/src/parse_trans",
+ ~r"/src/stream_data",
~r"/src/ssl_verify_fun",
~r"/test/elixir/deps/"
]
diff --git a/.gitignore b/.gitignore
index 6223d7322..ead569d1d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,6 +11,8 @@
.DS_Store
.vscode
.rebar/
+.rebar3/
+.erlfdb/
.eunit/
cover/
core
@@ -28,6 +30,7 @@ ebin/
erl_crash.dump
erln8.config
install.mk
+rebar.config
rel/*.config
rel/couchdb
rel/dev*
@@ -48,6 +51,7 @@ src/couch/priv/icu_driver/couch_icu_driver.d
src/mango/src/mango_cursor_text.nocompile
src/docs/
src/emilio/
+src/erlfdb/
src/ets_lru/
src/excoveralls/
src/fauxton/
@@ -58,21 +62,27 @@ src/hyper/
src/ibrowse/
src/idna/
src/ioq/
+src/hqueue/
+src/jaeger_passage/
src/jiffy/
src/ken/
src/khash/
+src/local/
src/meck/
src/metrics/
src/mimerl/
src/mochiweb/
src/oauth/
src/parse_trans/
+src/passage/
src/proper/
src/rebar/
src/recon/
src/smoosh/
src/snappy/
+src/stream_data/
src/ssl_verify_fun/
+src/thrift_protocol/
src/triq/
src/unicode_util_compat/
tmp/
diff --git a/FDB_NOTES.md b/FDB_NOTES.md
new file mode 100644
index 000000000..c0cdc8cc2
--- /dev/null
+++ b/FDB_NOTES.md
@@ -0,0 +1,57 @@
+Things of Note
+===
+
+
+1. If a replication sends us two revisions A and B where one is an
+ ancestor of the other, we likely have divergent behavior. However,
+ this should never happen In Theory.
+
+2. For multiple updates to the same document in a _bulk_docs request (or
+ updates that just happen to be in the same update batch in non-fdb
+ CouchDB), we likely have subtly different behavior.
+
+3. I'm relying on repeated reads in an fdb transaction to be "cheap"
+ in that the reads would be cached in the fdb_transaction object.
+ This needs to be checked for certainty but that appeared to
+ be how things behaved in testing.
+
+4. When attempting to create a doc from scratch in an interactive_edit
+ update, with revisions specified *and* attachment stubs, the reported
+ error is now a conflict. Previously the missing_stubs error was
+ raised earlier.
+
+5. There may be a difference in behavior if a) there are no VDU functions
+ set on a db and b) no design documents in a batch. This is because in
+ this situation we don't run the prep_and_validate code on pre-fdb
+ CouchDB. The new code always checks stubs before merging revision trees.
+ I'm sure the old way would fail somehow, but it would fail further on
+ which means we may have failed with a different reason (conflict, etc)
+ before we got to the next place we check for missing stubs.
+
+6. For multi-doc updates we'll need to investigate user versions on
+ versionstamps within a transaction. Also this likely prevents the
+ ability to have multiple updates to the same doc in a single
+ _bulk_docs transaction.
+
+7. Document body storage needs to be implemented beyond the single
+ key/value approach.
+
+8. We'll want to look at how we currently apply open options to individual
+ elements of an open_revs call. Might turn out that we have to grab a
+ full FDI even if we could look up a rev directly. (i.e., revs_info
+ would require us having the entire FDI, however it'd be wasteful to return
+ all of that in an open_revs call, but bug compatibility ftw!)
+
+9. Is it possible that a server_admin can delete a db without being able
+ to open it? If so that's probably changed behavior.
+
+10. Getting the doc count might be a problem for _all_docs on large, active
+ databases. If we allow range requests up to 5s, and we continue to return
+ the total doc count, we may have to play games with snapshot reads on
+ the doc count key or else it'll whack any _all_docs range requests.
+
+11. Revision infos need to track their size. If we want to maintain a database
+ size counter, we'll want to store the size of a given doc body for each
+ revision so that we don't have to read the old body when updating the tree.
+
+12. Update sequences do not yet include an incarnation value. \ No newline at end of file
diff --git a/Makefile b/Makefile
index 53cea3bc8..6812e0c37 100644
--- a/Makefile
+++ b/Makefile
@@ -144,9 +144,11 @@ fauxton: share/www
################################################################################
-.PHONY: check
+# When we can run all the tests with FDB, switch this back to being the
+# default "make check" command
+.PHONY: check-all-tests
# target: check - Test everything
-check: all python-black
+check-all-tests: all python-black
@$(MAKE) emilio
@$(MAKE) eunit
@$(MAKE) javascript
@@ -159,6 +161,14 @@ else
subdirs=$(shell ls src)
endif
+.PHONY: check
+check: all
+ @$(MAKE) emilio
+ make eunit apps=couch_eval,couch_expiring_cache,ctrace,couch_jobs,couch_views,fabric,mango,chttpd
+ make elixir tests=test/elixir/test/basics_test.exs,test/elixir/test/replication_test.exs,test/elixir/test/map_test.exs,test/elixir/test/all_docs_test.exs,test/elixir/test/bulk_docs_test.exs
+ make exunit apps=couch_rate,chttpd
+ make mango-test
+
.PHONY: eunit
# target: eunit - Run EUnit tests, use EUNIT_OPTS to provide custom options
eunit: export BUILDDIR = $(shell pwd)
@@ -179,6 +189,7 @@ exunit: export MIX_ENV=test
exunit: export ERL_LIBS = $(shell pwd)/src
exunit: export ERL_AFLAGS = -config $(shell pwd)/rel/files/eunit.config
exunit: export COUCHDB_QUERY_SERVER_JAVASCRIPT = $(shell pwd)/bin/couchjs $(shell pwd)/share/server/main.js
+exunit: export COUCHDB_TEST_ADMIN_PARTY_OVERRIDE=1
exunit: couch elixir-init setup-eunit elixir-check-formatted elixir-credo
@mix test --cover --trace $(EXUNIT_OPTS)
@@ -200,7 +211,7 @@ soak-eunit: couch
while [ $$? -eq 0 ] ; do $(REBAR) -r eunit $(EUNIT_OPTS) ; done
emilio:
- @bin/emilio -c emilio.config src/ | bin/warnings_in_scope -s 3
+ @bin/emilio -c emilio.config src/ | bin/warnings_in_scope -s 3 || exit 0
.venv/bin/black:
@python3 -m venv .venv
@@ -227,10 +238,11 @@ python-black-update: .venv/bin/black
elixir: export MIX_ENV=integration
elixir: export COUCHDB_TEST_ADMIN_PARTY_OVERRIDE=1
elixir: elixir-init elixir-check-formatted elixir-credo devclean
- @dev/run "$(TEST_OPTS)" -a adm:pass -n 1 \
- --enable-erlang-views \
- --locald-config test/elixir/test/config/test-config.ini \
- --no-eval 'mix test --trace --exclude without_quorum_test --exclude with_quorum_test $(EXUNIT_OPTS)'
+ @dev/run "$(TEST_OPTS)" -a adm:pass -n 1 --enable-erlang-views --no-eval --erlang-config=rel/files/eunit.config 'mix test --trace --exclude without_quorum_test --exclude with_quorum_test $(EXUNIT_OPTS)'
+
+.PHONY: elixir-only
+elixir-only: devclean
+ @dev/run "$(TEST_OPTS)" -a adm:pass -n 1 --enable-erlang-views --no-eval --erlang-config=rel/files/eunit.config 'mix test --trace --exclude without_quorum_test --exclude with_quorum_test $(EXUNIT_OPTS)'
.PHONY: elixir-init
elixir-init: MIX_ENV=test
@@ -348,7 +360,7 @@ mango-test: devclean all
@cd src/mango && \
python3 -m venv .venv && \
.venv/bin/python3 -m pip install -r requirements.txt
- @cd src/mango && ../../dev/run "$(TEST_OPTS)" -n 1 --admin=testuser:testpass '.venv/bin/python3 -m nose --with-xunit'
+ @cd src/mango && ../../dev/run "$(TEST_OPTS)" -n 1 --admin=adm:pass --erlang-config=rel/files/eunit.config '.venv/bin/python3 -m nose -v --with-xunit'
################################################################################
# Developing
diff --git a/Makefile.win b/Makefile.win
index 7e14a53cc..edfc1bf3b 100644
--- a/Makefile.win
+++ b/Makefile.win
@@ -163,6 +163,7 @@ exunit: export MIX_ENV=test
exunit: export ERL_LIBS = $(shell echo %cd%)\src
exunit: export ERL_AFLAGS = -config $(shell echo %cd%)/rel/files/eunit.config
exunit: export COUCHDB_QUERY_SERVER_JAVASCRIPT = $(shell echo %cd%)/bin/couchjs $(shell echo %cd%)/share/server/main.js
+exunit: export COUCHDB_TEST_ADMIN_PARTY_OVERRIDE=1
exunit: couch elixir-init setup-eunit elixir-check-formatted elixir-credo
@mix test --cover --trace $(EXUNIT_OPTS)
@@ -177,7 +178,7 @@ just-eunit:
@$(REBAR) -r eunit $(EUNIT_OPTS)
emilio:
- @bin\emilio -c emilio.config src\ | python.exe bin\warnings_in_scope -s 3
+ @bin\emilio -c emilio.config src\ | python.exe bin\warnings_in_scope -s 3 || exit 0
.venv/bin/black:
@python.exe -m venv .venv
diff --git a/build-aux/Jenkinsfile.pr b/build-aux/Jenkinsfile.pr
index 8c9cbd930..ca548ff27 100644
--- a/build-aux/Jenkinsfile.pr
+++ b/build-aux/Jenkinsfile.pr
@@ -21,7 +21,7 @@ cd build
tar -xf ${WORKSPACE}/apache-couchdb-*.tar.gz
cd apache-couchdb-*
. /usr/local/kerl/${KERL_VER}/activate
-./configure --with-curl --spidermonkey-version 60
+./configure --with-curl
make check || (make build-report && false)
'''
diff --git a/configure b/configure
index 854366c8a..b91b18da7 100755
--- a/configure
+++ b/configure
@@ -96,6 +96,24 @@ parse_opts() {
continue
;;
+ --key-manager)
+ if [ -n "$2" ]; then
+ eval AEGIS_KEY_MANAGER=$2
+ shift 2
+ continue
+ else
+ printf 'ERROR: "--key-manager" requires a non-empty argument.\n' >&2
+ exit 1
+ fi
+ ;;
+ --key-manager=?*)
+ eval AEGIS_KEY_MANAGER=${1#*=}
+ ;;
+ --key-manager=)
+ printf 'ERROR: "--key-manager" requires a non-empty argument.\n' >&2
+ exit 1
+ ;;
+
--dev)
WITH_DOCS=0
WITH_FAUXTON=0
@@ -241,6 +259,7 @@ cat > $rootdir/config.erl << EOF
{with_curl, $WITH_CURL}.
{with_proper, $WITH_PROPER}.
{erlang_md5, $ERLANG_MD5}.
+{aegis_key_manager, "$AEGIS_KEY_MANAGER"}.
{spidermonkey_version, "$SM_VSN"}.
EOF
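The new --key-manager option above only records a module name. As an illustration, assuming a hypothetical module called my_key_manager, a run of `./configure --key-manager=my_key_manager` would leave the following term in config.erl, which src/aegis/rebar.config.script further down in this diff turns into a compile-time define for the aegis application:

    %% Written to config.erl by configure (module name is hypothetical):
    {aegis_key_manager, "my_key_manager"}.
    %% src/aegis/rebar.config.script then adds roughly this entry to erl_opts:
    %% {d, 'AEGIS_KEY_MANAGER', my_key_manager}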
diff --git a/dev/run b/dev/run
index 573c80c9b..96b6b5ccf 100755
--- a/dev/run
+++ b/dev/run
@@ -186,6 +186,12 @@ def get_args_parser():
help="Optional key=val config overrides. Can be repeated",
)
parser.add_option(
+ "--erlang-config",
+ dest="erlang_config",
+ default="rel/files/sys.config",
+ help="Specify an alternative Erlang application configuration"
+ )
+ parser.add_option(
"--degrade-cluster",
dest="degrade_cluster",
type=int,
@@ -241,6 +247,7 @@ def setup_context(opts, args):
"haproxy": opts.haproxy,
"haproxy_port": opts.haproxy_port,
"config_overrides": opts.config_overrides,
+ "erlang_config": opts.erlang_config,
"no_eval": opts.no_eval,
"extra_args": opts.extra_args,
"reset_logs": True,
@@ -602,7 +609,7 @@ def boot_node(ctx, node):
"-args_file",
os.path.join(node_etcdir, "vm.args"),
"-config",
- os.path.join(reldir, "files", "sys"),
+ os.path.join(ctx["rootdir"], ctx["erlang_config"]),
"-couch_ini",
os.path.join(node_etcdir, "default.ini"),
os.path.join(node_etcdir, "local.ini"),
diff --git a/mix.exs b/mix.exs
index ae42af5d6..8e930abbc 100644
--- a/mix.exs
+++ b/mix.exs
@@ -49,11 +49,14 @@ defmodule CouchDBTest.Mixfile do
# Run "mix help compile.app" to learn about applications.
def application do
[
- extra_applications: [:logger],
+ extra_applications: extra_applications(Mix.env()),
applications: [:httpotion]
]
end
+ defp extra_applications(:test), do: [:logger, :stream_data]
+ defp extra_applications(_), do: [:logger]
+
# Specifies which paths to compile per environment.
defp elixirc_paths(:test), do: ["test/elixir/lib", "test/elixir/test/support"]
defp elixirc_paths(:integration), do: ["test/elixir/lib", "test/elixir/test/support"]
@@ -71,6 +74,7 @@ defmodule CouchDBTest.Mixfile do
{:ibrowse,
path: Path.expand("src/ibrowse", __DIR__), override: true, compile: false},
-{:credo, "~> 1.4.0", only: [:dev, :test, :integration], runtime: false}
+{:credo, "~> 1.4.0", only: [:dev, :test, :integration], runtime: false},
+{:stream_data, "~> 0.4.3", only: [:dev, :test, :integration], runtime: false}
]
end
diff --git a/mix.lock b/mix.lock
index 8b6489f0c..4b2d70412 100644
--- a/mix.lock
+++ b/mix.lock
@@ -14,5 +14,6 @@
"mimerl": {:hex, :mimerl, "1.2.0", "67e2d3f571088d5cfd3e550c383094b47159f3eee8ffa08e64106cdf5e981be3", [:rebar3], [], "hexpm", "f278585650aa581986264638ebf698f8bb19df297f66ad91b18910dfc6e19323"},
"parse_trans": {:hex, :parse_trans, "3.3.0", "09765507a3c7590a784615cfd421d101aec25098d50b89d7aa1d66646bc571c1", [:rebar3], [], "hexpm", "17ef63abde837ad30680ea7f857dd9e7ced9476cdd7b0394432af4bfc241b960"},
"ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.5", "6eaf7ad16cb568bb01753dbbd7a95ff8b91c7979482b95f38443fe2c8852a79b", [:make, :mix, :rebar3], [], "hexpm", "13104d7897e38ed7f044c4de953a6c28597d1c952075eb2e328bc6d6f2bfc496"},
+ "stream_data": {:hex, :stream_data, "0.4.3", "62aafd870caff0849a5057a7ec270fad0eb86889f4d433b937d996de99e3db25", [:mix], [], "hexpm", "7dafd5a801f0bc897f74fcd414651632b77ca367a7ae4568778191fc3bf3a19a"},
"unicode_util_compat": {:hex, :unicode_util_compat, "0.4.1", "d869e4c68901dd9531385bb0c8c40444ebf624e60b6962d95952775cac5e90cd", [:rebar3], [], "hexpm", "1d1848c40487cdb0b30e8ed975e34e025860c02e419cb615d255849f3427439d"},
}
diff --git a/rebar.config.script b/rebar.config.script
index d8afc10e3..2a12c9df5 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -117,19 +117,27 @@ SubDirs = [
"src/couch_log",
"src/chttpd",
"src/couch",
+ "src/couch_eval",
"src/couch_event",
"src/mem3",
"src/couch_index",
"src/couch_mrview",
+ "src/couch_js",
+ "src/couch_rate",
"src/couch_replicator",
"src/couch_plugins",
"src/couch_pse_tests",
"src/couch_stats",
"src/couch_peruser",
"src/couch_tests",
+ "src/couch_views",
+ "src/ctrace",
"src/ddoc_cache",
"src/dreyfus",
"src/fabric",
+ "src/aegis",
+ "src/couch_jobs",
+ "src/couch_expiring_cache",
"src/global_changes",
"src/ioq",
"src/jwtf",
@@ -138,13 +146,15 @@ SubDirs = [
"src/rexi",
"src/setup",
"src/smoosh",
+ "src/ebtree",
"rel"
].
DepDescs = [
%% Independent Apps
-{config, "config", {tag, "2.1.7"}},
+{config, "config", {tag, "2.1.8"}},
{b64url, "b64url", {tag, "1.0.2"}},
+{erlfdb, "erlfdb", {tag, "v1.2.2"}},
{ets_lru, "ets-lru", {tag, "1.1.0"}},
{khash, "khash", {tag, "1.1.0"}},
{snappy, "snappy", {tag, "CouchDB-1.0.4"}},
@@ -157,12 +167,16 @@ DepDescs = [
%% Third party deps
{folsom, "folsom", {tag, "CouchDB-0.8.3"}},
{hyper, "hyper", {tag, "CouchDB-2.2.0-6"}},
-{ibrowse, "ibrowse", {tag, "CouchDB-4.0.1-1"}},
+{ibrowse, "ibrowse", {tag, "CouchDB-4.0.1-2"}},
+{jaeger_passage, "jaeger-passage", {tag, "CouchDB-0.1.14-1"}},
{jiffy, "jiffy", {tag, "CouchDB-1.0.4-1"}},
+{local, "local", {tag, "0.2.1"}},
{mochiweb, "mochiweb", {tag, "v2.20.0"}},
{meck, "meck", {tag, "0.8.8"}},
-{recon, "recon", {tag, "2.5.0"}}
-].
+{recon, "recon", {tag, "2.5.0"}},
+{passage, "passage", {tag, "0.2.6"}},
+{thrift_protocol, "thrift-protocol", {tag, "0.1.5"}}
+],
WithProper = lists:keyfind(with_proper, 1, CouchConfig) == {with_proper, true}.
@@ -212,6 +226,11 @@ AddConfig = [
{post_hooks, [{compile, "escript support/build_js.escript"}]}
].
-C = lists:foldl(fun({K, V}, CfgAcc) ->
- lists:keystore(K, 1, CfgAcc, {K, V})
+lists:foldl(fun({K, V}, CfgAcc) ->
+ case lists:keyfind(K, 1, CfgAcc) of
+ {K, Existent} when is_list(Existent) andalso is_list(V) ->
+ lists:keystore(K, 1, CfgAcc, {K, Existent ++ V});
+ false ->
+ lists:keystore(K, 1, CfgAcc, {K, V})
+ end
end, CONFIG, AddConfig).
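The reworked fold above merges list-valued entries from AddConfig into any existing entries in CONFIG instead of overwriting them. A minimal sketch of that merge behavior, using made-up keys and values:

    %% Sketch only: CONFIG0 and AddConfig0 are invented to show the merge semantics.
    CONFIG0 = [{erl_opts, [debug_info]}, {plugins, [p1]}],
    AddConfig0 = [{erl_opts, [{d, 'FOO'}]}, {post_hooks, [{compile, "x"}]}],
    Merged = lists:foldl(fun({K, V}, CfgAcc) ->
        case lists:keyfind(K, 1, CfgAcc) of
            {K, Existing} when is_list(Existing), is_list(V) ->
                %% Key already present with a list value: append instead of replace
                lists:keystore(K, 1, CfgAcc, {K, Existing ++ V});
            false ->
                lists:keystore(K, 1, CfgAcc, {K, V})
        end
    end, CONFIG0, AddConfig0),
    %% Merged =:= [{erl_opts, [debug_info, {d, 'FOO'}]},
    %%             {plugins, [p1]},
    %%             {post_hooks, [{compile, "x"}]}]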
diff --git a/rel/apps/couch_epi.config b/rel/apps/couch_epi.config
index a53721a48..d3711636f 100644
--- a/rel/apps/couch_epi.config
+++ b/rel/apps/couch_epi.config
@@ -12,8 +12,10 @@
{plugins, [
couch_db_epi,
+ fabric2_epi,
chttpd_epi,
couch_index_epi,
+ couch_views_epi,
dreyfus_epi,
global_changes_epi,
mango_epi,
diff --git a/rel/files/eunit.config b/rel/files/eunit.config
index 3c7457d3a..5e96fae9e 100644
--- a/rel/files/eunit.config
+++ b/rel/files/eunit.config
@@ -12,5 +12,6 @@
[
{kernel, [{error_logger, silent}]},
- {sasl, [{sasl_error_logger, false}]}
+ {sasl, [{sasl_error_logger, false}]},
+ {fabric, [{eunit_run, true}]}
].
diff --git a/rel/files/eunit.ini b/rel/files/eunit.ini
index 361ea6669..20277f288 100644
--- a/rel/files/eunit.ini
+++ b/rel/files/eunit.ini
@@ -35,4 +35,11 @@ level = info
[replicator]
; disable jitter to reduce test run times
-startup_jitter = 0 \ No newline at end of file
+startup_jitter = 0
+
+[fabric]
+; disable index auto-updater to avoid interfering with some of the tests
+index_updater_enabled = false
+
+[couch_rate.views]
+opts = #{budget => 100, target => 500, window => 6000, sensitivity => 200, congested_delay => 1} \ No newline at end of file
diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index f3f12ca96..aef81c356 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -36,6 +36,13 @@ default_security = admin_only
; influenced directly with this setting - increase for faster processing at the
; expense of more memory usage.
changes_doc_ids_optimization_threshold = 100
+;
+; Maximum database name length. The default setting is chosen for CouchDB < 4.x
+; compatibility, where it was determined by the maximum file name size. On most
+; current file systems that is 255, and with timestamp and ".couch" extension
+; subtracted it ends up as 238.
+;max_database_name_length = 238
+;
; Maximum document ID length. Can be set to an integer or 'infinity'.
;max_document_id_length = infinity
;
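For reference, a plausible breakdown of the 238 figure mentioned above, assuming the pre-4.x shard file naming scheme <dbname>.<timestamp>.couch and a 255-byte file name limit:

    255 - 1 (".") - 10 (timestamp digits) - 6 (".couch") = 238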
@@ -48,6 +55,14 @@ changes_doc_ids_optimization_threshold = 100
; for size calculation instead of 7.
max_document_size = 8000000 ; bytes
;
+; Maximum number of documents in a _bulk_docs request. Anything larger
+; returns a 413 error for the whole request
+;max_bulk_docs_count = 10000
+;
+; Maximum number of documents in a _bulk_get request. Anything larger
+; returns a 413 error for the whole request
+;max_bulk_get_count = 10000
+;
; Maximum attachment size.
; max_attachment_size = infinity
;
@@ -130,6 +145,9 @@ prefer_minimal = Cache-Control, Content-Length, Content-Range, Content-Type, ETa
; _dbs_info in a request
max_db_number_for_dbs_info_req = 100
+; set to true to delay the start of a response until the end has been calculated
+;buffer_response = false
+
; authentication handlers
; authentication_handlers = {chttpd_auth, cookie_authentication_handler}, {chttpd_auth, default_authentication_handler}
; uncomment the next line to enable proxy authentication
@@ -195,8 +213,8 @@ enable_xframe_options = false
; CouchDB can optionally enforce a maximum uri length;
; max_uri_length = 8000
; changes_timeout = 60000
-; config_whitelist =
-; max_uri_length =
+; config_whitelist =
+; max_uri_length =
; rewrite_limit = 100
; x_forwarded_host = X-Forwarded-Host
; x_forwarded_proto = X-Forwarded-Proto
@@ -205,7 +223,7 @@ enable_xframe_options = false
max_http_request_size = 4294967296 ; 4GB
; [httpd_design_handlers]
-; _view =
+; _view =
; [ioq]
; concurrency = 10
@@ -219,7 +237,7 @@ port = 6984
; [chttpd_auth_cache]
; max_lifetime = 600000
-; max_objects =
+; max_objects =
; max_size = 104857600
; [mem3]
@@ -230,7 +248,7 @@ port = 6984
; [fabric]
; all_docs_concurrency = 10
-; changes_duration =
+; changes_duration =
; shard_timeout_factor = 2
; uuid_prefix_len = 7
; request_timeout = 60000
@@ -238,6 +256,30 @@ port = 6984
; attachments_timeout = 60000
; view_timeout = 3600000
; partition_view_timeout = 3600000
+;
+; Custom FDB directory prefix. All the nodes of the same CouchDB instance
+; should have a matching directory prefix in order to read and write the same
+; data. Changes to this value take effect only on node start-up.
+;fdb_directory = couchdb
+;
+; Enable or disable index auto-updater
+;index_updater_enabled = true
+;
+; How long to wait from the first db update event until index building is
+; triggered.
+;index_updater_delay_msec = 60000
+;
+; How often to check if databases may need their indices updated.
+;index_updater_resolution_msec = 10000
+;
+; Enable or disable automatic stale index removal in the auto-updater
+;index_updater_remove_old_indices = false
+;
+; Byte size of binary chunks written to FDB values. Defaults to FDB max limit.
+;binary_chunk_size = 100000
+;
+; Bulk docs transaction batch size in bytes
+;update_docs_batch_size = 2500000
; [rexi]
; buffer_count = 2000
@@ -275,12 +317,26 @@ iterations = 10 ; iterations for password hashing
; proxy_use_secret = false
; comma-separated list of public fields, 404 if empty
; public_fields =
-; secret =
+; secret =
; users_db_public = false
; cookie_domain = example.com
; Set the SameSite cookie property for the auth cookie. If empty, the SameSite property is not set.
; same_site =
+; Settings for view indexing
+[couch_views]
+; Maximum acceptors waiting to accept view indexing jobs
+;max_acceptors = 5
+;
+; Maximum number of view indexing workers
+;max_workers = 100
+;
+; The maximum allowed key size emitted from a view for a document (in bytes)
+;key_size_limit = 8000
+;
+; The maximum allowed value size emitted from a view for a document (in bytes)
+;value_size_limit = 64000
+
; CSP (Content Security Policy) Support for _utils
[csp]
enable = true
@@ -344,6 +400,13 @@ os_process_limit = 100
;query_limit = 268435456
;partition_query_limit = 268435456
+[couch_eval.languages]
+; The list of modules that implement the couch_eval
+; behavior for executing provided code in design
+; documents.
+javascript = couch_js
+query = mango_eval
+
[mango]
; Set to true to disable the "index all fields" text index, which can lead
; to out of memory issues when users have documents with nested array fields.
@@ -359,7 +422,7 @@ os_process_limit = 100
couch_mrview = true
[feature_flags]
-; This enables any database to be created as a partitioned databases (except system db's).
+; This enables any database to be created as a partitioned database (except system db's).
; Setting this to false will stop the creation of partitioned databases.
; partitioned||allowed* = true will scope the creation of partitioned databases
; to databases with 'allowed' prefix.
@@ -602,7 +665,7 @@ compaction = false
; The default number of results returned from a search on a partition
; of a database.
; limit_partitions = 2000
-
+
; The maximum number of results that can be returned from a global
; search query (or any search query on a database without user-defined
; partitions). Attempts to set ?limit=N higher than this value will
@@ -626,3 +689,135 @@ compaction = false
;source_close_timeout_sec = 600
;require_node_param = false
;require_range_param = false
+
+[couch_jobs]
+;
+; Maximum jitter used when checking for active job timeouts
+;activity_monitor_max_jitter_msec = 10000
+;
+; Hold-off applied before notifying subscribers. Since active jobs can be
+; queried more effiently using a range read, increasing this value should make
+; notifications more performant, however, it would also increase notification
+; latency.
+;type_monitor_holdoff_msec = 50
+;
+; Timeout used when waiting for the job type notification watches. The default
+; value of "infinity" should work well in most cases.
+;type_monitor_timeout_msec = infinity
+;
+; How often to check for the presence of new job types.
+;type_check_period_msec = 15000
+;
+; Jitter applied when checking for new job types.
+;type_check_max_jitter_msec = 5000
+
+[tracing]
+;
+; Configuration settings for the `ctrace` OpenTracing
+; API. There are two reporter which we support.
+; - jaeger.thrift over udp
+; - jaeger.thrift over http
+; ## Common settings
+; enabled = false ; true | false
+; app_name = couchdb ; value to use for the `location.application` tag
+; protocol = udp ; udp | http - which reporter to use
+; ## jaeger.thrift over udp reporter
+; thrift_format = compact ; compact | binary
+; agent_host = 127.0.0.1
+; agent_port = 6831
+; ## jaeger.thrift over http reporter
+; endpoint = http://127.0.0.1:14268
+
+[tracing.filters]
+;
+; Configure tracing for each individual operation. Keys should be set as
+; operation names (i.e., `database-info.read` or `view.build`). Each value
+; is essentially an anonymous function that accepts a single argument:
+; the tags provided to the root span. These definitions
+; should not include a function name or a trailing `.`. Return values
+; must be one of `true`, `false`, or `float()`. A boolean return
+; indicates whether or not to include the trace while a `float()`
+; value between 0 and 1 gives the probability that the trace should
+; be included or not. I.e., if the value is `0.9` then 90% of the
+; traces will be logged. See the `src/ctrace/README.md` for a
+; thorough description of the filter DSL.
+;
+; database-info.read = (#{'http.method' := Method}) when Method == 'GET' -> true
+; view.build = (#{'view.name' := Name}) when Name == "foo" -> 0.25
+;
+; The key `all` is checked for any trace that does not have a
+; corresponding operation name key configured. Thus, users can easily
+; log every generated trace by including the following:
+;
+; all = (#{}) -> true
+
+[couch_rate.views]
+limiter = couch_rate_limiter
+opts = #{budget => 100, target => 2500, window => 60000, sensitivity => 1000}
+
+
+; Some low-level FDB transaction options. These options will be applied to the
+; database handle and inherited by each transaction started with that handle.
+; The description of these can be found in the fdb_c_option.g.h include file from
+; the client libraries. The default values which are not specified in the
+; fdb_c_option.g.h file are not included here either.
+
+[fdb_tx_options]
+; Specify the machine ID that was passed to fdbserver processes running on the
+; same machine as this client, for better location-aware load balancing.
+; Type is a hexadecimal string, less than 16 bytes in size.
+;machine_id =
+
+; Specify the datacenter ID that was passed to fdbserver processes running in
+; the same datacenter as this client, for better location-aware load balancing.
+; Type is hexadecimal string, less than 16 bytes in size.
+;datacenter_id =
+
+; Sets the maximum escaped length of key and value fields to be logged to the
+; trace file via the LOG_TRANSACTION option, after which the field will be
+; truncated. A negative value disables truncation.
+;transaction_logging_max_field_length =
+
+; Set a timeout in milliseconds which, when elapsed, will cause the transaction
+; automatically to be cancelled. Valid parameter values are [0, INT_MAX].
+; If set to 0, will disable all timeouts. All pending and any future uses of
+; the transaction will throw an exception. The transaction can be used again
+; after it is reset.
+;timeout = 60000
+
+; Set a maximum number of retries after which additional calls to 'on_error'
+; will throw the most recently seen error code. Valid parameter values are
+; [-1, INT_MAX]. If set to -1, will disable the retry limit.
+;retry_limit = 100
+
+; Set the maximum amount of backoff delay incurred in the call to 'on_error'
+; if the error is retryable. Defaults to 1000 ms. Valid parameter values are
+; [0, INT_MAX]. If the maximum retry delay is less than the current retry
+; delay of the transaction, then the current retry delay will be clamped to the
+; maximum retry delay. The retry limit is not reset after an
+; 'on_error' call.
+;max_retry_delay = 1000
+
+; Set the transaction size limit in bytes. The size is calculated by combining
+; the sizes of all keys and values written or mutated, all key ranges cleared,
+; and all read and write conflict ranges. (In other words, it includes the
+; total size of all data included in the request to the cluster to commit the
+; transaction.) Large transactions can cause performance problems on
+; FoundationDB clusters, so setting this limit to a smaller value than the
+; default can help prevent the client from accidentally degrading the cluster's
+; performance. This value must be at least 10000 and cannot be set higher than
+; 10000000, the default transaction size limit.
+;size_limit = 10000000
+
+[aegis]
+; The maximum number of entries in the key cache.
+; Once the limit is reached, the least recently used entries are evicted.
+;cache_limit = 100000
+
+; The period in seconds for how long each entry is kept in the cache.
+; This is not affected by access time, i.e. the keys are always removed
+; once expired and re-fetched on the next encrypt/decrypt operation.
+;cache_max_age_sec = 1800
+
+; The interval in seconds of how often the expiration check runs.
+;cache_expiration_check_sec = 10
diff --git a/rel/reltool.config b/rel/reltool.config
index 6acba378b..0cc8dbb13 100644
--- a/rel/reltool.config
+++ b/rel/reltool.config
@@ -26,22 +26,28 @@
syntax_tools,
xmerl,
%% couchdb
+ aegis,
b64url,
bear,
chttpd,
config,
couch,
couch_epi,
+ couch_jobs,
couch_index,
couch_log,
couch_mrview,
couch_plugins,
couch_replicator,
couch_stats,
+ couch_eval,
+ couch_js,
couch_event,
couch_peruser,
+ couch_views,
ddoc_cache,
dreyfus,
+ ebtree,
ets_lru,
fabric,
folsom,
@@ -85,12 +91,16 @@
{app, xmerl, [{incl_cond, include}]},
%% couchdb
+ {app, aegis, [{incl_cond, include}]},
{app, b64url, [{incl_cond, include}]},
{app, bear, [{incl_cond, include}]},
{app, chttpd, [{incl_cond, include}]},
{app, config, [{incl_cond, include}]},
{app, couch, [{incl_cond, include}]},
{app, couch_epi, [{incl_cond, include}]},
+ {app, couch_eval, [{incl_cond, include}]},
+ {app, couch_js, [{incl_cond, include}]},
+ {app, couch_jobs, [{incl_cond, include}]},
{app, couch_index, [{incl_cond, include}]},
{app, couch_log, [{incl_cond, include}]},
{app, couch_mrview, [{incl_cond, include}]},
@@ -99,8 +109,10 @@
{app, couch_stats, [{incl_cond, include}]},
{app, couch_event, [{incl_cond, include}]},
{app, couch_peruser, [{incl_cond, include}]},
+ {app, couch_views, [{incl_cond, include}]},
{app, ddoc_cache, [{incl_cond, include}]},
{app, dreyfus, [{incl_cond, include}]},
+ {app, ebtree, [{incl_cond, include}]},
{app, ets_lru, [{incl_cond, include}]},
{app, fabric, [{incl_cond, include}]},
{app, folsom, [{incl_cond, include}]},
diff --git a/src/aegis/rebar.config.script b/src/aegis/rebar.config.script
new file mode 100644
index 000000000..ef148bfbe
--- /dev/null
+++ b/src/aegis/rebar.config.script
@@ -0,0 +1,35 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+CouchConfig = case filelib:is_file(os:getenv("COUCHDB_CONFIG")) of
+ true ->
+ {ok, Result} = file:consult(os:getenv("COUCHDB_CONFIG")),
+ Result;
+ false ->
+ []
+end.
+
+AegisKeyManager = case lists:keyfind(aegis_key_manager, 1, CouchConfig) of
+ {aegis_key_manager, Module} when Module /= "" ->
+ list_to_atom(Module);
+ _ ->
+ aegis_noop_key_manager
+end,
+
+CurrentOpts = case lists:keyfind(erl_opts, 1, CONFIG) of
+ {erl_opts, Opts} -> Opts;
+ false -> []
+end,
+
+AegisOpts = {d, 'AEGIS_KEY_MANAGER', AegisKeyManager},
+lists:keystore(erl_opts, 1, CONFIG, {erl_opts, [AegisOpts | CurrentOpts]}).
diff --git a/src/aegis/src/aegis.app.src.script b/src/aegis/src/aegis.app.src.script
new file mode 100644
index 000000000..f54688cf2
--- /dev/null
+++ b/src/aegis/src/aegis.app.src.script
@@ -0,0 +1,53 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+CouchConfig = case filelib:is_file(os:getenv("COUCHDB_CONFIG")) of
+ true ->
+ {ok, Result} = file:consult(os:getenv("COUCHDB_CONFIG")),
+ Result;
+ false ->
+ []
+end.
+
+AegisKeyManagerApp = case lists:keyfind(aegis_key_manager_app, 1, CouchConfig) of
+ {aegis_key_manager_app, AppName} when AppName /= "" ->
+ [list_to_atom(AppName)];
+ _ ->
+ []
+end.
+
+BaseApplications = [
+ kernel,
+ stdlib,
+ crypto,
+ couch_log,
+ erlfdb
+].
+
+Applications = AegisKeyManagerApp ++ BaseApplications.
+
+{application, aegis,
+ [
+ {description, "If it's good enough for Zeus, it's good enough for CouchDB"},
+ {vsn, git},
+ {mod, {aegis_app, []}},
+ {registered, [
+ aegis_server
+ ]},
+ {applications, Applications},
+ {env,[]},
+ {modules, []},
+ {maintainers, []},
+ {licenses, []},
+ {links, []}
+ ]
+}.
diff --git a/src/aegis/src/aegis.erl b/src/aegis/src/aegis.erl
new file mode 100644
index 000000000..e8a0b4bfb
--- /dev/null
+++ b/src/aegis/src/aegis.erl
@@ -0,0 +1,72 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(aegis).
+-include_lib("fabric/include/fabric2.hrl").
+
+
+-define(WRAPPED_KEY, {?DB_AEGIS, 1}).
+
+
+-export([
+ init_db/2,
+ open_db/1,
+
+ decrypt/2,
+ decrypt/3,
+ encrypt/3,
+ wrap_fold_fun/2
+]).
+
+init_db(#{} = Db, Options) ->
+ Db#{
+ is_encrypted => aegis_server:init_db(Db, Options)
+ }.
+
+
+open_db(#{} = Db) ->
+ Db#{
+ is_encrypted => aegis_server:open_db(Db)
+ }.
+
+
+encrypt(#{} = _Db, _Key, <<>>) ->
+ <<>>;
+
+encrypt(#{is_encrypted := false}, _Key, Value) when is_binary(Value) ->
+ Value;
+
+encrypt(#{is_encrypted := true} = Db, Key, Value)
+ when is_binary(Key), is_binary(Value) ->
+ aegis_server:encrypt(Db, Key, Value).
+
+
+decrypt(#{} = Db, Rows) when is_list(Rows) ->
+ lists:map(fun({Key, Value}) ->
+ {Key, decrypt(Db, Key, Value)}
+ end, Rows).
+
+decrypt(#{} = _Db, _Key, <<>>) ->
+ <<>>;
+
+decrypt(#{is_encrypted := false}, _Key, Value) when is_binary(Value) ->
+ Value;
+
+decrypt(#{is_encrypted := true} = Db, Key, Value)
+ when is_binary(Key), is_binary(Value) ->
+ aegis_server:decrypt(Db, Key, Value).
+
+
+wrap_fold_fun(Db, Fun) when is_function(Fun, 2) ->
+ fun({Key, Value}, Acc) ->
+ Fun({Key, decrypt(Db, Key, Value)}, Acc)
+ end.
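A minimal usage sketch for the API above. It exercises only the pass-through clauses for a database whose Db map carries is_encrypted => false, so nothing here touches aegis_server:

    %% Sketch: for an unencrypted database the encrypt/decrypt clauses above
    %% simply return the value unchanged.
    Db = #{is_encrypted => false},
    <<"value">> = aegis:encrypt(Db, <<"key">>, <<"value">>),
    <<"value">> = aegis:decrypt(Db, <<"key">>, <<"value">>),
    %% wrap_fold_fun/2 decrypts each {Key, Value} pair before invoking the
    %% wrapped fun; with is_encrypted => false it is effectively the identity.
    FoldFun = aegis:wrap_fold_fun(Db, fun({_K, V}, Acc) -> [V | Acc] end),
    [<<"value">>] = FoldFun({<<"key">>, <<"value">>}, []).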
diff --git a/src/aegis/src/aegis.hrl b/src/aegis/src/aegis.hrl
new file mode 100644
index 000000000..2a2a2dcde
--- /dev/null
+++ b/src/aegis/src/aegis.hrl
@@ -0,0 +1,57 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+%% Assume old crypto api
+
+-define(sha256_hmac(Key, PlainText), crypto:hmac(sha256, Key, PlainText)).
+
+-define(aes_gcm_encrypt(Key, IV, AAD, Data),
+ crypto:block_encrypt(aes_gcm, Key, IV, {AAD, Data, 16})).
+
+-define(aes_gcm_decrypt(Key, IV, AAD, CipherText, CipherTag),
+ crypto:block_decrypt(aes_gcm, Key, IV, {AAD, CipherText, CipherTag})).
+
+-define(aes_ecb_encrypt(Key, Data),
+ crypto:block_encrypt(aes_ecb, Key, Data)).
+
+-define(aes_ecb_decrypt(Key, Data),
+ crypto:block_decrypt(aes_ecb, Key, Data)).
+
+%% Replace macros if new crypto api is available
+-ifdef(OTP_RELEASE).
+-if(?OTP_RELEASE >= 22).
+
+-undef(sha256_hmac).
+-define(sha256_hmac(Key, PlainText), crypto:mac(hmac, sha256, Key, PlainText)).
+
+-undef(aes_gcm_encrypt).
+-define(aes_gcm_encrypt(Key, IV, AAD, Data),
+ crypto:crypto_one_time_aead(aes_256_gcm, Key, IV, Data, AAD, 16, true)).
+
+-undef(aes_gcm_decrypt).
+-define(aes_gcm_decrypt(Key, IV, AAD, CipherText, CipherTag),
+ crypto:crypto_one_time_aead(aes_256_gcm, Key, IV, CipherText,
+ AAD, CipherTag, false)).
+
+-define(key_alg(Key), case bit_size(Key) of
+ 128 -> aes_128_ecb; 192 -> aes_192_ecb; 256 -> aes_256_ecb end).
+
+-undef(aes_ecb_encrypt).
+-define(aes_ecb_encrypt(Key, Data),
+ crypto:crypto_one_time(?key_alg(Key), Key, Data, true)).
+
+-undef(aes_ecb_decrypt).
+-define(aes_ecb_decrypt(Key, Data),
+ crypto:crypto_one_time(?key_alg(Key), Key, Data, false)).
+
+-endif.
+-endif. \ No newline at end of file
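The conditional macros above keep call sites identical across OTP releases: on OTP 22+ they expand to crypto:mac/4, crypto:crypto_one_time_aead/7 and crypto:crypto_one_time/4, otherwise to the older crypto:hmac/3 and crypto:block_encrypt/block_decrypt calls. A minimal sketch of a call site that includes this header (the helper function is hypothetical):

    -include("aegis.hrl").

    %% Hypothetical helper: computes an HMAC-SHA256 on any supported OTP
    %% release, because ?sha256_hmac expands to whichever crypto call exists.
    mac_example(Key, PlainText) when is_binary(Key), is_binary(PlainText) ->
        ?sha256_hmac(Key, PlainText).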
diff --git a/src/aegis/src/aegis_app.erl b/src/aegis/src/aegis_app.erl
new file mode 100644
index 000000000..4a5a11f0c
--- /dev/null
+++ b/src/aegis/src/aegis_app.erl
@@ -0,0 +1,26 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(aegis_app).
+
+-behaviour(application).
+
+
+-export([start/2, stop/1]).
+
+
+start(_StartType, _StartArgs) ->
+ aegis_sup:start_link().
+
+
+stop(_State) ->
+ ok.
diff --git a/src/aegis/src/aegis_key_manager.erl b/src/aegis/src/aegis_key_manager.erl
new file mode 100644
index 000000000..aa9e3429a
--- /dev/null
+++ b/src/aegis/src/aegis_key_manager.erl
@@ -0,0 +1,22 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(aegis_key_manager).
+
+
+
+-callback init_db(
+ Db :: #{},
+ DbOptions :: list()) -> {ok, binary()} | false.
+
+
+-callback open_db(Db :: #{}) -> {ok, binary()} | false.
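The behaviour above is the whole key-manager contract: init_db/2 is invoked for newly created databases and open_db/1 for existing ones, and each either returns {ok, DbKey} to enable encryption or false to leave the database unencrypted (which is exactly what aegis_noop_key_manager below does). A sketch of a custom implementation; the module name and the application-environment key source are hypothetical:

    -module(my_aegis_key_manager).
    -behaviour(aegis_key_manager).

    -export([init_db/2, open_db/1]).

    %% Hypothetical example: fetch a single shared database key from the
    %% application environment. A real key manager would derive, wrap and
    %% persist per-database keys instead.
    init_db(#{} = _Db, _DbOptions) ->
        get_key().

    open_db(#{} = _Db) ->
        get_key().

    get_key() ->
        case application:get_env(my_aegis_key_manager, db_key) of
            {ok, Key} when is_binary(Key) ->
                {ok, Key};
            undefined ->
                %% No key configured: aegis treats the database as unencrypted.
                false
        end.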
diff --git a/src/aegis/src/aegis_keywrap.erl b/src/aegis/src/aegis_keywrap.erl
new file mode 100644
index 000000000..58c7668e8
--- /dev/null
+++ b/src/aegis/src/aegis_keywrap.erl
@@ -0,0 +1,97 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(aegis_keywrap).
+-include("aegis.hrl").
+
+%% Implementation of NIST Special Publication 800-38F
+%% For wrapping and unwrapping keys with AES.
+
+-export([key_wrap/2, key_unwrap/2]).
+
+-define(ICV1, 16#A6A6A6A6A6A6A6A6).
+
+-spec key_wrap(WrappingKey :: binary(), KeyToWrap :: binary()) -> binary().
+key_wrap(WrappingKey, KeyToWrap)
+ when is_binary(WrappingKey), bit_size(KeyToWrap) rem 64 == 0 ->
+ N = bit_size(KeyToWrap) div 64,
+ wrap(WrappingKey, <<?ICV1:64>>, KeyToWrap, 1, 6 * N).
+
+wrap(_WrappingKey, A, R, T, End) when T > End ->
+ <<A/binary, R/binary>>;
+wrap(WrappingKey, A, R, T, End) ->
+ <<R1:64, Rest/binary>> = R,
+ <<MSB_B:64, LSB_B:64>> = ?aes_ecb_encrypt(WrappingKey, <<A/binary, R1:64>>),
+ wrap(WrappingKey, <<(MSB_B bxor T):64>>, <<Rest/binary, LSB_B:64>>, T + 1, End).
+
+
+-spec key_unwrap(WrappingKey :: binary(), KeyToUnwrap :: binary()) -> binary() | fail.
+key_unwrap(WrappingKey, KeyToUnwrap)
+ when is_binary(WrappingKey), bit_size(KeyToUnwrap) rem 64 == 0 ->
+ N = (bit_size(KeyToUnwrap) div 64),
+ <<A:64, R/binary>> = KeyToUnwrap,
+ case unwrap(WrappingKey, <<A:64>>, R, 6 * (N - 1)) of
+ <<?ICV1:64, UnwrappedKey/binary>> ->
+ UnwrappedKey;
+ _ ->
+ fail
+ end.
+
+unwrap(_WrappingKey, A, R, 0) ->
+ <<A/binary, R/binary>>;
+unwrap(WrappingKey, <<A:64>>, R, T) ->
+ RestSize = bit_size(R) - 64,
+ <<Rest:RestSize, R2: 64>> = R,
+ <<MSB_B:64, LSB_B:64>> = ?aes_ecb_decrypt(WrappingKey, <<(A bxor T):64, R2:64>>),
+ unwrap(WrappingKey, <<MSB_B:64>>, <<LSB_B:64, Rest:RestSize>>, T - 1).
+
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+wrap_test_() ->
+ [
+ %% 128 KEK / 128 DATA
+ test_wrap_unwrap(<<16#000102030405060708090A0B0C0D0E0F:128>>,
+ <<16#00112233445566778899AABBCCDDEEFF:128>>,
+ <<16#1FA68B0A8112B447AEF34BD8FB5A7B829D3E862371D2CFE5:192>>),
+ %% 192 KEK / 128 DATA
+ test_wrap_unwrap(<<16#000102030405060708090A0B0C0D0E0F1011121314151617:192>>,
+ <<16#00112233445566778899AABBCCDDEEFF:128>>,
+ <<16#96778B25AE6CA435F92B5B97C050AED2468AB8A17AD84E5D:192>>),
+ %% 256 KEK / 128 DATA
+ test_wrap_unwrap(<<16#000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F:256>>,
+ <<16#00112233445566778899AABBCCDDEEFF:128>>,
+ <<16#64E8C3F9CE0F5BA263E9777905818A2A93C8191E7D6E8AE7:192>>),
+ %% 192 KEK / 192 DATA
+ test_wrap_unwrap(<<16#000102030405060708090A0B0C0D0E0F1011121314151617:192>>,
+ <<16#00112233445566778899AABBCCDDEEFF0001020304050607:192>>,
+ <<16#031D33264E15D33268F24EC260743EDCE1C6C7DDEE725A936BA814915C6762D2:256>>),
+ %% 256 KEK / 192 DATA
+ test_wrap_unwrap(<<16#000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F:256>>,
+ <<16#00112233445566778899AABBCCDDEEFF0001020304050607:192>>,
+ <<16#A8F9BC1612C68B3FF6E6F4FBE30E71E4769C8B80A32CB8958CD5D17D6B254DA1:256>>),
+ %% 256 KEK / 256 DATA
+ test_wrap_unwrap(<<16#000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F:256>>,
+ <<16#00112233445566778899AABBCCDDEEFF000102030405060708090A0B0C0D0E0F:256>>,
+ <<16#28C9F404C4B810F4CBCCB35CFB87F8263F5786E2D80ED326CBC7F0E71A99F43BFB988B9B7A02DD21:320>>)].
+
+test_wrap_unwrap(WrappingKey, KeyToWrap, ExpectedWrappedKey) ->
+ [?_assertEqual(ExpectedWrappedKey, key_wrap(WrappingKey, KeyToWrap)),
+ ?_assertEqual(KeyToWrap, key_unwrap(WrappingKey, key_wrap(WrappingKey, KeyToWrap)))].
+
+fail_test() ->
+ KEK = <<16#000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F:256>>,
+ CipherText = <<16#28C9F404C4B810F4CBCCB35CFB87F8263F5786E2D80ED326CBC7F0E71A99F43BFB988B9B7A02DD20:320>>,
+ ?assertEqual(fail, key_unwrap(KEK, CipherText)).
+
+-endif.
diff --git a/src/aegis/src/aegis_noop_key_manager.erl b/src/aegis/src/aegis_noop_key_manager.erl
new file mode 100644
index 000000000..2b61f1d29
--- /dev/null
+++ b/src/aegis/src/aegis_noop_key_manager.erl
@@ -0,0 +1,31 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(aegis_noop_key_manager).
+
+
+-behaviour(aegis_key_manager).
+
+
+-export([
+ init_db/2,
+ open_db/1
+]).
+
+
+
+init_db(#{} = _Db, _Options) ->
+ false.
+
+
+open_db(#{} = _Db) ->
+ false.
diff --git a/src/aegis/src/aegis_server.erl b/src/aegis/src/aegis_server.erl
new file mode 100644
index 000000000..15fea4c63
--- /dev/null
+++ b/src/aegis/src/aegis_server.erl
@@ -0,0 +1,421 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(aegis_server).
+
+-behaviour(gen_server).
+
+-vsn(1).
+
+
+-include("aegis.hrl").
+
+
+%% aegis_server API
+-export([
+ start_link/0,
+ init_db/2,
+ open_db/1,
+ encrypt/3,
+ decrypt/3
+]).
+
+%% gen_server callbacks
+-export([
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ code_change/3
+]).
+
+
+
+-define(KEY_CHECK, aegis_key_check).
+-define(INIT_TIMEOUT, 60000).
+-define(TIMEOUT, 10000).
+-define(CACHE_LIMIT, 100000).
+-define(CACHE_MAX_AGE_SEC, 1800).
+-define(CACHE_EXPIRATION_CHECK_SEC, 10).
+-define(LAST_ACCESSED_INACTIVITY_SEC, 10).
+
+
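+% Cache layout: `Cache` is a set keyed by UUID holding #entry{} records,
+% `ByAccess` is an ordered_set keyed by the ever increasing counter and is
+% used to find the least recently used entry for eviction, and the named
+% ?KEY_CHECK table is read directly by client processes (see is_key_fresh/1)
+% to check key freshness without a round trip to this server.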
+-record(entry, {uuid, encryption_key, counter, last_accessed, expires_at}).
+
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
+-spec init_db(Db :: #{}, Options :: list()) -> boolean().
+init_db(#{uuid := UUID} = Db, Options) ->
+ sensitive(fun() ->
+ case ?AEGIS_KEY_MANAGER:init_db(Db, Options) of
+ {ok, DbKey} ->
+ gen_server:call(?MODULE, {insert_key, UUID, DbKey}),
+ true;
+ false ->
+ false
+ end
+ end).
+
+
+-spec open_db(Db :: #{}) -> boolean().
+open_db(#{} = Db) ->
+ sensitive(fun() ->
+ case do_open_db(Db) of
+ {ok, _DbKey} ->
+ true;
+ false ->
+ false
+ end
+ end).
+
+
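+% encrypt/3 and decrypt/3 delegate to the server while the cached key is still
+% fresh (the server spawns a short-lived sensitive worker per operation);
+% otherwise they re-fetch the key via the key manager and do the work in the
+% calling process.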
+-spec encrypt(Db :: #{}, Key :: binary(), Value :: binary()) -> binary().
+encrypt(#{} = Db, Key, Value) when is_binary(Key), is_binary(Value) ->
+ #{
+ uuid := UUID
+ } = Db,
+
+ case is_key_fresh(UUID) of
+ true ->
+ case gen_server:call(?MODULE, {encrypt, Db, Key, Value}) of
+ CipherText when is_binary(CipherText) ->
+ CipherText;
+ {error, {_Tag, {_C_FileName,_LineNumber}, _Desc} = Reason} ->
+ couch_log:error("aegis encryption failure: ~p ", [Reason]),
+ erlang:error(decryption_failed);
+ {error, Reason} ->
+ erlang:error(Reason)
+ end;
+ false ->
+ sensitive(fun() ->
+ {ok, DbKey} = do_open_db(Db),
+ do_encrypt(DbKey, Db, Key, Value)
+ end)
+ end.
+
+
+-spec decrypt(Db :: #{}, Key :: binary(), Value :: binary()) -> binary().
+decrypt(#{} = Db, Key, Value) when is_binary(Key), is_binary(Value) ->
+ #{
+ uuid := UUID
+ } = Db,
+
+ case is_key_fresh(UUID) of
+ true ->
+ case gen_server:call(?MODULE, {decrypt, Db, Key, Value}) of
+ PlainText when is_binary(PlainText) ->
+ PlainText;
+ {error, {_Tag, {_C_FileName,_LineNumber}, _Desc} = Reason} ->
+ couch_log:error("aegis decryption failure: ~p ", [Reason]),
+ erlang:error(decryption_failed);
+ {error, Reason} ->
+ erlang:error(Reason)
+ end;
+ false ->
+ sensitive(fun() ->
+ {ok, DbKey} = do_open_db(Db),
+ do_decrypt(DbKey, Db, Key, Value)
+ end)
+ end.
+
+
+%% gen_server functions
+
+init([]) ->
+ process_flag(sensitive, true),
+ Cache = ets:new(?MODULE, [set, private, {keypos, #entry.uuid}]),
+ ByAccess = ets:new(?MODULE,
+ [ordered_set, private, {keypos, #entry.counter}]),
+ ets:new(?KEY_CHECK, [named_table, protected, {read_concurrency, true}]),
+
+ erlang:send_after(0, self(), maybe_remove_expired),
+
+ St = #{
+ cache => Cache,
+ by_access => ByAccess,
+ counter => 0
+ },
+ {ok, St, ?INIT_TIMEOUT}.
+
+
+terminate(_Reason, _St) ->
+ ok.
+
+
+handle_call({insert_key, UUID, DbKey}, _From, #{cache := Cache} = St) ->
+ case ets:lookup(Cache, UUID) of
+ [#entry{uuid = UUID} = Entry] ->
+ delete(St, Entry);
+ [] ->
+ ok
+ end,
+ NewSt = insert(St, UUID, DbKey),
+ {reply, ok, NewSt, ?TIMEOUT};
+
+handle_call({encrypt, #{uuid := UUID} = Db, Key, Value}, From, St) ->
+
+ {ok, DbKey} = lookup(St, UUID),
+
+ erlang:spawn(fun() ->
+ process_flag(sensitive, true),
+ try
+ do_encrypt(DbKey, Db, Key, Value)
+ of
+ Resp ->
+ gen_server:reply(From, Resp)
+ catch
+ _:Error ->
+ gen_server:reply(From, {error, Error})
+ end
+ end),
+
+ {noreply, St, ?TIMEOUT};
+
+handle_call({decrypt, #{uuid := UUID} = Db, Key, Value}, From, St) ->
+
+ {ok, DbKey} = lookup(St, UUID),
+
+ erlang:spawn(fun() ->
+ process_flag(sensitive, true),
+ try
+ do_decrypt(DbKey, Db, Key, Value)
+ of
+ Resp ->
+ gen_server:reply(From, Resp)
+ catch
+ _:Error ->
+ gen_server:reply(From, {error, Error})
+ end
+ end),
+
+ {noreply, St, ?TIMEOUT};
+
+handle_call(_Msg, _From, St) ->
+ {noreply, St}.
+
+
+handle_cast({accessed, UUID}, St) ->
+ NewSt = bump_last_accessed(St, UUID),
+ {noreply, NewSt};
+
+
+handle_cast(_Msg, St) ->
+ {noreply, St}.
+
+
+handle_info(maybe_remove_expired, St) ->
+ remove_expired_entries(St),
+ CheckInterval = erlang:convert_time_unit(
+ expiration_check_interval(), second, millisecond),
+ erlang:send_after(CheckInterval, self(), maybe_remove_expired),
+ {noreply, St};
+
+handle_info(_Msg, St) ->
+ {noreply, St}.
+
+
+code_change(_OldVsn, St, _Extra) ->
+ {ok, St}.
+
+
+%% private functions
+
+do_open_db(#{uuid := UUID} = Db) ->
+ case ?AEGIS_KEY_MANAGER:open_db(Db) of
+ {ok, DbKey} ->
+ gen_server:call(?MODULE, {insert_key, UUID, DbKey}),
+ {ok, DbKey};
+ false ->
+ false
+ end.
+
+
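+% Envelope format: a fresh random 256-bit data key is generated per value,
+% wrapped with the DbKey and stored alongside the AES-GCM tag and ciphertext
+% as <<1:8, WrappedKey:320, CipherTag:128, CipherText/binary>>. The AAD binds
+% the value to the database UUID and the key it is stored under.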
+do_encrypt(DbKey, #{uuid := UUID}, Key, Value) ->
+ EncryptionKey = crypto:strong_rand_bytes(32),
+ <<WrappedKey:320>> = aegis_keywrap:key_wrap(DbKey, EncryptionKey),
+
+ {CipherText, <<CipherTag:128>>} =
+ ?aes_gcm_encrypt(
+ EncryptionKey,
+ <<0:96>>,
+ <<UUID/binary, 0:8, Key/binary>>,
+ Value),
+ <<1:8, WrappedKey:320, CipherTag:128, CipherText/binary>>.
+
+
+do_decrypt(DbKey, #{uuid := UUID}, Key, Value) ->
+ case Value of
+ <<1:8, WrappedKey:320, CipherTag:128, CipherText/binary>> ->
+ case aegis_keywrap:key_unwrap(DbKey, <<WrappedKey:320>>) of
+ fail ->
+ erlang:error(decryption_failed);
+ DecryptionKey ->
+ Decrypted =
+ ?aes_gcm_decrypt(
+ DecryptionKey,
+ <<0:96>>,
+ <<UUID/binary, 0:8, Key/binary>>,
+ CipherText,
+ <<CipherTag:128>>),
+ if Decrypted /= error -> Decrypted; true ->
+ erlang:error(decryption_failed)
+ end
+ end;
+ _ ->
+ erlang:error(not_ciphertext)
+ end.
+
+
+is_key_fresh(UUID) ->
+ Now = fabric2_util:now(sec),
+
+ case ets:lookup(?KEY_CHECK, UUID) of
+ [{UUID, ExpiresAt}] when ExpiresAt >= Now -> true;
+ _ -> false
+ end.
+
+
+%% cache functions
+
+insert(St, UUID, DbKey) ->
+ #{
+ cache := Cache,
+ by_access := ByAccess,
+ counter := Counter
+ } = St,
+
+ Now = fabric2_util:now(sec),
+ ExpiresAt = Now + max_age(),
+
+ Entry = #entry{
+ uuid = UUID,
+ encryption_key = DbKey,
+ counter = Counter,
+ last_accessed = Now,
+ expires_at = ExpiresAt
+ },
+
+ true = ets:insert(Cache, Entry),
+ true = ets:insert_new(ByAccess, Entry),
+ true = ets:insert(?KEY_CHECK, {UUID, ExpiresAt}),
+
+ CacheLimit = cache_limit(),
+ CacheSize = ets:info(Cache, size),
+
+ case CacheSize > CacheLimit of
+ true ->
+ LRUKey = ets:first(ByAccess),
+ [LRUEntry] = ets:lookup(ByAccess, LRUKey),
+ delete(St, LRUEntry);
+ false ->
+ ok
+ end,
+
+ St#{counter := Counter + 1}.
+
+
+lookup(#{cache := Cache}, UUID) ->
+ case ets:lookup(Cache, UUID) of
+ [#entry{uuid = UUID, encryption_key = DbKey} = Entry] ->
+ maybe_bump_last_accessed(Entry),
+ {ok, DbKey};
+ [] ->
+ {error, not_found}
+ end.
+
+
+delete(St, #entry{uuid = UUID} = Entry) ->
+ #{
+ cache := Cache,
+ by_access := ByAccess
+ } = St,
+
+ true = ets:delete(?KEY_CHECK, UUID),
+ true = ets:delete_object(Cache, Entry),
+ true = ets:delete_object(ByAccess, Entry).
+
+
+maybe_bump_last_accessed(#entry{last_accessed = LastAccessed} = Entry) ->
+ case fabric2_util:now(sec) > LastAccessed + ?LAST_ACCESSED_INACTIVITY_SEC of
+ true ->
+ gen_server:cast(?MODULE, {accessed, Entry#entry.uuid});
+ false ->
+ ok
+ end.
+
+
+bump_last_accessed(St, UUID) ->
+ #{
+ cache := Cache,
+ by_access := ByAccess,
+ counter := Counter
+ } = St,
+
+
+ [#entry{counter = OldCounter} = Entry0] = ets:lookup(Cache, UUID),
+
+ Entry = Entry0#entry{
+ last_accessed = fabric2_util:now(sec),
+ counter = Counter
+ },
+
+ true = ets:insert(Cache, Entry),
+ true = ets:insert_new(ByAccess, Entry),
+
+ ets:delete(ByAccess, OldCounter),
+
+ St#{counter := Counter + 1}.
+
+
+remove_expired_entries(St) ->
+ #{
+ cache := Cache,
+ by_access := ByAccess
+ } = St,
+
+ MatchConditions = [{'=<', '$1', fabric2_util:now(sec)}],
+
+ KeyCheckMatchHead = {'_', '$1'},
+ KeyCheckExpired = [{KeyCheckMatchHead, MatchConditions, [true]}],
+ Count = ets:select_delete(?KEY_CHECK, KeyCheckExpired),
+
+ CacheMatchHead = #entry{expires_at = '$1', _ = '_'},
+ CacheExpired = [{CacheMatchHead, MatchConditions, [true]}],
+ Count = ets:select_delete(Cache, CacheExpired),
+ Count = ets:select_delete(ByAccess, CacheExpired).
+
+
+
+max_age() ->
+ config:get_integer("aegis", "cache_max_age_sec", ?CACHE_MAX_AGE_SEC).
+
+
+expiration_check_interval() ->
+ config:get_integer(
+ "aegis", "cache_expiration_check_sec", ?CACHE_EXPIRATION_CHECK_SEC).
+
+
+cache_limit() ->
+ config:get_integer("aegis", "cache_limit", ?CACHE_LIMIT).
+
+
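+% Run Fun with the `sensitive` process flag set so that key material handled
+% by the calling process is excluded from crash dumps, tracing and
+% process_info while the fun runs.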
+sensitive(Fun) when is_function(Fun, 0) ->
+ OldValue = process_flag(sensitive, true),
+ try
+ Fun()
+ after
+ process_flag(sensitive, OldValue)
+ end.
diff --git a/src/aegis/src/aegis_sup.erl b/src/aegis/src/aegis_sup.erl
new file mode 100644
index 000000000..6d3ee83d8
--- /dev/null
+++ b/src/aegis/src/aegis_sup.erl
@@ -0,0 +1,46 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(aegis_sup).
+
+-behaviour(supervisor).
+
+-vsn(1).
+
+
+-export([
+ start_link/0
+]).
+
+-export([
+ init/1
+]).
+
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+
+init([]) ->
+ Flags = #{
+ strategy => one_for_one,
+ intensity => 5,
+ period => 10
+ },
+ Children = [
+ #{
+ id => aegis_server,
+ start => {aegis_server, start_link, []},
+ shutdown => 5000
+ }
+ ],
+ {ok, {Flags, Children}}.
diff --git a/src/aegis/test/aegis_basic_test.erl b/src/aegis/test/aegis_basic_test.erl
new file mode 100644
index 000000000..61d9737dd
--- /dev/null
+++ b/src/aegis/test/aegis_basic_test.erl
@@ -0,0 +1,17 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(aegis_basic_test).
+
+-include_lib("eunit/include/eunit.hrl").
+
+-define(DB, #{uuid => <<"foo">>}).
diff --git a/src/aegis/test/aegis_server_test.erl b/src/aegis/test/aegis_server_test.erl
new file mode 100644
index 000000000..0f96798b7
--- /dev/null
+++ b/src/aegis/test/aegis_server_test.erl
@@ -0,0 +1,314 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(aegis_server_test).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+
+-define(DB, #{uuid => <<0:64>>}).
+-define(VALUE, <<0:8>>).
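+%% ?VALUE encrypted under the all-zero DbKey returned by the mocked key
+%% manager in setup/0, in the envelope format produced by aegis_server:
+%% <<1:8, WrappedKey:320, CipherTag:128, CipherText/binary>>.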
+-define(ENCRYPTED, <<1,155,242,89,190,54,112,151,18,145,25,251,217,
+ 49,147,125,14,162,146,201,189,100,232,38,239,111,163,84,25,60,
+ 147,167,237,107,24,204,171,232,227,16,72,203,101,118,150,252,
+ 204,80,245,66,98,213,223,63,111,105,101,154>>).
+-define(TIMEOUT, 10000).
+
+
+
+basic_test_() ->
+ {
+ foreach,
+ fun setup/0,
+ fun teardown/1,
+ [
+ {"init_db returns true when encryption enabled",
+ {timeout, ?TIMEOUT, fun test_init_db/0}},
+ {"open_db returns true when encryption enabled",
+ {timeout, ?TIMEOUT, fun test_open_db/0}},
+ {"init_db caches key",
+ {timeout, ?TIMEOUT, fun test_init_db_cache/0}},
+ {"open_db caches key",
+ {timeout, ?TIMEOUT, fun test_open_db_cache/0}},
+ {"encrypt fetches and caches key when it's missing",
+ {timeout, ?TIMEOUT, fun test_encrypt_cache/0}},
+ {"decrypt fetches and caches key when it's missing",
+ {timeout, ?TIMEOUT, fun test_decrypt_cache/0}}
+ ]
+ }.
+
+
+setup() ->
+ Ctx = test_util:start_couch([fabric]),
+ meck:new([?AEGIS_KEY_MANAGER], [passthrough]),
+ ok = meck:expect(?AEGIS_KEY_MANAGER, init_db, 2, {ok, <<0:256>>}),
+ ok = meck:expect(?AEGIS_KEY_MANAGER, open_db, 1, {ok, <<0:256>>}),
+ Ctx.
+
+
+teardown(Ctx) ->
+ meck:unload(),
+ test_util:stop_couch(Ctx).
+
+
+test_init_db() ->
+ ?assert(aegis_server:init_db(?DB, [])),
+ ?assertEqual(1, meck:num_calls(?AEGIS_KEY_MANAGER, init_db, 2)).
+
+
+test_open_db() ->
+ ?assert(aegis_server:open_db(?DB)),
+ ?assertEqual(1, meck:num_calls(?AEGIS_KEY_MANAGER, open_db, 1)).
+
+
+test_init_db_cache() ->
+ ?assertEqual(0, meck:num_calls(?AEGIS_KEY_MANAGER, init_db, 2)),
+
+ ?assert(aegis_server:init_db(?DB, [])),
+
+ lists:foreach(fun(I) ->
+ Encrypted = aegis_server:encrypt(?DB, <<I:64>>, ?VALUE),
+ ?assertNotEqual(?VALUE, Encrypted),
+ ?assertMatch(<<1:8, _/binary>>, Encrypted)
+ end, lists:seq(1, 12)),
+
+ ?assertEqual(1, meck:num_calls(?AEGIS_KEY_MANAGER, init_db, 2)).
+
+
+test_open_db_cache() ->
+ ?assertEqual(0, meck:num_calls(?AEGIS_KEY_MANAGER, open_db, 1)),
+
+ ?assert(aegis_server:open_db(?DB)),
+
+ lists:foreach(fun(I) ->
+ Encrypted = aegis_server:encrypt(?DB, <<I:64>>, ?VALUE),
+ ?assertNotEqual(?VALUE, Encrypted),
+ ?assertMatch(<<1:8, _/binary>>, Encrypted)
+ end, lists:seq(1, 12)),
+
+ ?assertEqual(1, meck:num_calls(?AEGIS_KEY_MANAGER, open_db, 1)).
+
+
+test_encrypt_cache() ->
+ ?assertEqual(0, meck:num_calls(?AEGIS_KEY_MANAGER, open_db, 1)),
+
+ Encrypted = aegis_server:encrypt(?DB, <<1:64>>, ?VALUE),
+ ?assertNotEqual(?VALUE, Encrypted),
+ ?assertMatch(<<1:8, _/binary>>, Encrypted),
+
+ ?assertEqual(1, meck:num_calls(?AEGIS_KEY_MANAGER, open_db, 1)).
+
+
+test_decrypt_cache() ->
+ ?assertEqual(0, meck:num_calls(?AEGIS_KEY_MANAGER, open_db, 1)),
+
+ Decrypted = aegis_server:decrypt(?DB, <<1:64>>, ?ENCRYPTED),
+ ?assertEqual(<<0>>, Decrypted),
+
+ ?assertEqual(1, meck:num_calls(?AEGIS_KEY_MANAGER, open_db, 1)).
+
+
+
+disabled_test_() ->
+ {
+ foreach,
+ fun() ->
+ Ctx = setup(),
+ ok = meck:delete(?AEGIS_KEY_MANAGER, init_db, 2),
+ ok = meck:expect(?AEGIS_KEY_MANAGER, init_db, 2, false),
+ ok = meck:delete(?AEGIS_KEY_MANAGER, open_db, 1),
+ ok = meck:expect(?AEGIS_KEY_MANAGER, open_db, 1, false),
+ Ctx
+ end,
+ fun teardown/1,
+ [
+ {"init_db returns false when encryptions disabled",
+ {timeout, ?TIMEOUT, fun test_disabled_init_db/0}},
+ {"open_db returns false when encryptions disabled",
+ {timeout, ?TIMEOUT, fun test_disabled_open_db/0}},
+ {"pass through on encrypt when encryption disabled",
+ {timeout, ?TIMEOUT, fun test_disabled_encrypt/0}},
+ {"pass through on decrypt when encryption disabled",
+ {timeout, ?TIMEOUT, fun test_disabled_decrypt/0}}
+ ]
+ }.
+
+
+test_disabled_init_db() ->
+ ?assertNot(aegis_server:init_db(?DB, [])),
+ ?assertEqual(1, meck:num_calls(?AEGIS_KEY_MANAGER, init_db, 2)).
+
+
+test_disabled_open_db() ->
+ ?assertNot(aegis_server:open_db(?DB)),
+ ?assertEqual(1, meck:num_calls(?AEGIS_KEY_MANAGER, open_db, 1)).
+
+
+test_disabled_encrypt() ->
+ Db = ?DB#{is_encrypted => aegis_server:open_db(?DB)},
+ Encrypted = aegis:encrypt(Db, <<1:64>>, ?VALUE),
+ ?assertEqual(?VALUE, Encrypted).
+
+
+test_disabled_decrypt() ->
+ Db = ?DB#{is_encrypted => aegis_server:open_db(?DB)},
+ Decrypted = aegis:decrypt(Db, <<1:64>>, ?ENCRYPTED),
+ ?assertEqual(?ENCRYPTED, Decrypted).
+
+
+
+lru_cache_with_expiration_test_() ->
+ {
+ foreach,
+ fun() ->
+ %% this has to be set before the aegis server starts
+ %% for config param "cache_expiration_check_sec" to be picked up
+ meck:new([config, aegis_server, fabric2_util], [passthrough]),
+ ok = meck:expect(config, get_integer, fun
+ ("aegis", "cache_limit", _) -> 5;
+ ("aegis", "cache_max_age_sec", _) -> 130;
+ ("aegis", "cache_expiration_check_sec", _) -> 1;
+ (_, _, Default) -> Default
+ end),
+ Ctx = setup(),
+ ok = meck:expect(fabric2_util, now, fun(sec) ->
+ get(time) == undefined andalso put(time, 10),
+ Now = get(time),
+ put(time, Now + 10),
+ Now
+ end),
+ Ctx
+ end,
+ fun teardown/1,
+ [
+ {"counter moves forward on access bump",
+ {timeout, ?TIMEOUT, fun test_advance_counter/0}},
+ {"oldest entries evicted",
+ {timeout, ?TIMEOUT, fun test_evict_old_entries/0}},
+ {"access bump preserves entries",
+ {timeout, ?TIMEOUT, fun test_bump_accessed/0}},
+ {"expired entries removed",
+ {timeout, ?TIMEOUT, fun test_remove_expired/0}}
+ ]
+ }.
+
+
+test_advance_counter() ->
+ ?assertEqual(0, meck:num_calls(?AEGIS_KEY_MANAGER, open_db, 1)),
+
+ ok = meck:expect(aegis_server, handle_cast, fun({accessed, _} = Msg, St) ->
+ #{counter := Counter} = St,
+ get(counter) == undefined andalso put(counter, 0),
+ OldCounter = get(counter),
+ put(counter, Counter),
+ ?assert(Counter > OldCounter),
+ meck:passthrough([Msg, St])
+ end),
+
+ lists:foreach(fun(I) ->
+ Db = ?DB#{uuid => <<I:64>>},
+ aegis_server:encrypt(Db, <<I:64>>, ?VALUE),
+ aegis_server:encrypt(Db, <<(I+1):64>>, ?VALUE)
+ end, lists:seq(1, 10)),
+
+ ?assertEqual(10, meck:num_calls(?AEGIS_KEY_MANAGER, open_db, 1)).
+
+
+test_evict_old_entries() ->
+ ?assertEqual(0, meck:num_calls(?AEGIS_KEY_MANAGER, open_db, 1)),
+
+ %% overflow cache
+ lists:foreach(fun(I) ->
+ Db = ?DB#{uuid => <<I:64>>},
+ aegis_server:encrypt(Db, <<I:64>>, ?VALUE)
+ end, lists:seq(1, 10)),
+
+ ?assertEqual(10, meck:num_calls(?AEGIS_KEY_MANAGER, open_db, 1)),
+
+ %% confirm that newest keys are still in cache
+ lists:foreach(fun(I) ->
+ Db = ?DB#{uuid => <<I:64>>},
+ aegis_server:encrypt(Db, <<(I+1):64>>, ?VALUE)
+ end, lists:seq(6, 10)),
+
+ ?assertEqual(10, meck:num_calls(?AEGIS_KEY_MANAGER, open_db, 1)),
+
+ %% confirm that the oldest keys have been evicted and need a re-fetch
+ lists:foreach(fun(I) ->
+ Db = ?DB#{uuid => <<I:64>>},
+ aegis_server:encrypt(Db, <<(I+1):64>>, ?VALUE)
+ end, lists:seq(1, 5)),
+
+ ?assertEqual(15, meck:num_calls(?AEGIS_KEY_MANAGER, open_db, 1)).
+
+
+test_bump_accessed() ->
+ ?assertEqual(0, meck:num_calls(?AEGIS_KEY_MANAGER, open_db, 1)),
+
+ %% fill the cache
+ lists:foreach(fun(I) ->
+ Db = ?DB#{uuid => <<I:64>>},
+ aegis_server:encrypt(Db, <<I:64>>, ?VALUE)
+ end, lists:seq(1, 5)),
+
+ ?assertEqual(5, meck:num_calls(?AEGIS_KEY_MANAGER, open_db, 1)),
+
+ %% bump oldest key and then insert a new key to trigger eviction
+ aegis_server:encrypt(?DB#{uuid => <<1:64>>}, <<1:64>>, ?VALUE),
+ aegis_server:encrypt(?DB#{uuid => <<6:64>>}, <<6:64>>, ?VALUE),
+ ?assertEqual(6, meck:num_calls(?AEGIS_KEY_MANAGER, open_db, 1)),
+
+ %% confirm that former oldest key is still in cache
+ aegis_server:encrypt(?DB#{uuid => <<1:64>>}, <<2:64>>, ?VALUE),
+ ?assertEqual(6, meck:num_calls(?AEGIS_KEY_MANAGER, open_db, 1)),
+
+ %% confirm that the second oldest key has been evicted by the new insert
+ aegis_server:encrypt(?DB#{uuid => <<2:64>>}, <<3:64>>, ?VALUE),
+ ?assertEqual(7, meck:num_calls(?AEGIS_KEY_MANAGER, open_db, 1)).
+
+
+test_remove_expired() ->
+ ?assertEqual(0, meck:num_calls(?AEGIS_KEY_MANAGER, open_db, 1)),
+
+ %% to detect when maybe_remove_expired is called
+ ok = meck:expect(aegis_server, handle_info, fun
+ (maybe_remove_expired, St) ->
+ meck:passthrough([maybe_remove_expired, St])
+ end),
+
+ %% fill the cache. the first key expires at 140, the last at 180 of "our" time
+ lists:foreach(fun(I) ->
+ Db = ?DB#{uuid => <<I:64>>},
+ aegis_server:encrypt(Db, <<I:64>>, ?VALUE)
+ end, lists:seq(1, 5)),
+
+ ?assertEqual(5, meck:num_calls(?AEGIS_KEY_MANAGER, open_db, 1)),
+
+ %% confirm entries are still in cache and wind our "clock" up to 160
+ lists:foreach(fun(I) ->
+ Db = ?DB#{uuid => <<I:64>>},
+ aegis_server:encrypt(Db, <<I:64>>, ?VALUE)
+ end, lists:seq(1, 5)),
+
+ ?assertEqual(5, meck:num_calls(?AEGIS_KEY_MANAGER, open_db, 1)),
+
+ %% wait for remove_expired_entries to be triggered
+ meck:reset(aegis_server),
+ meck:wait(aegis_server, handle_info, [maybe_remove_expired, '_'], 2500),
+
+ %% 3 "oldest" entries should be removed, 2 yet to expire still in cache
+ lists:foreach(fun(I) ->
+ Db = ?DB#{uuid => <<I:64>>},
+ aegis_server:encrypt(Db, <<I:64>>, ?VALUE)
+ end, lists:seq(1, 5)),
+
+ ?assertEqual(8, meck:num_calls(?AEGIS_KEY_MANAGER, open_db, 1)).
diff --git a/src/chttpd/src/chttpd.app.src b/src/chttpd/src/chttpd.app.src
index 3526745df..af330e0df 100644
--- a/src/chttpd/src/chttpd.app.src
+++ b/src/chttpd/src/chttpd.app.src
@@ -26,6 +26,7 @@
couch_stats,
config,
couch,
+ ctrace,
ets_lru,
fabric
]},
diff --git a/src/chttpd/src/chttpd.erl b/src/chttpd/src/chttpd.erl
index adde0730f..fdca5c810 100644
--- a/src/chttpd/src/chttpd.erl
+++ b/src/chttpd/src/chttpd.erl
@@ -25,7 +25,7 @@
error_info/1, parse_form/1, json_body/1, json_body_obj/1, body/1,
doc_etag/1, make_etag/1, etag_respond/3, etag_match/2,
partition/1, serve_file/3, serve_file/4,
- server_header/0, start_chunked_response/3,send_chunk/2,
+ server_header/0, start_chunked_response/3,send_chunk/2,last_chunk/1,
start_response_length/4, send/2, start_json_response/2,
start_json_response/3, end_json_response/1, send_response/4,
send_response_no_cors/4,
@@ -52,8 +52,9 @@
req,
code,
headers,
- first_chunk,
- resp=nil
+ chunks,
+ resp=nil,
+ buffer_response=false
}).
start_link() ->
@@ -238,6 +239,8 @@ handle_request_int(MochiReq) ->
erlang:put(dont_log_request, true),
erlang:put(dont_log_response, true),
+ maybe_trace_fdb(MochiReq:get_header_value("x-couchdb-fdb-trace")),
+
{HttpReq2, Response} = case before_request(HttpReq0) of
{ok, HttpReq1} ->
process_request(HttpReq1);
@@ -257,6 +260,7 @@ handle_request_int(MochiReq) ->
case after_request(HttpReq2, HttpResp) of
#httpd_resp{status = ok, response = Resp} ->
+ span_ok(HttpResp),
{ok, Resp};
#httpd_resp{status = aborted, reason = Reason} ->
couch_log:error("Response abnormally terminated: ~p", [Reason]),
@@ -264,9 +268,11 @@ handle_request_int(MochiReq) ->
end.
before_request(HttpReq) ->
+ ctrace:is_enabled() andalso start_span(HttpReq),
try
- chttpd_stats:init(),
- chttpd_plugin:before_request(HttpReq)
+ {ok, HttpReq1} = chttpd_plugin:before_request(HttpReq),
+ chttpd_stats:init(HttpReq1),
+ {ok, HttpReq1}
catch Tag:Error ->
{error, catch_error(HttpReq, Tag, Error)}
end.
@@ -281,7 +287,7 @@ after_request(HttpReq, HttpResp0) ->
{ok, HttpResp0#httpd_resp{status = aborted}}
end,
HttpResp2 = update_stats(HttpReq, HttpResp1),
- chttpd_stats:report(HttpReq, HttpResp2),
+ chttpd_stats:report(HttpResp2),
maybe_log(HttpReq, HttpResp2),
HttpResp2.
@@ -314,6 +320,8 @@ process_request(#httpd{mochi_req = MochiReq} = HttpReq) ->
end.
handle_req_after_auth(HandlerKey, HttpReq) ->
+ #httpd{user_ctx = #user_ctx{name = User}} = HttpReq,
+ ctrace:tag(#{user => User}),
try
HandlerFun = chttpd_handlers:url_handler(HandlerKey,
fun chttpd_db:handle_request/1),
@@ -350,6 +358,10 @@ catch_error(HttpReq, throw, Error) ->
send_error(HttpReq, Error);
catch_error(HttpReq, error, database_does_not_exist) ->
send_error(HttpReq, database_does_not_exist);
+catch_error(HttpReq, error, decryption_failed) ->
+ send_error(HttpReq, decryption_failed);
+catch_error(HttpReq, error, not_ciphertext) ->
+ send_error(HttpReq, not_ciphertext);
catch_error(HttpReq, Tag, Error) ->
Stack = erlang:get_stacktrace(),
% TODO improve logging and metrics collection for client disconnects
@@ -412,8 +424,7 @@ possibly_hack(#httpd{path_parts=[<<"_replicate">>]}=Req) ->
{Props0} = chttpd:json_body_obj(Req),
Props1 = fix_uri(Req, Props0, <<"source">>),
Props2 = fix_uri(Req, Props1, <<"target">>),
- put(post_body, {Props2}),
- Req;
+ Req#httpd{req_body={Props2}};
possibly_hack(Req) ->
Req.
@@ -666,13 +677,16 @@ body(#httpd{mochi_req=MochiReq, req_body=ReqBody}) ->
validate_ctype(Req, Ctype) ->
couch_httpd:validate_ctype(Req, Ctype).
-json_body(Httpd) ->
+json_body(#httpd{req_body=undefined} = Httpd) ->
case body(Httpd) of
undefined ->
throw({bad_request, "Missing request body"});
Body ->
?JSON_DECODE(maybe_decompress(Httpd, Body))
- end.
+ end;
+
+json_body(#httpd{req_body=ReqBody}) ->
+ ReqBody.
json_body_obj(Httpd) ->
case json_body(Httpd) of
@@ -745,7 +759,14 @@ start_chunked_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers0) ->
send_chunk({remote, _Pid, _Ref} = Resp, Data) ->
couch_httpd:send_chunk(Resp, Data);
send_chunk(Resp, Data) ->
- Resp:write_chunk(Data),
+ case iolist_size(Data) of
+ 0 -> ok; % do nothing
+ _ -> Resp:write_chunk(Data)
+ end,
+ {ok, Resp}.
+
+last_chunk(Resp) ->
+ Resp:write_chunk([]),
{ok, Resp}.
send_response(Req, Code, Headers0, Body) ->
@@ -780,40 +801,54 @@ start_json_response(Req, Code, Headers0) ->
end_json_response(Resp) ->
couch_httpd:end_json_response(Resp).
+
start_delayed_json_response(Req, Code) ->
start_delayed_json_response(Req, Code, []).
+
start_delayed_json_response(Req, Code, Headers) ->
start_delayed_json_response(Req, Code, Headers, "").
+
start_delayed_json_response(Req, Code, Headers, FirstChunk) ->
{ok, #delayed_resp{
start_fun = fun start_json_response/3,
req = Req,
code = Code,
headers = Headers,
- first_chunk = FirstChunk}}.
+ chunks = [FirstChunk],
+ buffer_response = buffer_response(Req)}}.
+
start_delayed_chunked_response(Req, Code, Headers) ->
start_delayed_chunked_response(Req, Code, Headers, "").
+
start_delayed_chunked_response(Req, Code, Headers, FirstChunk) ->
{ok, #delayed_resp{
start_fun = fun start_chunked_response/3,
req = Req,
code = Code,
headers = Headers,
- first_chunk = FirstChunk}}.
+ chunks = [FirstChunk],
+ buffer_response = buffer_response(Req)}}.
+
-send_delayed_chunk(#delayed_resp{}=DelayedResp, Chunk) ->
+send_delayed_chunk(#delayed_resp{buffer_response=false}=DelayedResp, Chunk) ->
{ok, #delayed_resp{resp=Resp}=DelayedResp1} =
start_delayed_response(DelayedResp),
{ok, Resp} = send_chunk(Resp, Chunk),
- {ok, DelayedResp1}.
+ {ok, DelayedResp1};
+
+send_delayed_chunk(#delayed_resp{buffer_response=true}=DelayedResp, Chunk) ->
+ #delayed_resp{chunks = Chunks} = DelayedResp,
+ {ok, DelayedResp#delayed_resp{chunks = [Chunk | Chunks]}}.
+
send_delayed_last_chunk(Req) ->
send_delayed_chunk(Req, []).
+
send_delayed_error(#delayed_resp{req=Req,resp=nil}=DelayedResp, Reason) ->
{Code, ErrorStr, ReasonStr} = error_info(Reason),
{ok, Resp} = send_error(Req, Code, ErrorStr, ReasonStr),
@@ -823,6 +858,7 @@ send_delayed_error(#delayed_resp{resp=Resp, req=Req}, Reason) ->
log_error_with_stack_trace(Reason),
throw({http_abort, Resp, Reason}).
+
close_delayed_json_object(Resp, Buffer, Terminator, 0) ->
% Use a separate chunk to close the streamed array to maintain strict
% compatibility with earlier versions. See COUCHDB-2724
@@ -831,10 +867,22 @@ close_delayed_json_object(Resp, Buffer, Terminator, 0) ->
close_delayed_json_object(Resp, Buffer, Terminator, _Threshold) ->
send_delayed_chunk(Resp, [Buffer | Terminator]).
-end_delayed_json_response(#delayed_resp{}=DelayedResp) ->
+
+end_delayed_json_response(#delayed_resp{buffer_response=false}=DelayedResp) ->
{ok, #delayed_resp{resp=Resp}} =
start_delayed_response(DelayedResp),
- end_json_response(Resp).
+ end_json_response(Resp);
+
+end_delayed_json_response(#delayed_resp{buffer_response=true}=DelayedResp) ->
+ #delayed_resp{
+ req = Req,
+ code = Code,
+ headers = Headers,
+ chunks = Chunks
+ } = DelayedResp,
+ {ok, Resp} = start_response_length(Req, Code, Headers, iolist_size(Chunks)),
+ send(Resp, lists:reverse(Chunks)).
+
get_delayed_req(#delayed_resp{req=#httpd{mochi_req=MochiReq}}) ->
MochiReq;
@@ -847,7 +895,7 @@ start_delayed_response(#delayed_resp{resp=nil}=DelayedResp) ->
req=Req,
code=Code,
headers=Headers,
- first_chunk=FirstChunk
+ chunks=[FirstChunk]
}=DelayedResp,
{ok, Resp} = StartFun(Req, Code, Headers),
case FirstChunk of
@@ -858,6 +906,18 @@ start_delayed_response(#delayed_resp{resp=nil}=DelayedResp) ->
start_delayed_response(#delayed_resp{}=DelayedResp) ->
{ok, DelayedResp}.
+
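+% A request can force or disable buffered (non-streamed) delayed responses
+% with the ?buffer_response=true|false query parameter, falling back to the
+% [chttpd] buffer_response setting. Buffered responses are accumulated as
+% chunks and sent in one shot with an explicit Content-Length by
+% end_delayed_json_response/1 above.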
+buffer_response(Req) ->
+ case chttpd:qs_value(Req, "buffer_response") of
+ "false" ->
+ false;
+ "true" ->
+ true;
+ _ ->
+ config:get_boolean("chttpd", "buffer_response", false)
+ end.
+
+
error_info({Error, Reason}) when is_list(Reason) ->
error_info({Error, couch_util:to_binary(Reason)});
error_info(bad_request) ->
@@ -930,12 +990,18 @@ error_info({error, {database_name_too_long, DbName}}) ->
<<"At least one path segment of `", DbName/binary, "` is too long.">>};
error_info({doc_validation, Reason}) ->
{400, <<"doc_validation">>, Reason};
+error_info({invalid_since_seq, Reason}) ->
+ {400, <<"invalid_since_seq">>, Reason};
error_info({missing_stub, Reason}) ->
{412, <<"missing_stub">>, Reason};
error_info(request_entity_too_large) ->
{413, <<"too_large">>, <<"the request entity is too large">>};
error_info({request_entity_too_large, {attachment, AttName}}) ->
{413, <<"attachment_too_large">>, AttName};
+error_info({request_entity_too_large, {bulk_docs, Max}}) when is_integer(Max) ->
+ {413, <<"max_bulk_docs_count_exceeded">>, integer_to_binary(Max)};
+error_info({request_entity_too_large, {bulk_get, Max}}) when is_integer(Max) ->
+ {413, <<"max_bulk_get_count_exceeded">>, integer_to_binary(Max)};
error_info({request_entity_too_large, DocID}) ->
{413, <<"document_too_large">>, DocID};
error_info({error, security_migration_updates_disabled}) ->
@@ -949,6 +1015,10 @@ error_info(not_implemented) ->
error_info(timeout) ->
{500, <<"timeout">>, <<"The request could not be processed in a reasonable"
" amount of time.">>};
+error_info(decryption_failed) ->
+ {500, <<"decryption_failed">>, <<"Decryption failed">>};
+error_info(not_ciphertext) ->
+ {500, <<"not_ciphertext">>, <<"Not Ciphertext">>};
error_info({service_unavailable, Reason}) ->
{503, <<"service unavailable">>, Reason};
error_info({timeout, _Reason}) ->
@@ -970,6 +1040,8 @@ maybe_handle_error(Error) ->
Result;
{Err, Reason} ->
{500, couch_util:to_binary(Err), couch_util:to_binary(Reason)};
+ normal ->
+ exit(normal);
Error ->
{500, <<"unknown_error">>, couch_util:to_binary(Error)}
end.
@@ -1043,16 +1115,20 @@ send_error(#httpd{} = Req, Code, ErrorStr, ReasonStr) ->
send_error(Req, Code, [], ErrorStr, ReasonStr, []).
send_error(Req, Code, Headers, ErrorStr, ReasonStr, []) ->
- send_json(Req, Code, Headers,
+ Return = send_json(Req, Code, Headers,
{[{<<"error">>, ErrorStr},
- {<<"reason">>, ReasonStr}]});
+ {<<"reason">>, ReasonStr}]}),
+ span_error(Code, ErrorStr, ReasonStr, []),
+ Return;
send_error(Req, Code, Headers, ErrorStr, ReasonStr, Stack) ->
log_error_with_stack_trace({ErrorStr, ReasonStr, Stack}),
- send_json(Req, Code, [stack_trace_id(Stack) | Headers],
+ Return = send_json(Req, Code, [stack_trace_id(Stack) | Headers],
{[{<<"error">>, ErrorStr},
{<<"reason">>, ReasonStr} |
case Stack of [] -> []; _ -> [{<<"ref">>, stack_hash(Stack)}] end
- ]}).
+ ]}),
+ span_error(Code, ErrorStr, ReasonStr, Stack),
+ Return.
update_timeout_stats(<<"timeout">>, #httpd{requested_path_parts = PathParts}) ->
update_timeout_stats(PathParts);
@@ -1206,6 +1282,126 @@ get_user(#httpd{user_ctx = #user_ctx{name = User}}) ->
get_user(#httpd{user_ctx = undefined}) ->
"undefined".
+maybe_trace_fdb("true") ->
+ % Remember to also enable tracing in the erlfdb application environment:
+ % network_options = [{trace_enable, ...}]
+ % Or via the OS environment variable:
+ % FDB_NETWORK_OPTION_TRACE_ENABLE = ""
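+ % With that in place, a client opts in per request by sending the
+ % "x-couchdb-fdb-trace: true" header while [fabric] fdb_trace is true; the
+ % request nonce is then stored in the process dictionary as erlfdb_trace
+ % for the fdb layer to pick up.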
+ case config:get_boolean("fabric", "fdb_trace", false) of
+ true ->
+ Nonce = erlang:get(nonce),
+ erlang:put(erlfdb_trace, list_to_binary(Nonce));
+ false ->
+ ok
+ end;
+maybe_trace_fdb(_) ->
+ ok.
+
+start_span(Req) ->
+ #httpd{
+ mochi_req = MochiReq,
+ begin_ts = Begin,
+ peer = Peer,
+ nonce = Nonce,
+ method = Method,
+ path_parts = PathParts
+ } = Req,
+ {OperationName, ExtraTags} = get_action(Req),
+ Path = case PathParts of
+ [] -> <<"">>;
+ [_ | _] -> filename:join(PathParts)
+ end,
+ {IsExternalSpan, RootOptions} = root_span_options(MochiReq),
+ Tags = maps:merge(#{
+ peer => Peer,
+ 'http.method' => Method,
+ nonce => Nonce,
+ 'http.url' => MochiReq:get(raw_path),
+ path_parts => Path,
+ 'span.kind' => <<"server">>,
+ component => <<"couchdb.chttpd">>,
+ external => IsExternalSpan
+ }, ExtraTags),
+
+ ctrace:start_span(OperationName, [
+ {tags, Tags},
+ {time, Begin}
+ ] ++ RootOptions).
+
+root_span_options(MochiReq) ->
+ case get_trace_headers(MochiReq) of
+ [undefined, _, _] ->
+ {false, []};
+ [TraceId, SpanId, ParentSpanId] ->
+ Span = ctrace:external_span(TraceId, SpanId, ParentSpanId),
+ {true, [{root, Span}]}
+ end.
+
+parse_trace_id(undefined) ->
+ undefined;
+parse_trace_id(Hex) ->
+ to_int(Hex, 32).
+
+parse_span_id(undefined) ->
+ undefined;
+parse_span_id(Hex) ->
+ to_int(Hex, 16).
+
+to_int(Hex, N) when length(Hex) =:= N ->
+ try
+ list_to_integer(Hex, 16)
+ catch error:badarg ->
+ undefined
+ end.
+
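+% The single "b3" header carries "{TraceId}-{SpanId}-{SampledState}-{ParentSpanId}"
+% as hex strings (32 hex characters for the trace id, 16 for span ids),
+% matching the Zipkin B3 single-header propagation format; the sampled field
+% is ignored here.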
+get_trace_headers(MochiReq) ->
+ case MochiReq:get_header_value("b3") of
+ undefined ->
+ [
+ parse_trace_id(MochiReq:get_header_value("X-B3-TraceId")),
+ parse_span_id(MochiReq:get_header_value("X-B3-SpanId")),
+ parse_span_id(MochiReq:get_header_value("X-B3-ParentSpanId"))
+ ];
+ Value ->
+ case string:split(Value, "-", all) of
+ [TraceIdStr, SpanIdStr, _SampledStr, ParentSpanIdStr] ->
+ [
+ parse_trace_id(TraceIdStr),
+ parse_span_id(SpanIdStr),
+ parse_span_id(ParentSpanIdStr)
+ ];
+ _ ->
+ [undefined, undefined, undefined]
+ end
+ end.
+
+get_action(#httpd{} = Req) ->
+ try
+ chttpd_handlers:handler_info(Req)
+ catch Tag:Error ->
+ couch_log:error("Cannot set tracing action ~p:~p", [Tag, Error]),
+ {undefined, #{}}
+ end.
+
+span_ok(#httpd_resp{code = Code}) ->
+ ctrace:tag(#{
+ error => false,
+ 'http.status_code' => Code
+ }),
+ ctrace:finish_span().
+
+span_error(Code, ErrorStr, ReasonStr, Stack) ->
+ ctrace:tag(#{
+ error => true,
+ 'http.status_code' => Code
+ }),
+ ctrace:log(#{
+ 'error.kind' => ErrorStr,
+ message => ReasonStr,
+ stack => Stack
+ }),
+ ctrace:finish_span().
+
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
diff --git a/src/chttpd/src/chttpd_app.erl b/src/chttpd/src/chttpd_app.erl
index d7a5aef86..770b78ef9 100644
--- a/src/chttpd/src/chttpd_app.erl
+++ b/src/chttpd/src/chttpd_app.erl
@@ -14,8 +14,8 @@
-behaviour(application).
-export([start/2, stop/1]).
-start(_Type, StartArgs) ->
- chttpd_sup:start_link(StartArgs).
+start(_Type, _StartArgs) ->
+ chttpd_sup:start_link().
stop(_State) ->
ok.
diff --git a/src/chttpd/src/chttpd_auth_cache.erl b/src/chttpd/src/chttpd_auth_cache.erl
index fdae27b79..c5a56bddb 100644
--- a/src/chttpd/src/chttpd_auth_cache.erl
+++ b/src/chttpd/src/chttpd_auth_cache.erl
@@ -12,16 +12,19 @@
-module(chttpd_auth_cache).
-behaviour(gen_server).
+-behaviour(config_listener).
-export([start_link/0, get_user_creds/2, update_user_creds/3]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
code_change/3]).
-export([listen_for_changes/1, changes_callback/2]).
+-export([handle_config_change/5, handle_config_terminate/3]).
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch/include/couch_js_functions.hrl").
-define(CACHE, chttpd_auth_cache_lru).
+-define(RELISTEN_DELAY, 5000).
-record(state, {
changes_pid,
@@ -52,7 +55,8 @@ get_user_creds(_Req, UserName) when is_binary(UserName) ->
update_user_creds(_Req, UserDoc, _Ctx) ->
{_, Ref} = spawn_monitor(fun() ->
- case fabric:update_doc(dbname(), UserDoc, []) of
+ {ok, Db} = fabric2_db:open(dbname(), [?ADMIN_CTX]),
+ case fabric2_db:update_doc(Db, UserDoc) of
{ok, _} ->
exit(ok);
Else ->
@@ -100,9 +104,28 @@ maybe_increment_auth_cache_miss(UserName) ->
%% gen_server callbacks
init([]) ->
+ ensure_auth_db(),
+ ok = config:listen_for_changes(?MODULE, nil),
self() ! {start_listener, 0},
{ok, #state{}}.
+handle_call(reinit_cache, _From, State) ->
+ #state{
+ changes_pid = Pid
+ } = State,
+
+ % The database may currently be cached. This
+ % ensures that we've removed it so that the
+ % system db callbacks are installed.
+ fabric2_server:remove(dbname()),
+
+ ensure_auth_db(),
+ ets_lru:clear(?CACHE),
+ exit(Pid, shutdown),
+ self() ! {start_listener, 0},
+
+ {reply, ok, State#state{changes_pid = undefined}};
+
handle_call(_Call, _From, State) ->
{noreply, State}.
@@ -124,6 +147,9 @@ handle_info({'DOWN', _, _, Pid, Reason}, #state{changes_pid=Pid} = State) ->
{noreply, State#state{last_seq=Seq}};
handle_info({start_listener, Seq}, State) ->
{noreply, State#state{changes_pid = spawn_changes(Seq)}};
+handle_info(restart_config_listener, State) ->
+ ok = config:listen_for_changes(?MODULE, nil),
+ {noreply, State};
handle_info(_Msg, State) ->
{noreply, State}.
@@ -142,7 +168,8 @@ spawn_changes(Since) ->
Pid.
listen_for_changes(Since) ->
- ensure_auth_ddoc_exists(dbname(), <<"_design/_auth">>),
+ {ok, Db} = fabric2_db:open(dbname(), [?ADMIN_CTX]),
+ ensure_auth_ddoc_exists(Db, <<"_design/_auth">>),
CBFun = fun ?MODULE:changes_callback/2,
Args = #changes_args{
feed = "continuous",
@@ -150,7 +177,8 @@ listen_for_changes(Since) ->
heartbeat = true,
filter = {default, main_only}
},
- fabric:changes(dbname(), CBFun, Since, Args).
+ ChangesFun = chttpd_changes:handle_db_changes(Args, nil, Db),
+ ChangesFun({CBFun, Since}).
changes_callback(waiting_for_updates, Acc) ->
{ok, Acc};
@@ -159,7 +187,7 @@ changes_callback(start, Since) ->
changes_callback({stop, EndSeq, _Pending}, _) ->
exit({seq, EndSeq});
changes_callback({change, {Change}}, _) ->
- case couch_util:get_value(id, Change) of
+ case couch_util:get_value(<<"id">>, Change) of
<<"_design/", _/binary>> ->
ok;
DocId ->
@@ -168,13 +196,27 @@ changes_callback({change, {Change}}, _) ->
ets_lru:remove(?CACHE, UserName)
end,
{ok, couch_util:get_value(seq, Change)};
-changes_callback(timeout, Acc) ->
+changes_callback({timeout, _ResponseType}, Acc) ->
{ok, Acc};
changes_callback({error, _}, EndSeq) ->
exit({seq, EndSeq}).
+
+handle_config_change("chttpd_auth", "authentication_db", _DbName, _, _) ->
+ {ok, gen_server:call(?MODULE, reinit_cache, infinity)};
+handle_config_change(_, _, _, _, _) ->
+ {ok, nil}.
+
+handle_config_terminate(_, stop, _) ->
+ ok;
+handle_config_terminate(_Server, _Reason, _State) ->
+ Dst = whereis(?MODULE),
+ erlang:send_after(?RELISTEN_DELAY, Dst, restart_config_listener).
+
+
load_user_from_db(UserName) ->
- try fabric:open_doc(dbname(), docid(UserName), [?ADMIN_CTX, ejson_body, conflicts]) of
+ {ok, Db} = fabric2_db:open(dbname(), [?ADMIN_CTX]),
+ try fabric2_db:open_doc(Db, docid(UserName), [conflicts]) of
{ok, Doc} ->
{Props} = couch_doc:to_json_obj(Doc, []),
Props;
@@ -185,8 +227,21 @@ load_user_from_db(UserName) ->
nil
end.
+
+ensure_auth_db() ->
+ try
+ fabric2_db:open(dbname(), [?ADMIN_CTX])
+ catch error:database_does_not_exist ->
+ case fabric2_db:create(dbname(), [?ADMIN_CTX]) of
+ {ok, _} -> ok;
+ {error, file_exists} -> ok
+ end
+ end.
+
+
dbname() ->
- config:get("chttpd_auth", "authentication_db", "_users").
+ DbNameStr = config:get("chttpd_auth", "authentication_db", "_users"),
+ iolist_to_binary(DbNameStr).
docid(UserName) ->
<<"org.couchdb.user:", UserName/binary>>.
@@ -194,11 +249,11 @@ docid(UserName) ->
username(<<"org.couchdb.user:", UserName/binary>>) ->
UserName.
-ensure_auth_ddoc_exists(DbName, DDocId) ->
- case fabric:open_doc(DbName, DDocId, [?ADMIN_CTX, ejson_body]) of
+ensure_auth_ddoc_exists(Db, DDocId) ->
+ case fabric2_db:open_doc(Db, DDocId) of
{not_found, _Reason} ->
{ok, AuthDesign} = couch_auth_cache:auth_design_doc(DDocId),
- update_doc_ignoring_conflict(DbName, AuthDesign, [?ADMIN_CTX]);
+ update_doc_ignoring_conflict(Db, AuthDesign);
{ok, Doc} ->
{Props} = couch_doc:to_json_obj(Doc, []),
case couch_util:get_value(<<"validate_doc_update">>, Props, []) of
@@ -208,19 +263,20 @@ ensure_auth_ddoc_exists(DbName, DDocId) ->
Props1 = lists:keyreplace(<<"validate_doc_update">>, 1, Props,
{<<"validate_doc_update">>,
?AUTH_DB_DOC_VALIDATE_FUNCTION}),
- update_doc_ignoring_conflict(DbName, couch_doc:from_json_obj({Props1}), [?ADMIN_CTX])
+ NewDoc = couch_doc:from_json_obj({Props1}),
+ update_doc_ignoring_conflict(Db, NewDoc)
end;
{error, Reason} ->
- couch_log:notice("Failed to ensure auth ddoc ~s/~s exists for reason: ~p", [DbName, DDocId, Reason]),
+ couch_log:notice("Failed to ensure auth ddoc ~s/~s exists for reason: ~p", [dbname(), DDocId, Reason]),
ok
end,
ok.
-update_doc_ignoring_conflict(DbName, Doc, Options) ->
+update_doc_ignoring_conflict(DbName, Doc) ->
try
- fabric:update_doc(DbName, Doc, Options)
+ fabric2_db:update_doc(DbName, Doc)
catch
- throw:conflict ->
+ error:conflict ->
ok
end.
diff --git a/src/chttpd/src/chttpd_auth_request.erl b/src/chttpd/src/chttpd_auth_request.erl
index 8040f91fd..3f6f97602 100644
--- a/src/chttpd/src/chttpd_auth_request.erl
+++ b/src/chttpd/src/chttpd_auth_request.erl
@@ -106,8 +106,8 @@ server_authorization_check(#httpd{path_parts=[<<"_node">>,_ , <<"_system">>|_]}=
server_authorization_check(#httpd{path_parts=[<<"_", _/binary>>|_]}=Req) ->
require_admin(Req).
-db_authorization_check(#httpd{path_parts=[DbName|_],user_ctx=Ctx}=Req) ->
- {_} = fabric:get_security(DbName, [{user_ctx, Ctx}]),
+db_authorization_check(#httpd{path_parts=[_DbName|_]}=Req) ->
+ % Db authorization checks are performed in fabric before every FDB operation
Req.
@@ -125,8 +125,8 @@ require_admin(Req) ->
Req.
require_db_admin(#httpd{path_parts=[DbName|_],user_ctx=Ctx}=Req) ->
- Sec = fabric:get_security(DbName, [{user_ctx, Ctx}]),
-
+ {ok, Db} = fabric2_db:open(DbName, [{user_ctx, Ctx}]),
+ Sec = fabric2_db:get_security(Db),
case is_db_admin(Ctx,Sec) of
true -> Req;
false -> throw({unauthorized, <<"You are not a server or db admin.">>})
diff --git a/src/chttpd/src/chttpd_changes.erl b/src/chttpd/src/chttpd_changes.erl
new file mode 100644
index 000000000..45c7d57b9
--- /dev/null
+++ b/src/chttpd/src/chttpd_changes.erl
@@ -0,0 +1,760 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(chttpd_changes).
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+
+-export([
+ handle_db_changes/3,
+ get_changes_timeout/2,
+ wait_updated/3,
+ get_rest_updated/1,
+ configure_filter/4,
+ filter/3,
+ handle_db_event/3,
+ handle_view_event/3,
+ send_changes_doc_ids/6,
+ send_changes_design_docs/6
+]).
+
+-export([changes_enumerator/2]).
+
+%% export so we can use fully qualified call to facilitate hot-code upgrade
+-export([
+ keep_sending_changes/3
+]).
+
+-record(changes_acc, {
+ db,
+ seq,
+ prepend,
+ filter,
+ callback,
+ user_acc,
+ resp_type,
+ limit,
+ include_docs,
+ doc_options,
+ conflicts,
+ timeout,
+ timeout_fun,
+ aggregation_kvs,
+ aggregation_results
+}).
+
+handle_db_changes(Args0, Req, Db0) ->
+ #changes_args{
+ style = Style,
+ filter = FilterName,
+ feed = Feed,
+ dir = Dir,
+ since = Since
+ } = Args0,
+ Filter = configure_filter(FilterName, Style, Req, Db0),
+ Args = Args0#changes_args{filter_fun = Filter},
+ DbName = fabric2_db:name(Db0),
+ StartListenerFun = fun() ->
+ fabric2_events:link_listener(
+ ?MODULE, handle_db_event, self(), [{dbname, DbName}]
+ )
+ end,
+ Start = fun() ->
+ StartSeq = case Dir =:= rev orelse Since =:= now of
+ true -> fabric2_db:get_update_seq(Db0);
+ false -> Since
+ end,
+ {Db0, StartSeq}
+ end,
+ % begin timer to deal with heartbeat when filter function fails
+ case Args#changes_args.heartbeat of
+ undefined ->
+ erlang:erase(last_changes_heartbeat);
+ Val when is_integer(Val); Val =:= true ->
+ put(last_changes_heartbeat, os:timestamp())
+ end,
+
+ case lists:member(Feed, ["continuous", "longpoll", "eventsource"]) of
+ true ->
+ fun(CallbackAcc) ->
+ {Callback, UserAcc} = get_callback_acc(CallbackAcc),
+ {ok, Listener} = StartListenerFun(),
+
+ {Db, StartSeq} = Start(),
+ UserAcc2 = start_sending_changes(Callback, UserAcc),
+ {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
+ Acc0 = build_acc(Args, Callback, UserAcc2, Db, StartSeq,
+ <<"">>, Timeout, TimeoutFun),
+ try
+ keep_sending_changes(
+ Args#changes_args{dir=fwd},
+ Acc0,
+ true)
+ after
+ fabric2_events:stop_listener(Listener),
+ get_rest_updated(ok) % clean out any remaining update messages
+ end
+ end;
+ false ->
+ fun(CallbackAcc) ->
+ {Callback, UserAcc} = get_callback_acc(CallbackAcc),
+ UserAcc2 = start_sending_changes(Callback, UserAcc),
+ {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
+ {Db, StartSeq} = Start(),
+ Acc0 = build_acc(Args#changes_args{feed="normal"}, Callback,
+ UserAcc2, Db, StartSeq, <<>>,
+ Timeout, TimeoutFun),
+ {ok, #changes_acc{seq = LastSeq, user_acc = UserAcc3}} =
+ send_changes(
+ Acc0,
+ Dir,
+ true),
+ end_sending_changes(Callback, UserAcc3, LastSeq)
+ end
+ end.
+
+
+handle_db_event(_DbName, updated, Parent) ->
+ Parent ! updated,
+ {ok, Parent};
+handle_db_event(_DbName, deleted, Parent) ->
+ Parent ! deleted,
+ {ok, Parent};
+handle_db_event(_DbName, _Event, Parent) ->
+ {ok, Parent}.
+
+
+handle_view_event(_DbName, Msg, {Parent, DDocId}) ->
+ case Msg of
+ {index_commit, DDocId} ->
+ Parent ! updated;
+ {index_delete, DDocId} ->
+ Parent ! deleted;
+ _ ->
+ ok
+ end,
+ {ok, {Parent, DDocId}}.
+
+get_callback_acc({Callback, _UserAcc} = Pair) when is_function(Callback, 2) ->
+ Pair;
+get_callback_acc(Callback) when is_function(Callback, 1) ->
+ {fun(Ev, _) -> Callback(Ev) end, ok}.
+
+
+configure_filter(Filter, _Style, _Req, _Db) when is_tuple(Filter) ->
+ % Filter has already been configured
+ Filter;
+configure_filter("_doc_ids", Style, Req, _Db) ->
+ {doc_ids, Style, get_doc_ids(Req)};
+configure_filter("_selector", Style, Req, _Db) ->
+ {selector, Style, get_selector_and_fields(Req)};
+configure_filter("_design", Style, _Req, _Db) ->
+ {design_docs, Style};
+configure_filter("_view", Style, Req, Db) ->
+ ViewName = get_view_qs(Req),
+ if ViewName /= "" -> ok; true ->
+ throw({bad_request, "`view` filter parameter is not provided."})
+ end,
+ ViewNameParts = string:tokens(ViewName, "/"),
+ case [?l2b(couch_httpd:unquote(Part)) || Part <- ViewNameParts] of
+ [DName, VName] ->
+ {ok, DDoc} = open_ddoc(Db, <<"_design/", DName/binary>>),
+ check_member_exists(DDoc, [<<"views">>, VName]),
+ case fabric2_db:is_clustered(Db) of
+ true ->
+ DIR = fabric_util:doc_id_and_rev(DDoc),
+ {fetch, view, Style, DIR, VName};
+ false ->
+ {view, Style, DDoc, VName}
+ end;
+ [] ->
+ Msg = "`view` must be of the form `designname/viewname`",
+ throw({bad_request, Msg})
+ end;
+configure_filter([$_ | _], _Style, _Req, _Db) ->
+ throw({bad_request, "unknown builtin filter name"});
+configure_filter("", main_only, _Req, _Db) ->
+ {default, main_only};
+configure_filter("", all_docs, _Req, _Db) ->
+ {default, all_docs};
+configure_filter(FilterName, Style, Req, Db) ->
+ FilterNameParts = string:tokens(FilterName, "/"),
+ case [?l2b(couch_httpd:unquote(Part)) || Part <- FilterNameParts] of
+ [DName, FName] ->
+ {ok, DDoc} = open_ddoc(Db, <<"_design/", DName/binary>>),
+ check_member_exists(DDoc, [<<"filters">>, FName]),
+ {custom, Style, Req, DDoc, FName};
+ [] ->
+ {default, Style};
+ _Else ->
+ Msg = "`filter` must be of the form `designname/filtername`",
+ throw({bad_request, Msg})
+ end.
+
+
+filter(Db, Change, {default, Style}) ->
+ apply_style(Db, Change, Style);
+filter(Db, Change, {doc_ids, Style, DocIds}) ->
+ case lists:member(maps:get(id, Change), DocIds) of
+ true ->
+ apply_style(Db, Change, Style);
+ false ->
+ []
+ end;
+filter(Db, Change, {selector, Style, {Selector, _Fields}}) ->
+ Docs = open_revs(Db, Change, Style),
+ Passes = [mango_selector:match(Selector, couch_doc:to_json_obj(Doc, []))
+ || Doc <- Docs],
+ filter_revs(Passes, Docs);
+filter(Db, Change, {design_docs, Style}) ->
+ case maps:get(id, Change) of
+ <<"_design", _/binary>> ->
+ apply_style(Db, Change, Style);
+ _ ->
+ []
+ end;
+filter(Db, Change, {view, Style, DDoc, VName}) ->
+ Docs = open_revs(Db, Change, Style),
+ {ok, Passes} = couch_query_servers:filter_view(DDoc, VName, Docs),
+ filter_revs(Passes, Docs);
+filter(Db, Change, {custom, Style, Req0, DDoc, FName}) ->
+ Req = case Req0 of
+ {json_req, _} -> Req0;
+ #httpd{} -> {json_req, chttpd_external:json_req_obj(Req0, Db)}
+ end,
+ Docs = open_revs(Db, Change, Style),
+ {ok, Passes} = couch_query_servers:filter_docs(Req, Db, DDoc, FName, Docs),
+ filter_revs(Passes, Docs);
+filter(Db, Change, Filter) ->
+ erlang:error({filter_error, Db, Change, Filter}).
+
+
+get_view_qs({json_req, {Props}}) ->
+ {Query} = couch_util:get_value(<<"query">>, Props, {[]}),
+ binary_to_list(couch_util:get_value(<<"view">>, Query, ""));
+get_view_qs(Req) ->
+ couch_httpd:qs_value(Req, "view", "").
+
+get_doc_ids({json_req, {Props}}) ->
+ check_docids(couch_util:get_value(<<"doc_ids">>, Props));
+get_doc_ids(#httpd{method='POST'}=Req) ->
+ couch_httpd:validate_ctype(Req, "application/json"),
+ {Props} = couch_httpd:json_body_obj(Req),
+ check_docids(couch_util:get_value(<<"doc_ids">>, Props));
+get_doc_ids(#httpd{method='GET'}=Req) ->
+ DocIds = ?JSON_DECODE(couch_httpd:qs_value(Req, "doc_ids", "null")),
+ check_docids(DocIds);
+get_doc_ids(_) ->
+ throw({bad_request, no_doc_ids_provided}).
+
+
+get_selector_and_fields({json_req, {Props}}) ->
+ Selector = check_selector(couch_util:get_value(<<"selector">>, Props)),
+ Fields = check_fields(couch_util:get_value(<<"fields">>, Props, nil)),
+ {Selector, Fields};
+get_selector_and_fields(#httpd{method='POST'}=Req) ->
+ couch_httpd:validate_ctype(Req, "application/json"),
+ get_selector_and_fields({json_req, couch_httpd:json_body_obj(Req)});
+get_selector_and_fields(_) ->
+ throw({bad_request, "Selector must be specified in POST payload"}).
+
+
+check_docids(DocIds) when is_list(DocIds) ->
+ lists:foreach(fun
+ (DocId) when not is_binary(DocId) ->
+ Msg = "`doc_ids` filter parameter is not a list of doc ids.",
+ throw({bad_request, Msg});
+ (_) -> ok
+ end, DocIds),
+ DocIds;
+check_docids(_) ->
+ Msg = "`doc_ids` filter parameter is not a list of doc ids.",
+ throw({bad_request, Msg}).
+
+
+check_selector(Selector={_}) ->
+ try
+ mango_selector:normalize(Selector)
+ catch
+ {mango_error, Mod, Reason0} ->
+ {_StatusCode, _Error, Reason} = mango_error:info(Mod, Reason0),
+ throw({bad_request, Reason})
+ end;
+check_selector(_Selector) ->
+ throw({bad_request, "Selector error: expected a JSON object"}).
+
+
+check_fields(nil) ->
+ nil;
+check_fields(Fields) when is_list(Fields) ->
+ try
+ {ok, Fields1} = mango_fields:new(Fields),
+ Fields1
+ catch
+ {mango_error, Mod, Reason0} ->
+ {_StatusCode, _Error, Reason} = mango_error:info(Mod, Reason0),
+ throw({bad_request, Reason})
+ end;
+check_fields(_Fields) ->
+ throw({bad_request, "Selector error: fields must be JSON array"}).
+
+
+open_ddoc(Db, DDocId) ->
+ case fabric2_db:open_doc(Db, DDocId, [ejson_body, ?ADMIN_CTX]) of
+ {ok, _} = Resp -> Resp;
+ Else -> throw(Else)
+ end.
+
+
+check_member_exists(#doc{body={Props}}, Path) ->
+ couch_util:get_nested_json_value({Props}, Path).
+
+
+apply_style(_Db, Change, main_only) ->
+ #{rev_id := RevId} = Change,
+ [{[{<<"rev">>, couch_doc:rev_to_str(RevId)}]}];
+apply_style(Db, Change, all_docs) ->
+ % We have to fetch all revs for this row
+ #{id := DocId} = Change,
+ {ok, Resps} = fabric2_db:open_doc_revs(Db, DocId, all, [deleted]),
+ lists:flatmap(fun(Resp) ->
+ case Resp of
+ {ok, #doc{revs = {Pos, [Rev | _]}}} ->
+ [{[{<<"rev">>, couch_doc:rev_to_str({Pos, Rev})}]}];
+ _ ->
+ []
+ end
+ end, Resps);
+apply_style(Db, Change, Style) ->
+ erlang:error({changes_apply_style, Db, Change, Style}).
+
+
+open_revs(Db, Change, Style) ->
+ #{id := DocId} = Change,
+ Options = [deleted, conflicts],
+ try
+ case Style of
+ main_only ->
+ {ok, Doc} = fabric2_db:open_doc(Db, DocId, Options),
+ [Doc];
+ all_docs ->
+ {ok, Docs} = fabric2_db:open_doc_revs(Db, DocId, all, Options),
+ [Doc || {ok, Doc} <- Docs]
+ end
+ catch _:_ ->
+ % We didn't log this before, should we now?
+ []
+ end.
+
+
+filter_revs(Passes, Docs) ->
+ lists:flatmap(fun
+ ({true, #doc{revs={RevPos, [RevId | _]}}}) ->
+ RevStr = couch_doc:rev_to_str({RevPos, RevId}),
+ Change = {[{<<"rev">>, RevStr}]},
+ [Change];
+ (_) ->
+ []
+ end, lists:zip(Passes, Docs)).
+
+
+get_changes_timeout(Args, Callback) ->
+ #changes_args{
+ heartbeat = Heartbeat,
+ timeout = Timeout,
+ feed = ResponseType
+ } = Args,
+ DefaultTimeout = list_to_integer(
+ config:get("httpd", "changes_timeout", "60000")
+ ),
+ case Heartbeat of
+ undefined ->
+ case Timeout of
+ undefined ->
+ {DefaultTimeout, fun(UserAcc) -> {stop, UserAcc} end};
+ infinity ->
+ {infinity, fun(UserAcc) -> {stop, UserAcc} end};
+ _ ->
+ {lists:min([DefaultTimeout, Timeout]),
+ fun(UserAcc) -> {stop, UserAcc} end}
+ end;
+ true ->
+ {DefaultTimeout,
+ fun(UserAcc) -> Callback({timeout, ResponseType}, UserAcc) end};
+ _ ->
+ {lists:min([DefaultTimeout, Heartbeat]),
+ fun(UserAcc) -> Callback({timeout, ResponseType}, UserAcc) end}
+ end.
+
+start_sending_changes(Callback, UserAcc) ->
+ {_, NewUserAcc} = Callback(start, UserAcc),
+ NewUserAcc.
+
+build_acc(Args, Callback, UserAcc, Db, StartSeq, Prepend, Timeout, TimeoutFun) ->
+ #changes_args{
+ include_docs = IncludeDocs,
+ doc_options = DocOpts,
+ conflicts = Conflicts,
+ limit = Limit,
+ feed = ResponseType,
+ filter_fun = Filter
+ } = Args,
+ #changes_acc{
+ db = Db,
+ seq = StartSeq,
+ prepend = Prepend,
+ filter = Filter,
+ callback = Callback,
+ user_acc = UserAcc,
+ resp_type = ResponseType,
+ limit = Limit,
+ include_docs = IncludeDocs,
+ doc_options = DocOpts,
+ conflicts = Conflicts,
+ timeout = Timeout,
+ timeout_fun = TimeoutFun,
+ aggregation_results=[],
+ aggregation_kvs=[]
+ }.
+
+send_changes(Acc, Dir, FirstRound) ->
+ #changes_acc{
+ db = Db,
+ seq = StartSeq,
+ filter = Filter
+ } = maybe_upgrade_changes_acc(Acc),
+ DbEnumFun = fun changes_enumerator/2,
+ case can_optimize(FirstRound, Filter) of
+ {true, Fun} ->
+ Fun(Db, StartSeq, Dir, DbEnumFun, Acc, Filter);
+ _ ->
+ Opts = [{dir, Dir}],
+ fabric2_db:fold_changes(Db, StartSeq, DbEnumFun, Acc, Opts)
+ end.
+
+
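+% On the first round, the doc_ids and design_docs filters can skip the full
+% changes fold and look up the relevant documents directly. For doc_ids this
+% only pays off for small lists; the cutoff defaults to 100 ids and is read
+% from [couchdb] changes_doc_ids_optimization_threshold.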
+can_optimize(true, {doc_ids, _Style, DocIds}) ->
+ MaxDocIds = config:get_integer("couchdb",
+ "changes_doc_ids_optimization_threshold", 100),
+ if length(DocIds) =< MaxDocIds ->
+ {true, fun send_changes_doc_ids/6};
+ true ->
+ false
+ end;
+can_optimize(true, {design_docs, _Style}) ->
+ {true, fun send_changes_design_docs/6};
+can_optimize(_, _) ->
+ false.
+
+
+send_changes_doc_ids(Db, StartSeq, Dir, Fun, Acc0, {doc_ids, _Style, DocIds}) ->
+ Results = fabric2_db:get_full_doc_infos(Db, DocIds),
+ FullInfos = lists:foldl(fun
+ (#full_doc_info{}=FDI, Acc) -> [FDI | Acc];
+ (not_found, Acc) -> Acc
+ end, [], Results),
+ send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0).
+
+
+send_changes_design_docs(Db, StartSeq, Dir, Fun, Acc0, {design_docs, _Style}) ->
+ FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
+ Opts = [
+ include_deleted,
+ {start_key, <<"_design/">>},
+ {end_key_gt, <<"_design0">>}
+ ],
+ {ok, FullInfos} = fabric2_db:fold_docs(Db, FoldFun, [], Opts),
+ send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0).
+
+
+send_lookup_changes(FullDocInfos, StartSeq, Dir, Db, Fun, Acc0) ->
+ FoldFun = case Dir of
+ fwd -> fun lists:foldl/3;
+ rev -> fun lists:foldr/3
+ end,
+ GreaterFun = case Dir of
+ fwd -> fun(A, B) -> A > B end;
+ rev -> fun(A, B) -> A =< B end
+ end,
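+ % A forward fold keeps only docs whose seq is after StartSeq; a
+ % reverse fold keeps docs at or before it.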
+ DocInfos = lists:foldl(fun(FDI, Acc) ->
+ DI = couch_doc:to_doc_info(FDI),
+ case GreaterFun(DI#doc_info.high_seq, StartSeq) of
+ true -> [DI | Acc];
+ false -> Acc
+ end
+ end, [], FullDocInfos),
+ SortedDocInfos = lists:keysort(#doc_info.high_seq, DocInfos),
+ FinalAcc = try
+ FoldFun(fun(DocInfo, Acc) ->
+ % It's a bit awkward that we're munging this back into a map that
+ % will then have to be re-read and rebuilt into an FDI for the
+ % all_docs style, but c'est la vie.
+ #doc_info{
+ id = DocId,
+ high_seq = Seq,
+ revs = [#rev_info{rev = Rev, deleted = Deleted} | _]
+ } = DocInfo,
+ Change = #{
+ id => DocId,
+ sequence => Seq,
+ rev_id => Rev,
+ deleted => Deleted
+ },
+ case Fun(Change, Acc) of
+ {ok, NewAcc} ->
+ NewAcc;
+ {stop, NewAcc} ->
+ throw({stop, NewAcc})
+ end
+ end, Acc0, SortedDocInfos)
+ catch
+ {stop, Acc} -> Acc
+ end,
+ case Dir of
+ fwd ->
+ FinalAcc0 = case element(1, FinalAcc) of
+ changes_acc -> % we came here via couch_http or internal call
+ FinalAcc#changes_acc{seq = fabric2_db:get_update_seq(Db)};
+ fabric_changes_acc -> % we came here via chttpd / fabric / rexi
+ FinalAcc#fabric_changes_acc{seq = couch_db:get_update_seq(Db)}
+ end,
+ {ok, FinalAcc0};
+ rev -> {ok, FinalAcc}
+ end.
+
+
+keep_sending_changes(Args, Acc0, FirstRound) ->
+ #changes_args{
+ feed = ResponseType,
+ limit = Limit,
+ db_open_options = DbOptions
+ } = Args,
+
+ {ok, ChangesAcc} = send_changes(Acc0, fwd, FirstRound),
+
+ #changes_acc{
+ db = Db, callback = Callback,
+ timeout = Timeout, timeout_fun = TimeoutFun, seq = EndSeq,
+ prepend = Prepend2, user_acc = UserAcc2, limit = NewLimit
+ } = maybe_upgrade_changes_acc(ChangesAcc),
+
+ if Limit > NewLimit, ResponseType == "longpoll" ->
+ end_sending_changes(Callback, UserAcc2, EndSeq);
+ true ->
+ {Go, UserAcc3} = notify_waiting_for_updates(Callback, UserAcc2),
+ if Go /= ok -> end_sending_changes(Callback, UserAcc3, EndSeq); true ->
+ case wait_updated(Timeout, TimeoutFun, UserAcc3) of
+ {updated, UserAcc4} ->
+ UserCtx = fabric2_db:get_user_ctx(Db),
+ DbOptions1 = [{user_ctx, UserCtx} | DbOptions],
+ case fabric2_db:open(fabric2_db:name(Db), DbOptions1) of
+ {ok, Db2} ->
+ ?MODULE:keep_sending_changes(
+ Args#changes_args{limit=NewLimit},
+ ChangesAcc#changes_acc{
+ db = Db2,
+ user_acc = UserAcc4,
+ seq = EndSeq,
+ prepend = Prepend2,
+ timeout = Timeout,
+ timeout_fun = TimeoutFun},
+ false);
+ _Else ->
+ end_sending_changes(Callback, UserAcc3, EndSeq)
+ end;
+ {stop, UserAcc4} ->
+ end_sending_changes(Callback, UserAcc4, EndSeq)
+ end
+ end
+ end.
+
+notify_waiting_for_updates(Callback, UserAcc) ->
+ Callback(waiting_for_updates, UserAcc).
+
+end_sending_changes(Callback, UserAcc, EndSeq) ->
+ Callback({stop, EndSeq, null}, UserAcc).
+
+changes_enumerator(Change, Acc) ->
+ #changes_acc{
+ filter = Filter,
+ callback = Callback,
+ user_acc = UserAcc,
+ limit = Limit,
+ db = Db,
+ timeout = Timeout,
+ timeout_fun = TimeoutFun
+ } = maybe_upgrade_changes_acc(Acc),
+ Results0 = filter(Db, Change, Filter),
+ Results = [Result || Result <- Results0, Result /= null],
+ Seq = maps:get(sequence, Change),
+ Go = if (Limit =< 1) andalso Results =/= [] -> stop; true -> ok end,
+ case Results of
+ [] ->
+ {Done, UserAcc2} = maybe_heartbeat(Timeout, TimeoutFun, UserAcc),
+ case Done of
+ stop ->
+ {stop, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}};
+ ok ->
+ {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}}
+ end;
+ _ ->
+ ChangesRow = changes_row(Results, Change, Acc),
+ {UserGo, UserAcc2} = Callback({change, ChangesRow}, UserAcc),
+ RealGo = case UserGo of
+ ok -> Go;
+ stop -> stop
+ end,
+ reset_heartbeat(),
+ {RealGo, Acc#changes_acc{
+ seq = Seq,
+ user_acc = UserAcc2,
+ limit = Limit - 1
+ }}
+ end.
+
+
+changes_row(Results, Change, Acc) ->
+ #{
+ id := Id,
+ sequence := Seq,
+ deleted := Del
+ } = Change,
+ {[
+ {<<"seq">>, Seq},
+ {<<"id">>, Id},
+ {<<"changes">>, Results}
+ ] ++ deleted_item(Del) ++ maybe_get_changes_doc(Change, Acc)}.
+
+maybe_get_changes_doc(Value, #changes_acc{include_docs=true}=Acc) ->
+ #changes_acc{
+ db = Db,
+ doc_options = DocOpts0,
+ conflicts = Conflicts,
+ filter = Filter
+ } = Acc,
+ OpenOpts = case Conflicts of
+ true -> [deleted, conflicts];
+ false -> [deleted]
+ end,
+ DocOpts1 = case Conflicts of
+ true -> [conflicts | DocOpts0];
+ false -> DocOpts0
+ end,
+ load_doc(Db, Value, OpenOpts, DocOpts1, Filter);
+
+maybe_get_changes_doc(_Value, _Acc) ->
+ [].
+
+
+load_doc(Db, Value, Opts, DocOpts, Filter) ->
+ case load_doc(Db, Value, Opts) of
+ null ->
+ [{doc, null}];
+ Doc ->
+ [{doc, doc_to_json(Doc, DocOpts, Filter)}]
+ end.
+
+
+load_doc(Db, Change, Opts) ->
+ #{
+ id := Id,
+ rev_id := RevId
+ } = Change,
+ case fabric2_db:open_doc_revs(Db, Id, [RevId], Opts) of
+ {ok, [{ok, Doc}]} ->
+ Doc;
+ _ ->
+ null
+ end.
+
+
+doc_to_json(Doc, DocOpts, {selector, _Style, {_Selector, Fields}})
+ when Fields =/= nil ->
+ mango_fields:extract(couch_doc:to_json_obj(Doc, DocOpts), Fields);
+doc_to_json(Doc, DocOpts, _Filter) ->
+ couch_doc:to_json_obj(Doc, DocOpts).
+
+
+deleted_item(true) -> [{<<"deleted">>, true}];
+deleted_item(_) -> [].
+
+% Waits for an 'updated' message; if there are multiple messages, collect them all.
+wait_updated(Timeout, TimeoutFun, UserAcc) ->
+ receive
+ updated ->
+ get_rest_updated(UserAcc);
+ deleted ->
+ {stop, UserAcc}
+ after Timeout ->
+ {Go, UserAcc2} = TimeoutFun(UserAcc),
+ case Go of
+ ok ->
+ ?MODULE:wait_updated(Timeout, TimeoutFun, UserAcc2);
+ stop ->
+ {stop, UserAcc2}
+ end
+ end.
+
+get_rest_updated(UserAcc) ->
+ receive
+ updated ->
+ get_rest_updated(UserAcc)
+ after 0 ->
+ {updated, UserAcc}
+ end.
+
+reset_heartbeat() ->
+ case get(last_changes_heartbeat) of
+ undefined ->
+ ok;
+ _ ->
+ put(last_changes_heartbeat, os:timestamp())
+ end.
+
+maybe_heartbeat(Timeout, TimeoutFun, Acc) ->
+ Before = get(last_changes_heartbeat),
+ case Before of
+ undefined ->
+ {ok, Acc};
+ _ ->
+ Now = os:timestamp(),
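+ % timer:now_diff/2 returns microseconds; dividing by 1000 compares
+ % elapsed milliseconds against the Timeout value.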
+ case timer:now_diff(Now, Before) div 1000 >= Timeout of
+ true ->
+ {StopOrGo, Acc2} = TimeoutFun(Acc),
+ put(last_changes_heartbeat, Now),
+ {StopOrGo, Acc2};
+ false ->
+ {ok, Acc}
+ end
+ end.
+
+
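+% Normalize the accumulator: accept either the current #changes_acc{} record
+% or an older 19-element tuple layout and rebuild it field by field.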
+maybe_upgrade_changes_acc(#changes_acc{} = Acc) ->
+ Acc;
+maybe_upgrade_changes_acc(Acc) when tuple_size(Acc) == 19 ->
+ #changes_acc{
+ db = element(2, Acc),
+ seq = element(6, Acc),
+ prepend = element(7, Acc),
+ filter = element(8, Acc),
+ callback = element(9, Acc),
+ user_acc = element(10, Acc),
+ resp_type = element(11, Acc),
+ limit = element(12, Acc),
+ include_docs = element(13, Acc),
+ doc_options = element(14, Acc),
+ conflicts = element(15, Acc),
+ timeout = element(16, Acc),
+ timeout_fun = element(17, Acc),
+ aggregation_kvs = element(18, Acc),
+ aggregation_results = element(19, Acc)
+ }.
diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index 6a3df6def..ec4a1a40f 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -16,6 +16,7 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("fabric/include/fabric.hrl").
-include_lib("mem3/include/mem3.hrl").
-export([handle_request/1, handle_compact_req/2, handle_design_req/2,
@@ -41,7 +42,6 @@
% Accumulator for changes_callback function
-record(cacc, {
- etag,
feed,
mochi,
prepend = "",
@@ -49,7 +49,8 @@
chunks_sent = 0,
buffer = [],
bufsize = 0,
- threshold
+ threshold,
+ include_docs
}).
-define(IS_ALL_DOCS(T), (
@@ -85,45 +86,38 @@ handle_request(#httpd{path_parts=[DbName|RestParts],method=Method}=Req)->
handle_changes_req(#httpd{method='POST'}=Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
- handle_changes_req1(Req, Db);
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ handle_changes_req_tx(Req, TxDb)
+ end);
handle_changes_req(#httpd{method='GET'}=Req, Db) ->
- handle_changes_req1(Req, Db);
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ handle_changes_req_tx(Req, TxDb)
+ end);
handle_changes_req(#httpd{path_parts=[_,<<"_changes">>]}=Req, _Db) ->
send_method_not_allowed(Req, "GET,POST,HEAD").
-handle_changes_req1(#httpd{}=Req, Db) ->
- #changes_args{filter=Raw, style=Style} = Args0 = parse_changes_query(Req),
- ChangesArgs = Args0#changes_args{
- filter_fun = couch_changes:configure_filter(Raw, Style, Req, Db),
- db_open_options = [{user_ctx, couch_db:get_user_ctx(Db)}]
- },
+handle_changes_req_tx(#httpd{}=Req, Db) ->
+ ChangesArgs = parse_changes_query(Req),
+ ChangesFun = chttpd_changes:handle_db_changes(ChangesArgs, Req, Db),
Max = chttpd:chunked_response_buffer_size(),
case ChangesArgs#changes_args.feed of
"normal" ->
- T0 = os:timestamp(),
- {ok, Info} = fabric:get_db_info(Db),
- Suffix = mem3:shard_suffix(Db),
- Etag = chttpd:make_etag({Info, Suffix}),
- DeltaT = timer:now_diff(os:timestamp(), T0) / 1000,
- couch_stats:update_histogram([couchdb, dbinfo], DeltaT),
- chttpd:etag_respond(Req, Etag, fun() ->
- Acc0 = #cacc{
- feed = normal,
- etag = Etag,
- mochi = Req,
- threshold = Max
- },
- fabric:changes(Db, fun changes_callback/2, Acc0, ChangesArgs)
- end);
+ Acc0 = #cacc{
+ feed = normal,
+ mochi = Req,
+ threshold = Max
+ },
+ ChangesFun({fun changes_callback/2, Acc0});
Feed when Feed =:= "continuous"; Feed =:= "longpoll"; Feed =:= "eventsource" ->
couch_stats:increment_counter([couchdb, httpd, clients_requesting_changes]),
Acc0 = #cacc{
feed = list_to_atom(Feed),
mochi = Req,
- threshold = Max
+ threshold = Max,
+ include_docs = ChangesArgs#changes_args.include_docs
},
try
- fabric:changes(Db, fun changes_callback/2, Acc0, ChangesArgs)
+ ChangesFun({fun changes_callback/2, Acc0})
after
couch_stats:decrement_counter([couchdb, httpd, clients_requesting_changes])
end;
@@ -136,8 +130,9 @@ handle_changes_req1(#httpd{}=Req, Db) ->
changes_callback(start, #cacc{feed = continuous} = Acc) ->
{ok, Resp} = chttpd:start_delayed_json_response(Acc#cacc.mochi, 200),
{ok, Acc#cacc{mochi = Resp, responding = true}};
-changes_callback({change, Change}, #cacc{feed = continuous} = Acc) ->
- chttpd_stats:incr_rows(),
+changes_callback({change, Change}, #cacc{feed = continuous,
+ include_docs = IncludeDocs} = Acc) ->
+ incr_stats_changes_feed(IncludeDocs),
Data = [?JSON_ENCODE(Change) | "\n"],
Len = iolist_size(Data),
maybe_flush_changes_feed(Acc, Data, Len);
@@ -160,8 +155,9 @@ changes_callback(start, #cacc{feed = eventsource} = Acc) ->
],
{ok, Resp} = chttpd:start_delayed_json_response(Req, 200, Headers),
{ok, Acc#cacc{mochi = Resp, responding = true}};
-changes_callback({change, {ChangeProp}=Change}, #cacc{feed = eventsource} = Acc) ->
- chttpd_stats:incr_rows(),
+changes_callback({change, {ChangeProp}=Change},
+ #cacc{feed = eventsource, include_docs = IncludeDocs} = Acc) ->
+ incr_stats_changes_feed(IncludeDocs),
Seq = proplists:get_value(seq, ChangeProp),
Chunk = [
"data: ", ?JSON_ENCODE(Change),
@@ -182,18 +178,17 @@ changes_callback({stop, _EndSeq}, #cacc{feed = eventsource} = Acc) ->
% callbacks for longpoll and normal (single JSON Object)
changes_callback(start, #cacc{feed = normal} = Acc) ->
- #cacc{etag = Etag, mochi = Req} = Acc,
+ #cacc{mochi = Req} = Acc,
FirstChunk = "{\"results\":[\n",
- {ok, Resp} = chttpd:start_delayed_json_response(Req, 200,
- [{"ETag",Etag}], FirstChunk),
+ {ok, Resp} = chttpd:start_delayed_json_response(Req, 200, [], FirstChunk),
{ok, Acc#cacc{mochi = Resp, responding = true}};
changes_callback(start, Acc) ->
#cacc{mochi = Req} = Acc,
FirstChunk = "{\"results\":[\n",
{ok, Resp} = chttpd:start_delayed_json_response(Req, 200, [], FirstChunk),
{ok, Acc#cacc{mochi = Resp, responding = true}};
-changes_callback({change, Change}, Acc) ->
- chttpd_stats:incr_rows(),
+changes_callback({change, Change}, #cacc{include_docs = IncludeDocs} = Acc) ->
+ incr_stats_changes_feed(IncludeDocs),
Data = [Acc#cacc.prepend, ?JSON_ENCODE(Change)],
Len = iolist_size(Data),
maybe_flush_changes_feed(Acc, Data, Len);
@@ -227,7 +222,7 @@ changes_callback(waiting_for_updates, Acc) ->
mochi = Resp1,
chunks_sent = ChunksSent + 1
}};
-changes_callback(timeout, Acc) ->
+changes_callback({timeout, _ResponseType}, Acc) ->
#cacc{mochi = Resp, chunks_sent = ChunksSent} = Acc,
{ok, Resp1} = chttpd:send_delayed_chunk(Resp, "\n"),
{ok, Acc#cacc{mochi = Resp1, chunks_sent = ChunksSent + 1}};
@@ -255,27 +250,25 @@ maybe_flush_changes_feed(Acc0, Data, Len) ->
},
{ok, Acc}.
-handle_compact_req(#httpd{method='POST'}=Req, Db) ->
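+% Each change sent to the feed counts as a row; when include_docs is set the
+% associated document read is counted as well.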
+incr_stats_changes_feed(IncludeDocs) ->
+ chttpd_stats:incr_rows(),
+ if not IncludeDocs -> ok; true ->
+ chttpd_stats:incr_reads()
+ end.
+
+% Return the same response as if a compaction succeeded even though compaction
+% is no longer a valid operation in CouchDB >= 4.x. This is mostly to avoid
+% breaking existing user scripts that may periodically call this endpoint. In
+% the future this endpoint will return a 410 response and then be removed.
+handle_compact_req(#httpd{method='POST'}=Req, _Db) ->
chttpd:validate_ctype(Req, "application/json"),
- case Req#httpd.path_parts of
- [_DbName, <<"_compact">>] ->
- ok = fabric:compact(Db),
- send_json(Req, 202, {[{ok, true}]});
- [DbName, <<"_compact">>, DesignName | _] ->
- case ddoc_cache:open(DbName, <<"_design/", DesignName/binary>>) of
- {ok, _DDoc} ->
- ok = fabric:compact(Db, DesignName),
- send_json(Req, 202, {[{ok, true}]});
- Error ->
- throw(Error)
- end
- end;
+ send_json(Req, 202, {[{ok, true}]});
handle_compact_req(Req, _Db) ->
send_method_not_allowed(Req, "POST").
handle_view_cleanup_req(Req, Db) ->
- ok = fabric:cleanup_index_files_all_nodes(Db),
+ ok = fabric2_index:cleanup(Db),
send_json(Req, 202, {[{ok, true}]}).
@@ -355,8 +348,7 @@ update_partition_stats(PathParts) ->
handle_design_req(#httpd{
path_parts=[_DbName, _Design, Name, <<"_",_/binary>> = Action | _Rest]
}=Req, Db) ->
- DbName = mem3:dbname(couch_db:name(Db)),
- case ddoc_cache:open(DbName, <<"_design/", Name/binary>>) of
+ case fabric2_db:open_doc(Db, <<"_design/", Name/binary>>) of
{ok, DDoc} ->
Handler = chttpd_handlers:design_handler(Action, fun bad_action_req/3),
Handler(Req, Db, DDoc);
@@ -372,7 +364,7 @@ bad_action_req(#httpd{path_parts=[_, _, Name|FileNameParts]}=Req, Db, _DDoc) ->
handle_design_info_req(#httpd{method='GET'}=Req, Db, #doc{} = DDoc) ->
[_, _, Name, _] = Req#httpd.path_parts,
- {ok, GroupInfoList} = fabric:get_view_group_info(Db, DDoc),
+ {ok, GroupInfoList} = couch_views:get_info(Db, DDoc),
send_json(Req, 200, {[
{name, Name},
{view_index, {GroupInfoList}}
@@ -381,81 +373,57 @@ handle_design_info_req(#httpd{method='GET'}=Req, Db, #doc{} = DDoc) ->
handle_design_info_req(Req, _Db, _DDoc) ->
send_method_not_allowed(Req, "GET").
-create_db_req(#httpd{}=Req, DbName) ->
+create_db_req(#httpd{user_ctx=Ctx}=Req, DbName) ->
couch_httpd:verify_is_server_admin(Req),
- N = chttpd:qs_value(Req, "n", config:get("cluster", "n", "3")),
- Q = chttpd:qs_value(Req, "q", config:get("cluster", "q", "8")),
- P = chttpd:qs_value(Req, "placement", config:get("cluster", "placement")),
- EngineOpt = parse_engine_opt(Req),
- DbProps = parse_partitioned_opt(Req),
- Options = [
- {n, N},
- {q, Q},
- {placement, P},
- {props, DbProps}
- ] ++ EngineOpt,
DocUrl = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName)),
- case fabric:create_db(DbName, Options) of
- ok ->
- send_json(Req, 201, [{"Location", DocUrl}], {[{ok, true}]});
- accepted ->
- send_json(Req, 202, [{"Location", DocUrl}], {[{ok, true}]});
- {error, file_exists} ->
- chttpd:send_error(Req, file_exists);
- Error ->
- throw(Error)
+ case fabric2_db:create(DbName, [{user_ctx, Ctx}]) of
+ {ok, _} ->
+ send_json(Req, 201, [{"Location", DocUrl}], {[{ok, true}]});
+ {error, file_exists} ->
+ chttpd:send_error(Req, file_exists);
+ Error ->
+ throw(Error)
end.
-delete_db_req(#httpd{}=Req, DbName) ->
+delete_db_req(#httpd{user_ctx=Ctx}=Req, DbName) ->
couch_httpd:verify_is_server_admin(Req),
- case fabric:delete_db(DbName, []) of
- ok ->
- send_json(Req, 200, {[{ok, true}]});
- accepted ->
- send_json(Req, 202, {[{ok, true}]});
- Error ->
- throw(Error)
+ case fabric2_db:delete(DbName, [{user_ctx, Ctx}]) of
+ ok ->
+ send_json(Req, 200, {[{ok, true}]});
+ Error ->
+ throw(Error)
end.
do_db_req(#httpd{path_parts=[DbName|_], user_ctx=Ctx}=Req, Fun) ->
- Shard = hd(mem3:shards(DbName)),
- Props = couch_util:get_value(props, Shard#shard.opts, []),
- Opts = case Ctx of
- undefined ->
- [{props, Props}];
- #user_ctx{} ->
- [{user_ctx, Ctx}, {props, Props}]
- end,
- {ok, Db} = couch_db:clustered_db(DbName, Opts),
+ Options = [{user_ctx, Ctx}, {interactive, true}],
+ {ok, Db} = fabric2_db:open(DbName, Options),
Fun(Req, Db).
-db_req(#httpd{method='GET',path_parts=[DbName]}=Req, _Db) ->
+db_req(#httpd{method='GET',path_parts=[_DbName]}=Req, Db) ->
% measure the time required to generate the etag, see if it's worth it
T0 = os:timestamp(),
- {ok, DbInfo} = fabric:get_db_info(DbName),
+ {ok, DbInfo} = fabric2_db:get_db_info(Db),
DeltaT = timer:now_diff(os:timestamp(), T0) / 1000,
couch_stats:update_histogram([couchdb, dbinfo], DeltaT),
send_json(Req, {DbInfo});
-db_req(#httpd{method='POST', path_parts=[DbName], user_ctx=Ctx}=Req, Db) ->
+db_req(#httpd{method='POST', path_parts=[DbName]}=Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
- W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
- Options = [{user_ctx,Ctx}, {w,W}],
-
- Doc = couch_db:doc_from_json_obj_validate(Db, chttpd:json_body(Req)),
- Doc2 = case Doc#doc.id of
+ Doc0 = chttpd:json_body(Req),
+ Doc1 = couch_doc:from_json_obj_validate(Doc0, fabric2_db:name(Db)),
+ Doc2 = case Doc1#doc.id of
<<"">> ->
- Doc#doc{id=couch_uuids:new(), revs={0, []}};
+ Doc1#doc{id=couch_uuids:new(), revs={0, []}};
_ ->
- Doc
+ Doc1
end,
DocId = Doc2#doc.id,
case chttpd:qs_value(Req, "batch") of
"ok" ->
% async_batching
spawn(fun() ->
- case catch(fabric:update_doc(Db, Doc2, Options)) of
+ case catch(fabric2_db:update_doc(Db, Doc2, [])) of
{ok, _} ->
chttpd_stats:incr_writes(),
ok;
@@ -475,7 +443,7 @@ db_req(#httpd{method='POST', path_parts=[DbName], user_ctx=Ctx}=Req, Db) ->
% normal
DocUrl = absolute_uri(Req, [$/, couch_util:url_encode(DbName),
$/, couch_util:url_encode(DocId)]),
- case fabric:update_doc(Db, Doc2, Options) of
+ case fabric2_db:update_doc(Db, Doc2, []) of
{ok, NewRev} ->
chttpd_stats:incr_writes(),
HttpCode = 201;
@@ -493,13 +461,10 @@ db_req(#httpd{method='POST', path_parts=[DbName], user_ctx=Ctx}=Req, Db) ->
db_req(#httpd{path_parts=[_DbName]}=Req, _Db) ->
send_method_not_allowed(Req, "DELETE,GET,HEAD,POST");
-db_req(#httpd{method='POST', path_parts=[DbName, <<"_ensure_full_commit">>],
- user_ctx=Ctx}=Req, _Db) ->
+db_req(#httpd{method='POST', path_parts=[_DbName, <<"_ensure_full_commit">>]
+ }=Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
- %% use fabric call to trigger a database_does_not_exist exception
- %% for missing databases that'd return error 404 from chttpd
- %% get_security used to prefer shards on the same node over other nodes
- fabric:get_security(DbName, [{user_ctx, Ctx}]),
+ #{db_prefix := <<_/binary>>} = Db,
send_json(Req, 201, {[
{ok, true},
{instance_start_time, <<"0">>}
@@ -508,7 +473,7 @@ db_req(#httpd{method='POST', path_parts=[DbName, <<"_ensure_full_commit">>],
db_req(#httpd{path_parts=[_,<<"_ensure_full_commit">>]}=Req, _Db) ->
send_method_not_allowed(Req, "POST");
-db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>], user_ctx=Ctx}=Req, Db) ->
+db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>]}=Req, Db) ->
couch_stats:increment_counter([couchdb, httpd, bulk_requests]),
chttpd:validate_ctype(Req, "application/json"),
{JsonProps} = chttpd:json_body_obj(Req),
@@ -520,23 +485,23 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>], user_ctx=Ctx}=Req,
DocsArray0 ->
DocsArray0
end,
- couch_stats:update_histogram([couchdb, httpd, bulk_docs], length(DocsArray)),
- W = case couch_util:get_value(<<"w">>, JsonProps) of
- Value when is_integer(Value) ->
- integer_to_list(Value);
- _ ->
- chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db)))
+ MaxDocs = config:get_integer("couchdb", "max_bulk_docs_count", 10000),
+ case length(DocsArray) =< MaxDocs of
+ true -> ok;
+ false -> throw({request_entity_too_large, {bulk_docs, MaxDocs}})
end,
- case chttpd:header_value(Req, "X-Couch-Full-Commit") of
+ couch_stats:update_histogram([couchdb, httpd, bulk_docs], length(DocsArray)),
+ Options = case chttpd:header_value(Req, "X-Couch-Full-Commit") of
"true" ->
- Options = [full_commit, {user_ctx,Ctx}, {w,W}];
+ [full_commit];
"false" ->
- Options = [delay_commit, {user_ctx,Ctx}, {w,W}];
+ [delay_commit];
_ ->
- Options = [{user_ctx,Ctx}, {w,W}]
+ []
end,
+ DbName = fabric2_db:name(Db),
Docs = lists:map(fun(JsonObj) ->
- Doc = couch_db:doc_from_json_obj_validate(Db, JsonObj),
+ Doc = couch_doc:from_json_obj_validate(JsonObj, DbName),
validate_attachment_names(Doc),
case Doc#doc.id of
<<>> -> Doc#doc{id = couch_uuids:new()};
@@ -550,7 +515,7 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>], user_ctx=Ctx}=Req,
true -> [all_or_nothing|Options];
_ -> Options
end,
- case fabric:update_docs(Db, Docs, Options2) of
+ case fabric2_db:update_docs(Db, Docs, Options2) of
{ok, Results} ->
% output the results
chttpd_stats:incr_writes(length(Results)),
@@ -569,7 +534,7 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>], user_ctx=Ctx}=Req,
send_json(Req, 417, ErrorsJson)
end;
false ->
- case fabric:update_docs(Db, Docs, [replicated_changes|Options]) of
+ case fabric2_db:update_docs(Db, Docs, [replicated_changes|Options]) of
{ok, Errors} ->
chttpd_stats:incr_writes(length(Docs)),
ErrorsJson = lists:map(fun update_doc_result_to_json/1, Errors),
@@ -596,11 +561,14 @@ db_req(#httpd{method='POST', path_parts=[_, <<"_bulk_get">>],
undefined ->
throw({bad_request, <<"Missing JSON list of 'docs'.">>});
Docs ->
+ MaxDocs = config:get_integer("couchdb", "max_bulk_get_count", 10000),
+ case length(Docs) =< MaxDocs of
+ true -> ok;
+ false -> throw({request_entity_too_large, {bulk_get, MaxDocs}})
+ end,
#doc_query_args{
- options = Options0
+ options = Options
} = bulk_get_parse_doc_query(Req),
- Options = [{user_ctx, Req#httpd.user_ctx} | Options0],
-
AcceptJson = MochiReq:accepts_content_type("application/json"),
AcceptMixedMp = MochiReq:accepts_content_type("multipart/mixed"),
AcceptRelatedMp = MochiReq:accepts_content_type("multipart/related"),
@@ -665,8 +633,6 @@ db_req(#httpd{path_parts=[_, <<"_bulk_get">>]}=Req, _Db) ->
db_req(#httpd{method='POST',path_parts=[_,<<"_purge">>]}=Req, Db) ->
couch_stats:increment_counter([couchdb, httpd, purge_requests]),
chttpd:validate_ctype(Req, "application/json"),
- W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
- Options = [{user_ctx, Req#httpd.user_ctx}, {w, W}],
{IdsRevs} = chttpd:json_body_obj(Req),
IdsRevs2 = [{Id, couch_doc:parse_revs(Revs)} || {Id, Revs} <- IdsRevs],
MaxIds = config:get_integer("purge", "max_document_id_number", 100),
@@ -683,7 +649,7 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_purge">>]}=Req, Db) ->
true -> ok
end,
couch_stats:increment_counter([couchdb, document_purges, total], length(IdsRevs2)),
- Results2 = case fabric:purge_docs(Db, IdsRevs2, Options) of
+ Results2 = case fabric:purge_docs(Db, IdsRevs2, []) of
{ok, Results} ->
chttpd_stats:incr_writes(length(Results)),
Results;
@@ -741,7 +707,7 @@ db_req(#httpd{path_parts=[_,OP]}=Req, _Db) when ?IS_ALL_DOCS(OP) ->
db_req(#httpd{method='POST',path_parts=[_,<<"_missing_revs">>]}=Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
{JsonDocIdRevs} = chttpd:json_body_obj(Req),
- case fabric:get_missing_revs(Db, JsonDocIdRevs) of
+ case fabric2_db:get_missing_revs(Db, JsonDocIdRevs) of
{error, Reason} ->
chttpd:send_error(Req, Reason);
{ok, Results} ->
@@ -758,7 +724,7 @@ db_req(#httpd{path_parts=[_,<<"_missing_revs">>]}=Req, _Db) ->
db_req(#httpd{method='POST',path_parts=[_,<<"_revs_diff">>]}=Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
{JsonDocIdRevs} = chttpd:json_body_obj(Req),
- case fabric:get_missing_revs(Db, JsonDocIdRevs) of
+ case fabric2_db:get_missing_revs(Db, JsonDocIdRevs) of
{error, Reason} ->
chttpd:send_error(Req, Reason);
{ok, Results} ->
@@ -779,12 +745,10 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_revs_diff">>]}=Req, Db) ->
db_req(#httpd{path_parts=[_,<<"_revs_diff">>]}=Req, _Db) ->
send_method_not_allowed(Req, "POST");
-db_req(#httpd{method='PUT',path_parts=[_,<<"_security">>],user_ctx=Ctx}=Req,
- Db) ->
- DbName = ?b2l(couch_db:name(Db)),
- validate_security_can_be_edited(DbName),
+db_req(#httpd{method = 'PUT',path_parts = [_, <<"_security">>]} = Req, Db) ->
+ validate_security_can_be_edited(fabric2_db:name(Db)),
SecObj = chttpd:json_body(Req),
- case fabric:set_security(Db, SecObj, [{user_ctx, Ctx}]) of
+ case fabric2_db:set_security(Db, SecObj) of
ok ->
send_json(Req, {[{<<"ok">>, true}]});
Else ->
@@ -792,28 +756,26 @@ db_req(#httpd{method='PUT',path_parts=[_,<<"_security">>],user_ctx=Ctx}=Req,
end;
db_req(#httpd{method='GET',path_parts=[_,<<"_security">>]}=Req, Db) ->
- send_json(Req, fabric:get_security(Db));
+ send_json(Req, fabric2_db:get_security(Db));
db_req(#httpd{path_parts=[_,<<"_security">>]}=Req, _Db) ->
send_method_not_allowed(Req, "PUT,GET");
-db_req(#httpd{method='PUT',path_parts=[_,<<"_revs_limit">>],user_ctx=Ctx}=Req,
- Db) ->
+db_req(#httpd{method='PUT',path_parts=[_,<<"_revs_limit">>]}=Req, Db) ->
Limit = chttpd:json_body(Req),
- ok = fabric:set_revs_limit(Db, Limit, [{user_ctx,Ctx}]),
+ ok = fabric2_db:set_revs_limit(Db, Limit),
send_json(Req, {[{<<"ok">>, true}]});
db_req(#httpd{method='GET',path_parts=[_,<<"_revs_limit">>]}=Req, Db) ->
- send_json(Req, fabric:get_revs_limit(Db));
+ send_json(Req, fabric2_db:get_revs_limit(Db));
db_req(#httpd{path_parts=[_,<<"_revs_limit">>]}=Req, _Db) ->
send_method_not_allowed(Req, "PUT,GET");
db_req(#httpd{method='PUT',path_parts=[_,<<"_purged_infos_limit">>]}=Req, Db) ->
- Options = [{user_ctx, Req#httpd.user_ctx}],
case chttpd:json_body(Req) of
Limit when is_integer(Limit), Limit > 0 ->
- case fabric:set_purge_infos_limit(Db, Limit, Options) of
+ case fabric:set_purge_infos_limit(Db, Limit, []) of
ok ->
send_json(Req, {[{<<"ok">>, true}]});
Error ->
@@ -861,49 +823,250 @@ db_req(#httpd{path_parts=[_, DocId | FileNameParts]}=Req, Db) ->
db_attachment_req(Req, Db, DocId, FileNameParts).
multi_all_docs_view(Req, Db, OP, Queries) ->
- Args0 = couch_mrview_http:parse_params(Req, undefined),
+ Args = couch_views_http:parse_params(Req, undefined),
+ case couch_views_util:is_paginated(Args) of
+ false ->
+ stream_multi_all_docs_view(Req, Db, OP, Args, Queries);
+ true ->
+ paginate_multi_all_docs_view(Req, Db, OP, Args, Queries)
+ end.
+
+
+stream_multi_all_docs_view(Req, Db, OP, Args0, Queries) ->
Args1 = Args0#mrargs{view_type=map},
- ArgQueries = lists:map(fun({Query}) ->
- QueryArg1 = couch_mrview_http:parse_params(Query, undefined,
- Args1, [decoded]),
- QueryArgs2 = fabric_util:validate_all_docs_args(Db, QueryArg1),
- set_namespace(OP, QueryArgs2)
- end, Queries),
- Options = [{user_ctx, Req#httpd.user_ctx}],
- VAcc0 = #vacc{db=Db, req=Req, prepend="\r\n"},
- FirstChunk = "{\"results\":[",
- {ok, Resp0} = chttpd:start_delayed_json_response(VAcc0#vacc.req,
- 200, [], FirstChunk),
- VAcc1 = VAcc0#vacc{resp=Resp0},
- VAcc2 = lists:foldl(fun(Args, Acc0) ->
- {ok, Acc1} = fabric:all_docs(Db, Options,
- fun view_cb/2, Acc0, Args),
- Acc1
- end, VAcc1, ArgQueries),
- {ok, Resp1} = chttpd:send_delayed_chunk(VAcc2#vacc.resp, "\r\n]}"),
+ ArgQueries = chttpd_view:parse_queries(Req, Args1, Queries, fun(QArgs) ->
+ set_namespace(OP, QArgs)
+ end),
+ Max = chttpd:chunked_response_buffer_size(),
+ First = "{\"results\":[",
+ {ok, Resp0} = chttpd:start_delayed_json_response(Req, 200, [], First),
+ VAcc0 = #vacc{
+ db = Db,
+ req = Req,
+ resp = Resp0,
+ threshold = Max,
+ prepend = "\r\n"
+ },
+ VAcc1 = lists:foldl(fun
+ (#mrargs{keys = undefined} = ArgsIn, Acc0) ->
+ send_all_docs(Db, ArgsIn, Acc0);
+ (#mrargs{keys = Keys} = ArgsIn, Acc0) when is_list(Keys) ->
+ Acc1 = send_all_docs_keys(Db, ArgsIn, Acc0),
+ {ok, Acc2} = view_cb(complete, Acc1),
+ Acc2
+ end, VAcc0, ArgQueries),
+ {ok, Resp1} = chttpd:send_delayed_chunk(VAcc1#vacc.resp, "\r\n]}"),
chttpd:end_delayed_json_response(Resp1).
+
+paginate_multi_all_docs_view(Req, Db, OP, Args0, Queries) ->
+ Args1 = Args0#mrargs{view_type=map},
+ ArgQueries = chttpd_view:parse_queries(Req, Args1, Queries, fun(QArgs) ->
+ set_namespace(OP, QArgs)
+ end),
+ KeyFun = fun({Props}) ->
+ {couch_util:get_value(id, Props), undefined}
+ end,
+ #mrargs{page_size = PageSize} = Args0,
+ #httpd{path_parts = Parts} = Req,
+ UpdateSeq = fabric2_db:get_update_seq(Db),
+ EtagTerm = {Parts, UpdateSeq, Args0},
+ Response = couch_views_http:paginated(
+ Req, EtagTerm, PageSize, ArgQueries, KeyFun,
+ fun(Args) ->
+ all_docs_paginated_cb(Db, Args)
+ end),
+ chttpd:send_json(Req, Response).
+
+
all_docs_view(Req, Db, Keys, OP) ->
- Args0 = couch_mrview_http:parse_body_and_query(Req, Keys),
+ Args = couch_views_http:parse_body_and_query(Req, Keys),
+ case couch_views_util:is_paginated(Args) of
+ false ->
+ stream_all_docs_view(Req, Db, Args, OP);
+ true ->
+ paginate_all_docs_view(Req, Db, Args, OP)
+ end.
+
+stream_all_docs_view(Req, Db, Args0, OP) ->
Args1 = Args0#mrargs{view_type=map},
- Args2 = fabric_util:validate_all_docs_args(Db, Args1),
+ Args2 = couch_views_util:validate_args(Args1),
Args3 = set_namespace(OP, Args2),
- Options = [{user_ctx, Req#httpd.user_ctx}],
Max = chttpd:chunked_response_buffer_size(),
- VAcc = #vacc{db=Db, req=Req, threshold=Max},
- {ok, Resp} = fabric:all_docs(Db, Options, fun view_cb/2, VAcc, Args3),
- {ok, Resp#vacc.resp}.
-
-view_cb({row, Row} = Msg, Acc) ->
- case lists:keymember(doc, 1, Row) of
- true -> chttpd_stats:incr_reads();
- false -> ok
+ VAcc0 = #vacc{
+ db = Db,
+ req = Req,
+ threshold = Max
+ },
+ case Args3#mrargs.keys of
+ undefined ->
+ VAcc1 = send_all_docs(Db, Args3, VAcc0),
+ {ok, VAcc1#vacc.resp};
+ Keys when is_list(Keys) ->
+ VAcc1 = send_all_docs_keys(Db, Args3, VAcc0),
+ {ok, VAcc2} = view_cb(complete, VAcc1),
+ {ok, VAcc2#vacc.resp}
+ end.
+
+
+paginate_all_docs_view(Req, Db, Args0, OP) ->
+ Args1 = Args0#mrargs{view_type=map},
+ Args2 = chttpd_view:validate_args(Req, Args1),
+ Args3 = set_namespace(OP, Args2),
+ KeyFun = fun({Props}) ->
+ {couch_util:get_value(id, Props), undefined}
+ end,
+ #httpd{path_parts = Parts} = Req,
+ UpdateSeq = fabric2_db:get_update_seq(Db),
+ EtagTerm = {Parts, UpdateSeq, Args3},
+ Response = couch_views_http:paginated(
+ Req, EtagTerm, Args3, KeyFun,
+ fun(Args) ->
+ all_docs_paginated_cb(Db, Args)
+ end),
+ chttpd:send_json(Req, Response).
+
+
+all_docs_paginated_cb(Db, Args) ->
+ #vacc{meta=MetaMap, buffer=Items} = case Args#mrargs.keys of
+ undefined ->
+ send_all_docs(Db, Args, #vacc{paginated=true});
+ Keys when is_list(Keys) ->
+ send_all_docs_keys(Db, Args, #vacc{paginated=true})
+ end,
+ {MetaMap, Items}.
+
+
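+% Stream a full (non-keyed) _all_docs request using the fold that matches the
+% requested namespace; non-paginated responses allow the underlying transaction
+% to be restarted so long folds can keep going.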
+send_all_docs(Db, #mrargs{keys = undefined} = Args, VAcc0) ->
+ Opts0 = fabric2_util:all_docs_view_opts(Args),
+ NS = couch_util:get_value(namespace, Opts0),
+ FoldFun = case NS of
+ <<"_all_docs">> -> fold_docs;
+ <<"_design">> -> fold_design_docs;
+ <<"_local">> -> fold_local_docs
+ end,
+ Opts = case couch_views_util:is_paginated(Args) of
+ false ->
+ Opts0 ++ [{restart_tx, true}];
+ true ->
+ Opts0
+ end,
+ ViewCb = fun view_cb/2,
+ Acc = {iter, Db, Args, VAcc0},
+ {ok, {iter, _, _, VAcc1}} = fabric2_db:FoldFun(Db, ViewCb, Acc, Opts),
+ VAcc1.
+
+
+send_all_docs_keys(Db, #mrargs{} = Args, VAcc0) ->
+ Keys = apply_args_to_keylist(Args, Args#mrargs.keys),
+ NS = couch_util:get_value(namespace, Args#mrargs.extra),
+ TotalRows = fabric2_db:get_doc_count(Db, NS),
+ Meta = case Args#mrargs.update_seq of
+ true ->
+ UpdateSeq = fabric2_db:get_update_seq(Db),
+ [{update_seq, UpdateSeq}];
+ false ->
+ []
+ end ++ [{total, TotalRows}, {offset, null}],
+ {ok, VAcc1} = view_cb({meta, Meta}, VAcc0),
+ DocOpts = case Args#mrargs.conflicts of
+ true -> [conflicts | Args#mrargs.doc_options];
+ _ -> Args#mrargs.doc_options
+ end,
+ IncludeDocs = Args#mrargs.include_docs,
+ OpenOpts = [deleted | DocOpts],
+
+ CB = fun(DocId, Doc, Acc) ->
+ Row0 = case Doc of
+ {not_found, missing} ->
+ #view_row{key = DocId};
+ {ok, #doc{deleted = true, revs = Revs}} ->
+ {RevPos, [RevId | _]} = Revs,
+ Value = {[
+ {rev, couch_doc:rev_to_str({RevPos, RevId})},
+ {deleted, true}
+ ]},
+ DocValue = if not IncludeDocs -> undefined; true ->
+ null
+ end,
+ #view_row{
+ key = DocId,
+ id = DocId,
+ value = Value,
+ doc = DocValue
+ };
+ {ok, #doc{revs = Revs} = Doc0} ->
+ {RevPos, [RevId | _]} = Revs,
+ Value = {[
+ {rev, couch_doc:rev_to_str({RevPos, RevId})}
+ ]},
+ DocValue = if not IncludeDocs -> undefined; true ->
+ couch_doc:to_json_obj(Doc0, DocOpts)
+ end,
+ #view_row{
+ key = DocId,
+ id = DocId,
+ value = Value,
+ doc = DocValue
+ }
+ end,
+ Row1 = fabric_view:transform_row(Row0),
+ view_cb(Row1, Acc)
+ end,
+ {ok, VAcc2} = fabric2_db:fold_docs(Db, Keys, CB, VAcc1, OpenOpts),
+ VAcc2.
+
+
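+% Apply direction, skip and limit to an explicit keys list so keyed _all_docs
+% requests honour the same options as a regular fold.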
+apply_args_to_keylist(Args, Keys0) ->
+ Keys1 = case Args#mrargs.direction of
+ fwd -> Keys0;
+ _ -> lists:reverse(Keys0)
+ end,
+ Keys2 = case Args#mrargs.skip < length(Keys1) of
+ true -> lists:nthtail(Args#mrargs.skip, Keys1);
+ false -> []
+ end,
+ case Args#mrargs.limit < length(Keys2) of
+ true -> lists:sublist(Keys2, Args#mrargs.limit);
+ false -> Keys2
+ end.
+
+
+view_cb({row, Row}, {iter, Db, Args, VAcc}) ->
+ NewRow = case lists:keymember(doc, 1, Row) of
+ true ->
+ chttpd_stats:incr_reads(),
+ Row;
+ false when Args#mrargs.include_docs ->
+ {id, DocId} = lists:keyfind(id, 1, Row),
+ chttpd_stats:incr_reads(),
+ DocOpts = case Args#mrargs.conflicts of
+ true -> [conflicts | Args#mrargs.doc_options];
+ _ -> Args#mrargs.doc_options
+ end,
+ OpenOpts = [deleted | DocOpts],
+ DocMember = case fabric2_db:open_doc(Db, DocId, OpenOpts) of
+ {not_found, missing} ->
+ [];
+ {ok, #doc{deleted = true}} ->
+ [{doc, null}];
+ {ok, #doc{} = Doc} ->
+ [{doc, couch_doc:to_json_obj(Doc, DocOpts)}]
+ end,
+ Row ++ DocMember;
+ _ ->
+ Row
end,
chttpd_stats:incr_rows(),
- couch_mrview_http:view_cb(Msg, Acc);
+ {Go, NewVAcc} = couch_views_http:view_cb({row, NewRow}, VAcc),
+ {Go, {iter, Db, Args, NewVAcc}};
+
+view_cb(Msg, {iter, Db, Args, VAcc}) ->
+ {Go, NewVAcc} = couch_views_http:view_cb(Msg, VAcc),
+ {Go, {iter, Db, Args, NewVAcc}};
view_cb(Msg, Acc) ->
- couch_mrview_http:view_cb(Msg, Acc).
+ couch_views_http:view_cb(Msg, Acc).
db_doc_req(#httpd{method='DELETE'}=Req, Db, DocId) ->
% check for the existence of the doc to handle the 404 case.
@@ -921,10 +1084,9 @@ db_doc_req(#httpd{method='GET', mochi_req=MochiReq}=Req, Db, DocId) ->
#doc_query_args{
rev = Rev,
open_revs = Revs,
- options = Options0,
+ options = Options,
atts_since = AttsSince
} = parse_doc_query(Req),
- Options = [{user_ctx, Req#httpd.user_ctx} | Options0],
case Revs of
[] ->
Options2 =
@@ -935,7 +1097,7 @@ db_doc_req(#httpd{method='GET', mochi_req=MochiReq}=Req, Db, DocId) ->
Doc = couch_doc_open(Db, DocId, Rev, Options2),
send_doc(Req, Doc, Options2);
_ ->
- case fabric:open_revs(Db, DocId, Revs, Options) of
+ case fabric2_db:open_doc_revs(Db, DocId, Revs, Options) of
{ok, []} when Revs == all ->
chttpd:send_error(Req, {not_found, missing});
{ok, Results} ->
@@ -971,14 +1133,11 @@ db_doc_req(#httpd{method='GET', mochi_req=MochiReq}=Req, Db, DocId) ->
end
end;
-db_doc_req(#httpd{method='POST', user_ctx=Ctx}=Req, Db, DocId) ->
+db_doc_req(#httpd{method='POST'}=Req, Db, DocId) ->
couch_httpd:validate_referer(Req),
- couch_db:validate_docid(Db, DocId),
+ fabric2_db:validate_docid(DocId),
chttpd:validate_ctype(Req, "multipart/form-data"),
- W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
- Options = [{user_ctx,Ctx}, {w,W}],
-
Form = couch_httpd:parse_form(Req),
case proplists:is_defined("_doc", Form) of
true ->
@@ -986,7 +1145,7 @@ db_doc_req(#httpd{method='POST', user_ctx=Ctx}=Req, Db, DocId) ->
Doc = couch_doc_from_req(Req, Db, DocId, Json);
false ->
Rev = couch_doc:parse_rev(list_to_binary(couch_util:get_value("_rev", Form))),
- Doc = case fabric:open_revs(Db, DocId, [Rev], []) of
+ Doc = case fabric2_db:open_doc_revs(Db, DocId, [Rev], []) of
{ok, [{ok, Doc0}]} ->
chttpd_stats:incr_reads(),
Doc0;
@@ -1015,7 +1174,7 @@ db_doc_req(#httpd{method='POST', user_ctx=Ctx}=Req, Db, DocId) ->
NewDoc = Doc#doc{
atts = UpdatedAtts ++ OldAtts2
},
- case fabric:update_doc(Db, NewDoc, Options) of
+ case fabric2_db:update_doc(Db, NewDoc, []) of
{ok, NewRev} ->
chttpd_stats:incr_writes(),
HttpCode = 201;
@@ -1029,15 +1188,12 @@ db_doc_req(#httpd{method='POST', user_ctx=Ctx}=Req, Db, DocId) ->
{rev, couch_doc:rev_to_str(NewRev)}
]});
-db_doc_req(#httpd{method='PUT', user_ctx=Ctx}=Req, Db, DocId) ->
+db_doc_req(#httpd{method='PUT'}=Req, Db, DocId) ->
#doc_query_args{
update_type = UpdateType
} = parse_doc_query(Req),
- DbName = couch_db:name(Db),
- couch_db:validate_docid(Db, DocId),
-
- W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
- Options = [{user_ctx,Ctx}, {w,W}],
+ DbName = fabric2_db:name(Db),
+ fabric2_db:validate_docid(DocId),
Loc = absolute_uri(Req, [$/, couch_util:url_encode(DbName),
$/, couch_util:url_encode(DocId)]),
@@ -1045,7 +1201,7 @@ db_doc_req(#httpd{method='PUT', user_ctx=Ctx}=Req, Db, DocId) ->
case couch_util:to_list(couch_httpd:header_value(Req, "Content-Type")) of
("multipart/related;" ++ _) = ContentType ->
couch_httpd:check_max_request_length(Req),
- couch_httpd_multipart:num_mp_writers(mem3:n(mem3:dbname(DbName), DocId)),
+ couch_httpd_multipart:num_mp_writers(1),
{ok, Doc0, WaitFun, Parser} = couch_doc:doc_from_multi_part_stream(ContentType,
fun() -> receive_request_data(Req) end),
Doc = couch_doc_from_req(Req, Db, DocId, Doc0),
@@ -1065,7 +1221,7 @@ db_doc_req(#httpd{method='PUT', user_ctx=Ctx}=Req, Db, DocId) ->
Doc = couch_doc_from_req(Req, Db, DocId, chttpd:json_body(Req)),
spawn(fun() ->
- case catch(fabric:update_doc(Db, Doc, Options)) of
+ case catch(fabric2_db:update_doc(Db, Doc, [])) of
{ok, _} ->
chttpd_stats:incr_writes(),
ok;
@@ -1088,7 +1244,7 @@ db_doc_req(#httpd{method='PUT', user_ctx=Ctx}=Req, Db, DocId) ->
end
end;
-db_doc_req(#httpd{method='COPY', user_ctx=Ctx}=Req, Db, SourceDocId) ->
+db_doc_req(#httpd{method='COPY'}=Req, Db, SourceDocId) ->
SourceRev =
case extract_header_rev(Req, chttpd:qs_value(Req, "rev")) of
missing_rev -> nil;
@@ -1099,8 +1255,8 @@ db_doc_req(#httpd{method='COPY', user_ctx=Ctx}=Req, Db, SourceDocId) ->
% open old doc
Doc = couch_doc_open(Db, SourceDocId, SourceRev, []),
% save new doc
- case fabric:update_doc(Db,
- Doc#doc{id=TargetDocId, revs=TargetRevs}, [{user_ctx,Ctx}]) of
+ case fabric2_db:update_doc(Db,
+ Doc#doc{id=TargetDocId, revs=TargetRevs}, []) of
{ok, NewTargetRev} ->
chttpd_stats:incr_writes(),
HttpCode = 201;
@@ -1109,13 +1265,13 @@ db_doc_req(#httpd{method='COPY', user_ctx=Ctx}=Req, Db, SourceDocId) ->
HttpCode = 202
end,
% respond
- DbName = couch_db:name(Db),
+ DbName = fabric2_db:name(Db),
{PartRes} = update_doc_result_to_json(TargetDocId, {ok, NewTargetRev}),
Loc = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName) ++ "/" ++ couch_util:url_encode(TargetDocId)),
send_json(Req, HttpCode,
[{"Location", Loc},
{"ETag", "\"" ++ ?b2l(couch_doc:rev_to_str(NewTargetRev)) ++ "\""}],
- {[{ok, true}] ++ PartRes});
+ {PartRes});
db_doc_req(Req, _Db, _DocId) ->
send_method_not_allowed(Req, "DELETE,GET,HEAD,POST,PUT,COPY").
@@ -1200,7 +1356,7 @@ send_docs_multipart(Req, Results, Options1) ->
CType = {"Content-Type",
"multipart/mixed; boundary=\"" ++ ?b2l(OuterBoundary) ++ "\""},
{ok, Resp} = start_chunked_response(Req, 200, [CType]),
- couch_httpd:send_chunk(Resp, <<"--", OuterBoundary/binary>>),
+ chttpd:send_chunk(Resp, <<"--", OuterBoundary/binary>>),
lists:foreach(
fun({ok, #doc{atts=Atts}=Doc}) ->
Refs = monitor_attachments(Doc#doc.atts),
@@ -1208,25 +1364,25 @@ send_docs_multipart(Req, Results, Options1) ->
JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)),
{ContentType, _Len} = couch_doc:len_doc_to_multi_part_stream(
InnerBoundary, JsonBytes, Atts, true),
- couch_httpd:send_chunk(Resp, <<"\r\nContent-Type: ",
+ chttpd:send_chunk(Resp, <<"\r\nContent-Type: ",
ContentType/binary, "\r\n\r\n">>),
couch_doc:doc_to_multi_part_stream(InnerBoundary, JsonBytes, Atts,
- fun(Data) -> couch_httpd:send_chunk(Resp, Data)
+ fun(Data) -> chttpd:send_chunk(Resp, Data)
end, true),
- couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>)
+ chttpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>)
after
demonitor_refs(Refs)
end;
({{not_found, missing}, RevId}) ->
RevStr = couch_doc:rev_to_str(RevId),
Json = ?JSON_ENCODE({[{<<"missing">>, RevStr}]}),
- couch_httpd:send_chunk(Resp,
+ chttpd:send_chunk(Resp,
[<<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
Json,
<<"\r\n--", OuterBoundary/binary>>])
end, Results),
- couch_httpd:send_chunk(Resp, <<"--">>),
- couch_httpd:last_chunk(Resp).
+ chttpd:send_chunk(Resp, <<"--">>),
+ chttpd:last_chunk(Resp).
bulk_get_multipart_headers({0, []}, Id, Boundary) ->
[
@@ -1266,6 +1422,8 @@ update_doc_result_to_json(DocId, {ok, NewRev}) ->
{[{ok, true}, {id, DocId}, {rev, couch_doc:rev_to_str(NewRev)}]};
update_doc_result_to_json(DocId, {accepted, NewRev}) ->
{[{ok, true}, {id, DocId}, {rev, couch_doc:rev_to_str(NewRev)}, {accepted, true}]};
+update_doc_result_to_json(DocId, {{DocId, _}, Error}) ->
+ update_doc_result_to_json(DocId, Error);
update_doc_result_to_json(DocId, Error) ->
{_Code, ErrorStr, Reason} = chttpd:error_info(Error),
{[{id, DocId}, {error, ErrorStr}, {reason, Reason}]}.
@@ -1294,17 +1452,16 @@ send_updated_doc(Req, Db, DocId, Json) ->
send_updated_doc(Req, Db, DocId, Doc, Headers) ->
send_updated_doc(Req, Db, DocId, Doc, Headers, interactive_edit).
-send_updated_doc(#httpd{user_ctx=Ctx} = Req, Db, DocId, #doc{deleted=Deleted}=Doc,
+send_updated_doc(#httpd{} = Req, Db, DocId, #doc{deleted=Deleted}=Doc,
Headers, UpdateType) ->
- W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
Options =
case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
"true" ->
- [full_commit, UpdateType, {user_ctx,Ctx}, {w,W}];
+ [full_commit, UpdateType];
"false" ->
- [delay_commit, UpdateType, {user_ctx,Ctx}, {w,W}];
+ [delay_commit, UpdateType];
_ ->
- [UpdateType, {user_ctx,Ctx}, {w,W}]
+ [UpdateType]
end,
{Status, {etag, Etag}, Body} = update_doc(Db, DocId,
#doc{deleted=Deleted}=Doc, Options),
@@ -1323,31 +1480,7 @@ http_code_from_status(Status) ->
end.
update_doc(Db, DocId, #doc{deleted=Deleted, body=DocBody}=Doc, Options) ->
- {_, Ref} = spawn_monitor(fun() ->
- try fabric:update_doc(Db, Doc, Options) of
- Resp ->
- exit({exit_ok, Resp})
- catch
- throw:Reason ->
- exit({exit_throw, Reason});
- error:Reason ->
- exit({exit_error, Reason});
- exit:Reason ->
- exit({exit_exit, Reason})
- end
- end),
- Result = receive
- {'DOWN', Ref, _, _, {exit_ok, Ret}} ->
- Ret;
- {'DOWN', Ref, _, _, {exit_throw, Reason}} ->
- throw(Reason);
- {'DOWN', Ref, _, _, {exit_error, Reason}} ->
- erlang:error(Reason);
- {'DOWN', Ref, _, _, {exit_exit, Reason}} ->
- erlang:exit(Reason)
- end,
-
- case Result of
+ case fabric2_db:update_doc(Db, Doc, Options) of
{ok, NewRev} ->
Accepted = false;
{accepted, NewRev} ->
@@ -1394,7 +1527,7 @@ couch_doc_from_req(Req, _Db, DocId, #doc{revs=Revs} = Doc) ->
end,
Doc#doc{id=DocId, revs=Revs2};
couch_doc_from_req(Req, Db, DocId, Json) ->
- Doc = couch_db:doc_from_json_obj_validate(Db, Json),
+ Doc = couch_doc:from_json_obj_validate(Json, fabric2_db:name(Db)),
couch_doc_from_req(Req, Db, DocId, Doc).
@@ -1402,11 +1535,10 @@ couch_doc_from_req(Req, Db, DocId, Json) ->
% couch_doc_open(Db, DocId) ->
% couch_doc_open(Db, DocId, nil, []).
-couch_doc_open(Db, DocId, Rev, Options0) ->
- Options = [{user_ctx, couch_db:get_user_ctx(Db)} | Options0],
+couch_doc_open(Db, DocId, Rev, Options) ->
case Rev of
nil -> % open most recent rev
- case fabric:open_doc(Db, DocId, Options) of
+ case fabric2_db:open_doc(Db, DocId, Options) of
{ok, Doc} ->
chttpd_stats:incr_reads(),
Doc;
@@ -1414,7 +1546,7 @@ couch_doc_open(Db, DocId, Rev, Options0) ->
throw(Error)
end;
_ -> % open a specific rev (deletions come back as stubs)
- case fabric:open_revs(Db, DocId, [Rev], Options) of
+ case fabric2_db:open_doc_revs(Db, DocId, [Rev], Options) of
{ok, [{ok, Doc}]} ->
chttpd_stats:incr_reads(),
Doc;
@@ -1535,8 +1667,11 @@ db_attachment_req(#httpd{method='GET',mochi_req=MochiReq}=Req, Db, DocId, FileNa
end;
-db_attachment_req(#httpd{method=Method, user_ctx=Ctx}=Req, Db, DocId, FileNameParts)
+db_attachment_req(#httpd{method=Method}=Req, Db, DocId, FileNameParts)
when (Method == 'PUT') or (Method == 'DELETE') ->
+ #httpd{
+ mochi_req = MochiReq
+ } = Req,
FileName = validate_attachment_name(
mochiweb_util:join(
lists:map(fun binary_to_list/1,
@@ -1546,16 +1681,45 @@ db_attachment_req(#httpd{method=Method, user_ctx=Ctx}=Req, Db, DocId, FileNamePa
'DELETE' ->
[];
_ ->
- MimeType = case couch_httpd:header_value(Req,"Content-Type") of
+ MimeType = case chttpd:header_value(Req,"Content-Type") of
% We could throw an error here or guess by the FileName.
% Currently, just giving it a default.
undefined -> <<"application/octet-stream">>;
CType -> list_to_binary(CType)
end,
- Data = fabric:att_receiver(Req, chttpd:body_length(Req)),
+ Data = case chttpd:body_length(Req) of
+ undefined ->
+ <<"">>;
+ {unknown_transfer_encoding, Unknown} ->
+ exit({unknown_transfer_encoding, Unknown});
+ chunked ->
+ fun(MaxChunkSize, ChunkFun, InitState) ->
+ chttpd:recv_chunked(
+ Req, MaxChunkSize, ChunkFun, InitState
+ )
+ end;
+ 0 ->
+ <<"">>;
+ Length when is_integer(Length) ->
+ Expect = case chttpd:header_value(Req, "expect") of
+ undefined ->
+ undefined;
+ Value when is_list(Value) ->
+ string:to_lower(Value)
+ end,
+ case Expect of
+ "100-continue" ->
+ MochiReq:start_raw_response({100, gb_trees:empty()});
+ _Else ->
+ ok
+ end,
+ fun() -> chttpd:recv(Req, 0) end;
+ Length ->
+ exit({length_not_integer, Length})
+ end,
ContentLen = case couch_httpd:header_value(Req,"Content-Length") of
undefined -> undefined;
- Length -> list_to_integer(Length)
+ CL -> list_to_integer(CL)
end,
ContentEnc = string:to_lower(string:strip(
couch_httpd:header_value(Req, "Content-Encoding", "identity")
@@ -1587,10 +1751,10 @@ db_attachment_req(#httpd{method=Method, user_ctx=Ctx}=Req, Db, DocId, FileNamePa
% check for the existence of the doc to handle the 404 case.
couch_doc_open(Db, DocId, nil, [])
end,
- couch_db:validate_docid(Db, DocId),
+ fabric2_db:validate_docid(DocId),
#doc{id=DocId};
Rev ->
- case fabric:open_revs(Db, DocId, [Rev], [{user_ctx,Ctx}]) of
+ case fabric2_db:open_doc_revs(Db, DocId, [Rev], []) of
{ok, [{ok, Doc0}]} ->
chttpd_stats:incr_reads(),
Doc0;
@@ -1605,8 +1769,7 @@ db_attachment_req(#httpd{method=Method, user_ctx=Ctx}=Req, Db, DocId, FileNamePa
DocEdited = Doc#doc{
atts = NewAtt ++ [A || A <- Atts, couch_att:fetch(name, A) /= FileName]
},
- W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
- case fabric:update_doc(Db, DocEdited, [{user_ctx,Ctx}, {w,W}]) of
+ case fabric2_db:update_doc(Db, DocEdited, []) of
{ok, UpdatedRev} ->
chttpd_stats:incr_writes(),
HttpCode = 201;
@@ -1615,7 +1778,7 @@ db_attachment_req(#httpd{method=Method, user_ctx=Ctx}=Req, Db, DocId, FileNamePa
HttpCode = 202
end,
erlang:put(mochiweb_request_recv, true),
- DbName = couch_db:name(Db),
+ DbName = fabric2_db:name(Db),
{Status, Headers} = case Method of
'DELETE' ->
@@ -1702,46 +1865,6 @@ get_md5_header(Req) ->
parse_doc_query(Req) ->
lists:foldl(fun parse_doc_query/2, #doc_query_args{}, chttpd:qs(Req)).
-parse_engine_opt(Req) ->
- case chttpd:qs_value(Req, "engine") of
- undefined ->
- [];
- Extension ->
- Available = couch_server:get_engine_extensions(),
- case lists:member(Extension, Available) of
- true ->
- [{engine, iolist_to_binary(Extension)}];
- false ->
- throw({bad_request, invalid_engine_extension})
- end
- end.
-
-
-parse_partitioned_opt(Req) ->
- case chttpd:qs_value(Req, "partitioned") of
- undefined ->
- [];
- "false" ->
- [];
- "true" ->
- ok = validate_partitioned_db_enabled(Req),
- [
- {partitioned, true},
- {hash, [couch_partition, hash, []]}
- ];
- _ ->
- throw({bad_request, <<"Invalid `partitioned` parameter">>})
- end.
-
-
-validate_partitioned_db_enabled(Req) ->
- case couch_flags:is_enabled(partitioned, Req) of
- true ->
- ok;
- false ->
- throw({bad_request, <<"Partitioned feature is not enabled.">>})
- end.
-
parse_doc_query({Key, Value}, Args) ->
case {Key, Value} of
@@ -1811,7 +1934,7 @@ parse_changes_query(Req) ->
{"descending", "true"} ->
Args#changes_args{dir=rev};
{"since", _} ->
- Args#changes_args{since=Value};
+ Args#changes_args{since=parse_since_seq(Value)};
{"last-event-id", _} ->
Args#changes_args{since=Value};
{"limit", _} ->
@@ -1872,6 +1995,30 @@ parse_changes_query(Req) ->
ChangesArgs
end.
+
+parse_since_seq(<<"now">>) ->
+ now;
+
+parse_since_seq(Seq) when is_binary(Seq), size(Seq) > 30 ->
+ throw({bad_request, url_encoded_since_seq});
+
+parse_since_seq(Seq) when is_binary(Seq), size(Seq) > 2 ->
+ % We have implicitly allowed the since seq to either be
+ % JSON encoded or a "raw" string. Here we just remove the
+ % surrounding quotes if they exist and are paired.
+ SeqSize = size(Seq) - 2,
+ case Seq of
+ <<"\"", S:SeqSize/binary, "\"">> -> S;
+ S -> S
+ end;
+
+parse_since_seq(Seq) when is_binary(Seq) ->
+ Seq;
+
+parse_since_seq(Seq) when is_list(Seq) ->
+ parse_since_seq(iolist_to_binary(Seq)).
+
+
extract_header_rev(Req, ExplicitRev) when is_binary(ExplicitRev) or is_list(ExplicitRev)->
extract_header_rev(Req, couch_doc:parse_rev(ExplicitRev));
extract_header_rev(Req, ExplicitRev) ->
@@ -1889,7 +2036,7 @@ extract_header_rev(Req, ExplicitRev) ->
end.
validate_security_can_be_edited(DbName) ->
- UserDbName = config:get("chttpd_auth", "authentication_db", "_users"),
+ UserDbName = ?l2b(config:get("chttpd_auth", "authentication_db", "_users")),
CanEditUserSecurityObject = config:get("couchdb","users_db_security_editable","false"),
case {DbName,CanEditUserSecurityObject} of
{UserDbName,"false"} ->
@@ -1921,6 +2068,8 @@ monitor_attachments(Atts) when is_list(Atts) ->
case couch_att:fetch(data, Att) of
{Fd, _} ->
[monitor(process, Fd) | Monitors];
+ {loc, _, _, _} ->
+ Monitors;
stub ->
Monitors;
Else ->
@@ -1934,8 +2083,7 @@ monitor_attachments(Att) ->
demonitor_refs(Refs) when is_list(Refs) ->
[demonitor(Ref) || Ref <- Refs].
-set_namespace(<<"_all_docs">>, Args) ->
- set_namespace(undefined, Args);
+
set_namespace(<<"_local_docs">>, Args) ->
set_namespace(<<"_local">>, Args);
set_namespace(<<"_design_docs">>, Args) ->
@@ -1984,7 +2132,7 @@ bulk_get_open_doc_revs1(Db, Props, Options, {}) ->
{null, {error, Error}, Options};
DocId ->
try
- couch_db:validate_docid(Db, DocId),
+ fabric2_db:validate_docid(DocId),
bulk_get_open_doc_revs1(Db, Props, Options, {DocId})
catch throw:{Error, Reason} ->
{DocId, {error, {null, Error, Reason}}, Options}
@@ -2018,7 +2166,7 @@ bulk_get_open_doc_revs1(Db, Props, Options, {DocId, Revs}) ->
bulk_get_open_doc_revs1(Db, Props, Options, {DocId, Revs, Options1})
end;
bulk_get_open_doc_revs1(Db, Props, _, {DocId, Revs, Options}) ->
- case fabric:open_revs(Db, DocId, Revs, Options) of
+ case fabric2_db:open_doc_revs(Db, DocId, Revs, Options) of
{ok, []} ->
RevStr = couch_util:get_value(<<"rev">>, Props),
Error = {RevStr, <<"not_found">>, <<"missing">>},
@@ -2092,70 +2240,3 @@ bulk_get_json_error(DocId, Rev, Error, Reason) ->
{<<"rev">>, Rev},
{<<"error">>, Error},
{<<"reason">>, Reason}]}}]}).
-
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-monitor_attachments_test_() ->
- {"ignore stubs",
- fun () ->
- Atts = [couch_att:new([{data, stub}])],
- ?_assertEqual([], monitor_attachments(Atts))
- end
- }.
-
-parse_partitioned_opt_test_() ->
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- t_should_allow_partitioned_db(),
- t_should_throw_on_not_allowed_partitioned_db(),
- t_returns_empty_array_for_partitioned_false(),
- t_returns_empty_array_for_no_partitioned_qs()
- ]
- }.
-
-
-setup() ->
- ok.
-
-teardown(_) ->
- meck:unload().
-
-mock_request(Url) ->
- Headers = mochiweb_headers:make([{"Host", "examples.com"}]),
- MochiReq = mochiweb_request:new(nil, 'PUT', Url, {1, 1}, Headers),
- #httpd{mochi_req = MochiReq}.
-
-t_should_allow_partitioned_db() ->
- ?_test(begin
- meck:expect(couch_flags, is_enabled, 2, true),
- Req = mock_request("/all-test21?partitioned=true"),
- [Partitioned, _] = parse_partitioned_opt(Req),
- ?assertEqual(Partitioned, {partitioned, true})
- end).
-
-t_should_throw_on_not_allowed_partitioned_db() ->
- ?_test(begin
- meck:expect(couch_flags, is_enabled, 2, false),
- Req = mock_request("/all-test21?partitioned=true"),
- Throw = {bad_request, <<"Partitioned feature is not enabled.">>},
- ?assertThrow(Throw, parse_partitioned_opt(Req))
- end).
-
-t_returns_empty_array_for_partitioned_false() ->
- ?_test(begin
- Req = mock_request("/all-test21?partitioned=false"),
- ?assertEqual(parse_partitioned_opt(Req), [])
- end).
-
-t_returns_empty_array_for_no_partitioned_qs() ->
- ?_test(begin
- Req = mock_request("/all-test21"),
- ?assertEqual(parse_partitioned_opt(Req), [])
- end).
-
--endif.
diff --git a/src/chttpd/src/chttpd_external.erl b/src/chttpd/src/chttpd_external.erl
index 451d87d2e..7317b7e4b 100644
--- a/src/chttpd/src/chttpd_external.erl
+++ b/src/chttpd/src/chttpd_external.erl
@@ -38,7 +38,7 @@ json_req_obj_fields() ->
<<"peer">>, <<"form">>, <<"cookie">>, <<"userCtx">>, <<"secObj">>].
json_req_obj_field(<<"info">>, #httpd{}, Db, _DocId) ->
- {ok, Info} = get_db_info(Db),
+ {ok, Info} = fabric2_db:get_db_info(Db),
{Info};
json_req_obj_field(<<"uuid">>, #httpd{}, _Db, _DocId) ->
couch_uuids:new();
@@ -81,27 +81,18 @@ json_req_obj_field(<<"form">>, #httpd{mochi_req=Req, method=Method}=HttpReq, Db,
json_req_obj_field(<<"cookie">>, #httpd{mochi_req=Req}, _Db, _DocId) ->
to_json_terms(Req:parse_cookie());
json_req_obj_field(<<"userCtx">>, #httpd{}, Db, _DocId) ->
- couch_util:json_user_ctx(Db);
-json_req_obj_field(<<"secObj">>, #httpd{user_ctx=UserCtx}, Db, _DocId) ->
- get_db_security(Db, UserCtx).
-
-
-get_db_info(Db) ->
- case couch_db:is_clustered(Db) of
- true ->
- fabric:get_db_info(Db);
- false ->
- couch_db:get_db_info(Db)
- end.
-
-
-get_db_security(Db, #user_ctx{}) ->
- case couch_db:is_clustered(Db) of
- true ->
- fabric:get_security(Db);
- false ->
- couch_db:get_security(Db)
- end.
+ json_user_ctx(Db);
+json_req_obj_field(<<"secObj">>, #httpd{user_ctx = #user_ctx{}}, Db, _DocId) ->
+ fabric2_db:get_security(Db).
+
+
+json_user_ctx(Db) ->
+ Ctx = fabric2_db:get_user_ctx(Db),
+ {[
+ {<<"db">>, fabric2_db:name(Db)},
+ {<<"name">>, Ctx#user_ctx.name},
+ {<<"roles">>, Ctx#user_ctx.roles}
+ ]}.
to_json_terms(Data) ->
diff --git a/src/chttpd/src/chttpd_handlers.erl b/src/chttpd/src/chttpd_handlers.erl
index 930563230..17d2952b3 100644
--- a/src/chttpd/src/chttpd_handlers.erl
+++ b/src/chttpd/src/chttpd_handlers.erl
@@ -15,7 +15,8 @@
-export([
url_handler/2,
db_handler/2,
- design_handler/2
+ design_handler/2,
+ handler_info/1
]).
-define(SERVICE_ID, chttpd_handlers).
@@ -35,6 +36,26 @@ db_handler(HandlerKey, DefaultFun) ->
design_handler(HandlerKey, DefaultFun) ->
select(collect(design_handler, [HandlerKey]), DefaultFun).
+handler_info(HttpReq) ->
+ #httpd{
+ method = Method,
+ path_parts = PathParts
+ } = HttpReq,
+ Default = {'unknown.unknown', #{}},
+ try
+ select(collect(handler_info, [Method, PathParts, HttpReq]), Default)
+ catch Type:Reason ->
+ Stack = erlang:get_stacktrace(),
+ couch_log:error("~s :: handler_info failure for ~p : ~p:~p :: ~p", [
+ ?MODULE,
+ get(nonce),
+ Type,
+ Reason,
+ Stack
+ ]),
+ Default
+ end.
+
%% ------------------------------------------------------------------
%% Internal Function Definitions
%% ------------------------------------------------------------------
diff --git a/src/chttpd/src/chttpd_httpd_handlers.erl b/src/chttpd/src/chttpd_httpd_handlers.erl
index 5e86ea87d..d50115917 100644
--- a/src/chttpd/src/chttpd_httpd_handlers.erl
+++ b/src/chttpd/src/chttpd_httpd_handlers.erl
@@ -12,12 +12,23 @@
-module(chttpd_httpd_handlers).
--export([url_handler/1, db_handler/1, design_handler/1]).
+-export([url_handler/1, db_handler/1, design_handler/1, handler_info/3]).
+
+-export([
+ not_supported/2,
+ not_supported/3,
+ not_implemented/2
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+
url_handler(<<>>) -> fun chttpd_misc:handle_welcome_req/1;
url_handler(<<"favicon.ico">>) -> fun chttpd_misc:handle_favicon_req/1;
url_handler(<<"_utils">>) -> fun chttpd_misc:handle_utils_dir_req/1;
url_handler(<<"_all_dbs">>) -> fun chttpd_misc:handle_all_dbs_req/1;
+url_handler(<<"_deleted_dbs">>) -> fun chttpd_misc:handle_deleted_dbs_req/1;
url_handler(<<"_dbs_info">>) -> fun chttpd_misc:handle_dbs_info_req/1;
url_handler(<<"_active_tasks">>) -> fun chttpd_misc:handle_task_status_req/1;
url_handler(<<"_scheduler">>) -> fun couch_replicator_httpd:handle_scheduler_req/1;
@@ -33,14 +44,475 @@ db_handler(<<"_view_cleanup">>) -> fun chttpd_db:handle_view_cleanup_req/2;
db_handler(<<"_compact">>) -> fun chttpd_db:handle_compact_req/2;
db_handler(<<"_design">>) -> fun chttpd_db:handle_design_req/2;
db_handler(<<"_partition">>) -> fun chttpd_db:handle_partition_req/2;
-db_handler(<<"_temp_view">>) -> fun chttpd_view:handle_temp_view_req/2;
+db_handler(<<"_temp_view">>) -> fun ?MODULE:not_supported/2;
db_handler(<<"_changes">>) -> fun chttpd_db:handle_changes_req/2;
+db_handler(<<"_purge">>) -> fun ?MODULE:not_implemented/2;
+db_handler(<<"_purged_infos_limit">>) -> fun ?MODULE:not_implemented/2;
db_handler(_) -> no_match.
design_handler(<<"_view">>) -> fun chttpd_view:handle_view_req/3;
-design_handler(<<"_show">>) -> fun chttpd_show:handle_doc_show_req/3;
-design_handler(<<"_list">>) -> fun chttpd_show:handle_view_list_req/3;
+design_handler(<<"_show">>) -> fun ?MODULE:not_supported/3;
+design_handler(<<"_list">>) -> fun ?MODULE:not_supported/3;
design_handler(<<"_update">>) -> fun chttpd_show:handle_doc_update_req/3;
design_handler(<<"_info">>) -> fun chttpd_db:handle_design_info_req/3;
-design_handler(<<"_rewrite">>) -> fun chttpd_rewrite:handle_rewrite_req/3;
+design_handler(<<"_rewrite">>) -> fun ?MODULE:not_supported/3;
design_handler(_) -> no_match.
+
+
+handler_info('GET', [], _) ->
+ {'welcome_message.read', #{}};
+
+handler_info('GET', [<<"_active_tasks">>], _) ->
+ {'active_tasks.read', #{}};
+
+handler_info('GET', [<<"_all_dbs">>], _) ->
+ {'all_dbs.read', #{}};
+
+handler_info('GET', [<<"_deleted_dbs">>], _) ->
+ {'account-deleted-dbs.read', #{}};
+
+handler_info('POST', [<<"_deleted_dbs">>], _) ->
+ {'account-deleted-dbs.undelete', #{}};
+
+handler_info('DELETE', [<<"_deleted_dbs">>, Db], _) ->
+ {'account-deleted-dbs.delete', #{'db.name' => Db}};
+
+handler_info('POST', [<<"_dbs_info">>], _) ->
+ {'dbs_info.read', #{}};
+
+handler_info('GET', [<<"_node">>, <<"_local">>], _) ->
+ {'node.name.read', #{}};
+
+handler_info(Method, [<<"_node">>, <<"_local">> | Rest], HttpReq) ->
+ handler_info(Method, [<<"_node">>, node() | Rest], HttpReq);
+
+handler_info('GET', [<<"_node">>, Node, <<"_config">>], _) ->
+ {'node.config.all.read', #{node => Node}};
+
+handler_info('GET', [<<"_node">>, Node, <<"_config">>, Section], _) ->
+ {'node.config.section.read', #{node => Node, 'config.section' => Section}};
+
+handler_info('GET', [<<"_node">>, Node, <<"_config">>, Section, Key], _) ->
+ {'node.config.key.read', #{
+ node => Node,
+ 'config.section' => Section,
+ 'config.key' => Key
+ }};
+
+handler_info('PUT', [<<"_node">>, Node, <<"_config">>, Section, Key], _) ->
+ {'node.config.key.write', #{
+ node => Node,
+ 'config.section' => Section,
+ 'config.key' => Key
+ }};
+
+handler_info('DELETE', [<<"_node">>, Node, <<"_config">>, Section, Key], _) ->
+ {'node.config.key.delete', #{
+ node => Node,
+ 'config.section' => Section,
+ 'config.key' => Key
+ }};
+
+handler_info('GET', [<<"_node">>, Node, <<"_stats">> | Path], _) ->
+ {'node.stats.read', #{node => Node, 'stat.path' => Path}};
+
+handler_info('GET', [<<"_node">>, Node, <<"_system">>], _) ->
+ {'node.system.read', #{node => Node}};
+
+handler_info('POST', [<<"_node">>, Node, <<"_restart">>], _) ->
+ {'node.restart.execute', #{node => Node}};
+
+handler_info('POST', [<<"_reload_query_servers">>], _) ->
+ {'query_servers.reload', #{}};
+
+handler_info('POST', [<<"_replicate">>], _) ->
+ {'replication.create', #{}};
+
+handler_info('GET', [<<"_scheduler">>, <<"jobs">>], _) ->
+ {'replication.jobs.read', #{}};
+
+handler_info('GET', [<<"_scheduler">>, <<"jobs">>, JobId], _) ->
+ {'replication.job.read', #{'job.id' => JobId}};
+
+handler_info('GET', [<<"_scheduler">>, <<"docs">>], _) ->
+ {'replication.docs.read', #{'db.name' => <<"_replicator">>}};
+
+handler_info('GET', [<<"_scheduler">>, <<"docs">>, Db], _) ->
+ {'replication.docs.read', #{'db.name' => Db}};
+
+handler_info('GET', [<<"_scheduler">>, <<"docs">>, Db, DocId], _) ->
+ {'replication.doc.read', #{'db.name' => Db, 'doc.id' => DocId}};
+
+handler_info('GET', [<<"_scheduler">>, <<"docs">> | Path], _) ->
+ case lists:splitwith(fun(Elem) -> Elem /= <<"_replicator">> end, Path) of
+ {_, [<<"_replicator">>]} ->
+ {'replication.docs.read', #{
+ 'db.name' => filename:join(Path)
+ }};
+ {DbParts, [<<"_replicator">>, DocId]} ->
+ {'replication.doc.read', #{
+ 'db.name' => filename:join(DbParts ++ [<<"_replicator">>]),
+ 'doc.id' => DocId
+ }};
+ _ ->
+ no_match
+ end;
+
+handler_info('GET', [<<"_session">>], _) ->
+ {'session.read', #{}};
+
+handler_info('POST', [<<"_session">>], _) ->
+ {'session.create', #{}};
+
+handler_info('DELETE', [<<"_session">>], _) ->
+ {'session.delete', #{}};
+
+handler_info('GET', [<<"_up">>], _) ->
+ {'health.read', #{}};
+
+handler_info('GET', [<<"_utils">> | Path], _) ->
+ {'utils.read', #{'file.path' => filename:join(Path)}};
+
+handler_info('GET', [<<"_uuids">>], _) ->
+ {'uuids.read', #{}};
+
+handler_info('GET', [<<"favicon.ico">>], _) ->
+ {'favicon.ico.read', #{}};
+
+
+handler_info(Method, [<<"_", _/binary>> = Part| Rest], Req) ->
+ % Maybe bail here so that we don't trample over a
+ % different url_handler plugin. However, we continue
+ % on for known system databases.
+ DbName = case Part of
+ <<"_dbs">> -> '_dbs';
+ <<"_global_changes">> -> '_global_changes';
+ <<"_metadata">> -> '_metadata';
+ <<"_nodes">> -> '_nodes';
+ <<"_replicator">> -> '_replicator';
+ <<"_users">> -> '_users';
+ _ -> no_match
+ end,
+ if DbName == no_match -> no_match; true ->
+ handler_info(Method, [DbName | Rest], Req)
+ end;
+
+handler_info('GET', [Db], _) ->
+ {'db.info.read', #{'db.name' => Db}};
+
+handler_info('PUT', [Db], _) ->
+ {'db.create', #{'db.name' => Db}};
+
+handler_info('POST', [Db], _) ->
+ {'db.doc.write', #{'db.name' => Db}};
+
+handler_info('DELETE', [Db], _) ->
+ {'db.delete', #{'db.name' => Db}};
+
+handler_info(M, [Db, <<"_all_docs">>], _) when M == 'GET'; M == 'POST' ->
+ {'db.all_docs.read', #{'db.name' => Db}};
+
+handler_info('POST', [Db, <<"_all_docs">>, <<"queries">>], _) ->
+ {'db.all_docs.read', #{'db.name' => Db, multi => true}};
+
+handler_info('POST', [Db, <<"_bulk_docs">>], _) ->
+ {'db.docs.write', #{'db.name' => Db, bulk => true}};
+
+handler_info('POST', [Db, <<"_bulk_get">>], _) ->
+ {'db.docs.read', #{'db.name' => Db, bulk => true}};
+
+handler_info('GET', [Db, <<"_changes">>], _) ->
+ {'db.changes.read', #{'db.name' => Db}};
+
+handler_info('POST', [Db, <<"_changes">>], _) ->
+ {'db.changes.read', #{'db.name' => Db}};
+
+handler_info('POST', [Db, <<"_compact">>], _) ->
+ {'db.compact.execute', #{'db.name' => Db}};
+
+handler_info('GET', [Db, <<"_design">>, Name], _) ->
+ {'db.design.doc.read', #{'db.name' => Db, 'design.id' => Name}};
+
+handler_info('POST', [Db, <<"_design">>, Name], _) ->
+ {'db.design.doc.write', #{'db.name' => Db, 'design.id' => Name}};
+
+handler_info('PUT', [Db, <<"_design">>, Name], _) ->
+ {'db.design.doc.write', #{'db.name' => Db, 'design.id' => Name}};
+
+handler_info('COPY', [Db, <<"_design">>, Name], Req) ->
+ {'db.design.doc.write', #{
+ 'db.name' => Db,
+ 'design.id' => get_copy_destination(Req),
+ 'copy.source.doc.id' => <<"_design/", Name/binary>>
+ }};
+
+handler_info('DELETE', [Db, <<"_design">>, Name], _) ->
+ {'db.design.doc.delete', #{'db.name' => Db, 'design.id' => Name}};
+
+handler_info('GET', [Db, <<"_design">>, Name, <<"_info">>], _) ->
+ {'db.design.info.read', #{'db.name' => Db, 'design.id' => Name}};
+
+handler_info(M, [Db, <<"_design">>, Name, <<"_list">>, List, View], _)
+ when M == 'GET'; M == 'POST'; M == 'OPTIONS' ->
+ {'db.design.list.read', #{
+ 'db.name' => Db,
+ 'design.id' => Name,
+ 'design.list.name' => List,
+ 'design.view.name' => View
+ }};
+
+handler_info(M, [Db, <<"_design">>, Name, <<"_list">>, List, Design, View], _)
+ when M == 'GET'; M == 'POST'; M == 'OPTIONS' ->
+ {'db.design.list.read', #{
+ 'db.name' => Db,
+ 'design.id' => Name,
+ 'design.list.name' => List,
+ 'design.view.source.id' => Design,
+ 'design.view.name' => View
+ }};
+
+handler_info(_, [Db, <<"_design">>, Name, <<"_rewrite">> | Path], _) ->
+ {'db.design.rewrite.execute', #{
+ 'db.name' => Db,
+ 'design.id' => Name,
+ 'rewrite.path' => filename:join(Path)
+ }};
+
+handler_info(_, [Db, <<"_design">>, Name, <<"_show">>, Show, DocId], _) ->
+ {'db.design.show.execute', #{
+ 'db.name' => Db,
+ 'design.id' => Name,
+ 'design.show.name' => Show,
+ 'design.show.doc.id' => DocId
+ }};
+
+handler_info(_, [Db, <<"_design">>, Name, <<"_update">>, Update | Rest], _) ->
+ BaseTags = #{
+ 'db.name' => Db,
+ 'design.id' => Name,
+ 'design.update.name' => Update
+ },
+ Tags = case Rest of
+ [] ->
+ BaseTags;
+ _ ->
+ DocId = filename:join(Rest),
+ maps:put('design.update.doc.id', DocId, BaseTags)
+ end,
+ {'db.design.update.execute', Tags};
+
+handler_info('POST', [Db, <<"_design">>, Name, <<"_view">>, View, <<"queries">>], _) ->
+ {'db.design.view.multi.read', #{
+ 'db.name' => Db,
+ 'design.id' => Name,
+ 'design.view.name' => View
+ }};
+
+handler_info(M, [Db, <<"_design">>, Name, <<"_view">>, View], _)
+ when M == 'GET'; M == 'POST' ->
+ {'db.design.view.read', #{
+ 'db.name' => Db,
+ 'design.id' => Name,
+ 'design.view.name' => View
+ }};
+
+handler_info(_, [_Db, <<"_design">>, _Name, <<"_", _/binary>> | _], _) ->
+ % Bail here so that we don't treat a possible plugin
+ % design handler request as a design doc attachment
+ no_match;
+
+handler_info('GET', [Db, <<"_design">>, Name | Path], _) ->
+ {'db.design.doc.attachment.read', #{
+ 'db.name' => Db,
+ 'design.id' => Name,
+ 'attachment.name' => filename:join(Path)
+ }};
+
+handler_info('PUT', [Db, <<"_design">>, Name | Path], _) ->
+ {'db.design.doc.attachment.write', #{
+ 'db.name' => Db,
+ 'design.id' => Name,
+ 'attachment.name' => filename:join(Path)
+ }};
+
+handler_info('DELETE', [Db, <<"_design">>, Name | Path], _) ->
+ {'db.design.doc.attachment.delete', #{
+ 'db.name' => Db,
+ 'design.id' => Name,
+ 'attachment.name' => filename:join(Path)
+ }};
+
+handler_info(_, [Db, <<"_design/", Name/binary>> | Rest], Req) ->
+ % Recurse if someone sent us `_design%2Fname`
+ chttpd_handlers:handler_info(Req#httpd{
+ path_parts = [Db, <<"_design">>, Name | Rest]
+ });
+
+handler_info(M, [Db, <<"_design_docs">>], _) when M == 'GET'; M == 'POST' ->
+ {'db.design_docs.read', #{'db.name' => Db}};
+
+handler_info('POST', [Db, <<"_design_docs">>, <<"queries">>], _) ->
+ {'db.design_docs.read', #{'db.name' => Db, multi => true}};
+
+handler_info('POST', [Db, <<"_ensure_full_commit">>], _) ->
+ {'db.ensure_full_commit.execute', #{'db.name' => Db}};
+
+handler_info('GET', [Db, <<"_local">>, Name], _) ->
+ {'db.local.doc.read', #{'db.name' => Db, 'local.id' => Name}};
+
+handler_info('POST', [Db, <<"_local">>, Name], _) ->
+ {'db.local.doc.write', #{'db.name' => Db, 'local.id' => Name}};
+
+handler_info('PUT', [Db, <<"_local">>, Name], _) ->
+ {'db.local.doc.write', #{'db.name' => Db, 'local.id' => Name}};
+
+handler_info('COPY', [Db, <<"_local">>, Name], Req) ->
+ {'db.local.doc.write', #{
+ 'db.name' => Db,
+ 'local.id' => get_copy_destination(Req),
+ 'copy.source.doc.id' => <<"_local/", Name/binary>>
+ }};
+
+handler_info('DELETE', [Db, <<"_local">>, Name], _) ->
+ {'db.local.doc.delete', #{'db.name' => Db, 'local.id' => Name}};
+
+handler_info(_, [Db, <<"_local">>, Name | _Path], _) ->
+ {'db.local.doc.invalid_attachment_req', #{
+ 'db.name' => Db,
+ 'local.id' => Name
+ }};
+
+handler_info(M, [Db, <<"_local_docs">>], _) when M == 'GET'; M == 'POST' ->
+ {'db.local_docs.read', #{'db.name' => Db}};
+
+handler_info('POST', [Db, <<"_local_docs">>, <<"queries">>], _) ->
+ {'db.local_docs.read', #{'db.name' => Db, multi => true}};
+
+handler_info('POST', [Db, <<"_missing_revs">>], _) ->
+ {'db.docs.missing_revs.execute', #{'db.name' => Db}};
+
+handler_info('GET', [Db, <<"_partition">>, Partition], _) ->
+ {'db.partition.info.read', #{'db.name' => Db, partition => Partition}};
+
+handler_info(_, [Db, <<"_partition">>, Partition | Rest], Req) ->
+ NewPath = case Rest of
+ [<<"_all_docs">> | _] ->
+ [Db | Rest];
+ [<<"_index">> | _] ->
+ [Db | Rest];
+ [<<"_find">> | _] ->
+ [Db | Rest];
+ [<<"_explain">> | _] ->
+ [Db | Rest];
+ [<<"_design">>, _Name, <<"_", _/binary>> | _] ->
+ [Db | Rest];
+ _ ->
+ no_match
+ end,
+ if NewPath == no_match -> no_match; true ->
+ {OpName, Tags} = chttpd_handlers:handler_info(Req#httpd{
+ path_parts = NewPath
+ }),
+ NewOpName = case atom_to_list(OpName) of
+ "db." ++ Name -> list_to_atom("db.partition." ++ Name);
+ Else -> list_to_atom(Else ++ ".partition")
+ end,
+ {NewOpName, maps:put(partition, Partition, Tags)}
+ end;
+
+handler_info('POST', [Db, <<"_purge">>], _) ->
+ {'db.docs.purge', #{'db.name' => Db}};
+
+handler_info('GET', [Db, <<"_purged_infos_limit">>], _) ->
+ {'db.purged_infos_limit.read', #{'db.name' => Db}};
+
+handler_info('PUT', [Db, <<"_purged_infos_limit">>], _) ->
+ {'db.purged_infos_limit.write', #{'db.name' => Db}};
+
+handler_info('POST', [Db, <<"_revs_diff">>], _) ->
+ {'db.docs.revs_diff.execute', #{'db.name' => Db}};
+
+handler_info('GET', [Db, <<"_revs_limit">>], _) ->
+ {'db.revs_limit.read', #{'db.name' => Db}};
+
+handler_info('PUT', [Db, <<"_revs_limit">>], _) ->
+ {'db.revs_limit.write', #{'db.name' => Db}};
+
+handler_info('GET', [Db, <<"_security">>], _) ->
+ {'db.security.read', #{'db.name' => Db}};
+
+handler_info('PUT', [Db, <<"_security">>], _) ->
+ {'db.security.write', #{'db.name' => Db}};
+
+handler_info(_, [Db, <<"_view_cleanup">>], _) ->
+ {'views.cleanup.execute', #{'db.name' => Db}};
+
+handler_info(_, [_Db, <<"_", _/binary>> | _], _) ->
+ % Bail here for other possible db_handlers
+ no_match;
+
+handler_info('GET', [Db, DocId], _) ->
+ {'db.doc.read', #{'db.name' => Db, 'doc.id' => DocId}};
+
+handler_info('POST', [Db, DocId], _) ->
+ {'db.doc.write', #{'db.name' => Db, 'doc.id' => DocId}};
+
+handler_info('PUT', [Db, DocId], _) ->
+ {'db.doc.write', #{'db.name' => Db, 'doc.id' => DocId}};
+
+handler_info('COPY', [Db, DocId], Req) ->
+ {'db.doc.write', #{
+ 'db.name' => Db,
+ 'doc.id' => get_copy_destination(Req),
+ 'copy.source.doc.id' => DocId
+ }};
+
+handler_info('DELETE', [Db, DocId], _) ->
+ {'db.doc.delete', #{'db.name' => Db, 'doc.id' => DocId}};
+
+handler_info('GET', [Db, DocId | Path], _) ->
+ {'db.doc.attachment.read', #{
+ 'db.name' => Db,
+ 'doc.id' => DocId,
+ 'attachment.name' => filename:join(Path)
+ }};
+
+handler_info('PUT', [Db, DocId | Path], _) ->
+ {'db.doc.attachment.write', #{
+ 'db.name' => Db,
+ 'doc.id' => DocId,
+ 'attachment.name' => filename:join(Path)
+ }};
+
+handler_info('DELETE', [Db, DocId | Path], _) ->
+ {'db.doc.attachment.delete', #{
+ 'db.name' => Db,
+ 'doc.id' => DocId,
+ 'attachment.name' => filename:join(Path)
+ }};
+
+handler_info(_, _, _) ->
+ no_match.
+
+
+get_copy_destination(Req) ->
+ try
+ {DocIdStr, _} = couch_httpd_db:parse_copy_destination_header(Req),
+ list_to_binary(mochiweb_util:unquote(DocIdStr))
+ catch _:_ ->
+ unknown
+ end.
+
+
+not_supported(#httpd{} = Req, Db, _DDoc) ->
+ not_supported(Req, Db).
+
+
+not_supported(#httpd{} = Req, _Db) ->
+ Msg = <<"resource is not supported in CouchDB >= 4.x">>,
+ chttpd:send_error(Req, 410, gone, Msg).
+
+
+not_implemented(#httpd{} = Req, _Db) ->
+ Msg = <<"resource is not implemented">>,
+ chttpd:send_error(Req, 501, not_implemented, Msg).
diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl
index 830fea378..42d1fb297 100644
--- a/src/chttpd/src/chttpd_misc.erl
+++ b/src/chttpd/src/chttpd_misc.erl
@@ -15,6 +15,7 @@
-export([
handle_all_dbs_req/1,
handle_dbs_info_req/1,
+ handle_deleted_dbs_req/1,
handle_favicon_req/1,
handle_favicon_req/2,
handle_replicate_req/1,
@@ -113,22 +114,27 @@ maybe_add_csp_headers(Headers, _) ->
Headers.
handle_all_dbs_req(#httpd{method='GET'}=Req) ->
- Args = couch_mrview_http:parse_params(Req, undefined),
- ShardDbName = config:get("mem3", "shards_db", "_dbs"),
- %% shard_db is not sharded but mem3:shards treats it as an edge case
- %% so it can be pushed thru fabric
- {ok, Info} = fabric:get_db_info(ShardDbName),
- Etag = couch_httpd:make_etag({Info}),
- Options = [{user_ctx, Req#httpd.user_ctx}],
- {ok, Resp} = chttpd:etag_respond(Req, Etag, fun() ->
- {ok, Resp} = chttpd:start_delayed_json_response(Req, 200, [{"ETag",Etag}]),
- VAcc = #vacc{req=Req,resp=Resp},
- fabric:all_docs(ShardDbName, Options, fun all_dbs_callback/2, VAcc, Args)
- end),
- case is_record(Resp, vacc) of
- true -> {ok, Resp#vacc.resp};
- _ -> {ok, Resp}
- end;
+ #mrargs{
+ start_key = StartKey,
+ end_key = EndKey,
+ direction = Dir,
+ limit = Limit,
+ skip = Skip
+ } = couch_mrview_http:parse_params(Req, undefined),
+
+ Options = [
+ {start_key, StartKey},
+ {end_key, EndKey},
+ {dir, Dir},
+ {limit, Limit},
+ {skip, Skip}
+ ],
+
+ {ok, Resp} = chttpd:start_delayed_json_response(Req, 200, []),
+ Callback = fun all_dbs_callback/2,
+ Acc = #vacc{req=Req,resp=Resp},
+ {ok, Acc1} = fabric2_db:list_dbs(Callback, Acc, Options),
+ {ok, Acc1#vacc.resp};
handle_all_dbs_req(Req) ->
send_method_not_allowed(Req, "GET,HEAD").
@@ -137,12 +143,9 @@ all_dbs_callback({meta, _Meta}, #vacc{resp=Resp0}=Acc) ->
{ok, Acc#vacc{resp=Resp1}};
all_dbs_callback({row, Row}, #vacc{resp=Resp0}=Acc) ->
Prepend = couch_mrview_http:prepend_val(Acc),
- case couch_util:get_value(id, Row) of <<"_design", _/binary>> ->
- {ok, Acc};
- DbName ->
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, [Prepend, ?JSON_ENCODE(DbName)]),
- {ok, Acc#vacc{prepend=",", resp=Resp1}}
- end;
+ DbName = couch_util:get_value(id, Row),
+ {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, [Prepend, ?JSON_ENCODE(DbName)]),
+ {ok, Acc#vacc{prepend=",", resp=Resp1}};
all_dbs_callback(complete, #vacc{resp=Resp0}=Acc) ->
{ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "]"),
{ok, Resp2} = chttpd:end_delayed_json_response(Resp1),
@@ -151,7 +154,10 @@ all_dbs_callback({error, Reason}, #vacc{resp=Resp0}=Acc) ->
{ok, Resp1} = chttpd:send_delayed_error(Resp0, Reason),
{ok, Acc#vacc{resp=Resp1}}.
-handle_dbs_info_req(#httpd{method='POST'}=Req) ->
+handle_dbs_info_req(#httpd{method = 'GET'} = Req) ->
+ ok = chttpd:verify_is_server_admin(Req),
+ send_db_infos(Req, list_dbs_info);
+handle_dbs_info_req(#httpd{method='POST', user_ctx=UserCtx}=Req) ->
chttpd:validate_ctype(Req, "application/json"),
Props = chttpd:json_body_obj(Req),
Keys = couch_mrview_util:get_view_keys(Props),
@@ -168,35 +174,134 @@ handle_dbs_info_req(#httpd{method='POST'}=Req) ->
{ok, Resp} = chttpd:start_json_response(Req, 200),
send_chunk(Resp, "["),
lists:foldl(fun(DbName, AccSeparator) ->
- case catch fabric:get_db_info(DbName) of
- {ok, Result} ->
- Json = ?JSON_ENCODE({[{key, DbName}, {info, {Result}}]}),
- send_chunk(Resp, AccSeparator ++ Json);
- _ ->
- Json = ?JSON_ENCODE({[{key, DbName}, {error, not_found}]}),
- send_chunk(Resp, AccSeparator ++ Json)
+ try
+ {ok, Db} = fabric2_db:open(DbName, [{user_ctx, UserCtx}]),
+ {ok, Info} = fabric2_db:get_db_info(Db),
+ Json = ?JSON_ENCODE({[{key, DbName}, {info, {Info}}]}),
+ send_chunk(Resp, AccSeparator ++ Json)
+ catch error:database_does_not_exist ->
+ ErrJson = ?JSON_ENCODE({[{key, DbName}, {error, not_found}]}),
+ send_chunk(Resp, AccSeparator ++ ErrJson)
end,
"," % AccSeparator now has a comma
end, "", Keys),
send_chunk(Resp, "]"),
chttpd:end_json_response(Resp);
handle_dbs_info_req(Req) ->
- send_method_not_allowed(Req, "POST").
+ send_method_not_allowed(Req, "GET,HEAD,POST").
+
+handle_deleted_dbs_req(#httpd{method='GET', path_parts=[_]}=Req) ->
+ ok = chttpd:verify_is_server_admin(Req),
+ send_db_infos(Req, list_deleted_dbs_info);
+handle_deleted_dbs_req(#httpd{method='POST', user_ctx=Ctx, path_parts=[_]}=Req) ->
+ couch_httpd:verify_is_server_admin(Req),
+ chttpd:validate_ctype(Req, "application/json"),
+ GetJSON = fun(Key, Props, Default) ->
+ case couch_util:get_value(Key, Props) of
+ undefined when Default == error ->
+ Fmt = "POST body must include `~s` parameter.",
+ Msg = io_lib:format(Fmt, [Key]),
+ throw({bad_request, iolist_to_binary(Msg)});
+ undefined ->
+ Default;
+ Value ->
+ Value
+ end
+ end,
+ {BodyProps} = chttpd:json_body_obj(Req),
+ {UndeleteProps} = GetJSON(<<"undelete">>, BodyProps, error),
+ DbName = GetJSON(<<"source">>, UndeleteProps, error),
+ TimeStamp = GetJSON(<<"timestamp">>, UndeleteProps, error),
+ TgtDbName = GetJSON(<<"target">>, UndeleteProps, DbName),
+ case fabric2_db:undelete(DbName, TgtDbName, TimeStamp, [{user_ctx, Ctx}]) of
+ ok ->
+ send_json(Req, 200, {[{ok, true}]});
+ {error, file_exists} ->
+ chttpd:send_error(Req, file_exists);
+ {error, not_found} ->
+ chttpd:send_error(Req, not_found);
+ Error ->
+ throw(Error)
+ end;
+handle_deleted_dbs_req(#httpd{path_parts = PP}=Req) when length(PP) == 1 ->
+ send_method_not_allowed(Req, "GET,HEAD,POST");
+handle_deleted_dbs_req(#httpd{method='DELETE', user_ctx=Ctx, path_parts=[_, DbName]}=Req) ->
+ couch_httpd:verify_is_server_admin(Req),
+ TS = case ?JSON_DECODE(couch_httpd:qs_value(Req, "timestamp", "null")) of
+ null ->
+ throw({bad_request, "`timestamp` parameter is not provided."});
+ TS0 ->
+ TS0
+ end,
+ case fabric2_db:delete(DbName, [{user_ctx, Ctx}, {deleted_at, TS}]) of
+ ok ->
+ send_json(Req, 200, {[{ok, true}]});
+ {error, not_found} ->
+ chttpd:send_error(Req, not_found);
+ Error ->
+ throw(Error)
+ end;
+handle_deleted_dbs_req(#httpd{path_parts = PP}=Req) when length(PP) == 2 ->
+ send_method_not_allowed(Req, "HEAD,DELETE");
+handle_deleted_dbs_req(Req) ->
+ chttpd:send_error(Req, not_found).
+
+send_db_infos(Req, ListFunctionName) ->
+ #mrargs{
+ start_key = StartKey,
+ end_key = EndKey,
+ direction = Dir,
+ limit = Limit,
+ skip = Skip
+ } = couch_mrview_http:parse_params(Req, undefined),
+
+ Options = [
+ {start_key, StartKey},
+ {end_key, EndKey},
+ {dir, Dir},
+ {limit, Limit},
+ {skip, Skip}
+ ],
+
+ % TODO: Figure out if we can't calculate a valid
+ % ETag for this request. \xFFmetadataVersion won't
+ % work as we don't bump versions on size changes
+
+ {ok, Resp1} = chttpd:start_delayed_json_response(Req, 200, []),
+ Callback = fun dbs_info_callback/2,
+ Acc = #vacc{req = Req, resp = Resp1},
+ {ok, Resp2} = fabric2_db:ListFunctionName(Callback, Acc, Options),
+ case is_record(Resp2, vacc) of
+ true -> {ok, Resp2#vacc.resp};
+ _ -> {ok, Resp2}
+ end.
+
+dbs_info_callback({meta, _Meta}, #vacc{resp = Resp0} = Acc) ->
+ {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "["),
+ {ok, Acc#vacc{resp = Resp1}};
+dbs_info_callback({row, Props}, #vacc{resp = Resp0} = Acc) ->
+ Prepend = couch_mrview_http:prepend_val(Acc),
+ Chunk = [Prepend, ?JSON_ENCODE({Props})],
+ {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, Chunk),
+ {ok, Acc#vacc{prepend = ",", resp = Resp1}};
+dbs_info_callback(complete, #vacc{resp = Resp0} = Acc) ->
+ {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "]"),
+ {ok, Resp2} = chttpd:end_delayed_json_response(Resp1),
+ {ok, Acc#vacc{resp = Resp2}};
+dbs_info_callback({error, Reason}, #vacc{resp = Resp0} = Acc) ->
+ {ok, Resp1} = chttpd:send_delayed_error(Resp0, Reason),
+ {ok, Acc#vacc{resp = Resp1}}.
handle_task_status_req(#httpd{method='GET'}=Req) ->
ok = chttpd:verify_is_server_admin(Req),
- {Replies, _BadNodes} = gen_server:multi_call(couch_task_status, all),
- Response = lists:flatmap(fun({Node, Tasks}) ->
- [{[{node,Node} | Task]} || Task <- Tasks]
- end, Replies),
- send_json(Req, lists:sort(Response));
+ ActiveTasks = fabric2_active_tasks:get_active_tasks(),
+ send_json(Req, ActiveTasks);
handle_task_status_req(Req) ->
send_method_not_allowed(Req, "GET,HEAD").
-handle_replicate_req(#httpd{method='POST', user_ctx=Ctx} = Req) ->
+handle_replicate_req(#httpd{method='POST', user_ctx=Ctx, req_body=PostBody} = Req) ->
chttpd:validate_ctype(Req, "application/json"),
%% see HACK in chttpd.erl about replication
- PostBody = get(post_body),
case replicate(PostBody, Ctx) of
{ok, {continuous, RepId}} ->
send_json(Req, 202, {[{ok, true}, {<<"_local_id">>, RepId}]});
@@ -279,12 +384,11 @@ handle_up_req(#httpd{method='GET'} = Req) ->
"nolb" ->
send_json(Req, 404, {[{status, nolb}]});
_ ->
- {ok, {Status}} = mem3_seeds:get_status(),
- case couch_util:get_value(status, Status) of
- ok ->
- send_json(Req, 200, {Status});
- seeding ->
- send_json(Req, 404, {Status})
+ try
+ fabric2_db:list_dbs([{limit, 0}]),
+ send_json(Req, 200, {[{status, ok}]})
+ catch error:{timeout, _} ->
+ send_json(Req, 404, {[{status, backend_unavailable}]})
end
end;
diff --git a/src/chttpd/src/chttpd_node.erl b/src/chttpd/src/chttpd_node.erl
index 033abd68d..1ca4bbd5e 100644
--- a/src/chttpd/src/chttpd_node.erl
+++ b/src/chttpd/src/chttpd_node.erl
@@ -70,7 +70,9 @@ handle_node_req(#httpd{method='PUT', path_parts=[_, Node, <<"_config">>, Section
Value = couch_util:trim(chttpd:json_body(Req)),
Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
OldValue = call_node(Node, config, get, [Section, Key, ""]),
- case call_node(Node, config, set, [Section, Key, ?b2l(Value), Persist]) of
+ IsSensitive = Section == <<"admins">>,
+ Opts = #{persist => Persist, sensitive => IsSensitive},
+ case call_node(Node, config, set, [Section, Key, ?b2l(Value), Opts]) of
ok ->
send_json(Req, 200, list_to_binary(OldValue));
{error, Reason} ->
diff --git a/src/chttpd/src/chttpd_show.erl b/src/chttpd/src/chttpd_show.erl
index a6d0368b9..8a15bdcbe 100644
--- a/src/chttpd/src/chttpd_show.erl
+++ b/src/chttpd/src/chttpd_show.erl
@@ -123,15 +123,14 @@ send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId) ->
JsonReq = chttpd_external:json_req_obj(Req, Db, DocId),
JsonDoc = couch_query_servers:json_doc(Doc),
Cmd = [<<"updates">>, UpdateName],
- W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
UpdateResp = couch_query_servers:ddoc_prompt(DDoc, Cmd, [JsonDoc, JsonReq]),
JsonResp = case UpdateResp of
[<<"up">>, {NewJsonDoc}, {JsonResp0}] ->
case chttpd:header_value(Req, "X-Couch-Full-Commit", "false") of
"true" ->
- Options = [full_commit, {user_ctx, Req#httpd.user_ctx}, {w, W}];
+ Options = [full_commit, {user_ctx, Req#httpd.user_ctx}];
_ ->
- Options = [{user_ctx, Req#httpd.user_ctx}, {w, W}]
+ Options = [{user_ctx, Req#httpd.user_ctx}]
end,
NewDoc = couch_db:doc_from_json_obj_validate(Db, {NewJsonDoc}),
couch_doc:validate_docid(NewDoc#doc.id),
diff --git a/src/chttpd/src/chttpd_stats.erl b/src/chttpd/src/chttpd_stats.erl
index 59ec9268d..27e9c3180 100644
--- a/src/chttpd/src/chttpd_stats.erl
+++ b/src/chttpd/src/chttpd_stats.erl
@@ -14,8 +14,8 @@
-export([
- init/0,
- report/2,
+ init/1,
+ report/1,
incr_reads/0,
incr_reads/1,
@@ -24,29 +24,40 @@
incr_writes/1,
incr_rows/0,
- incr_rows/1
+ incr_rows/1,
+
+ update_interval/1
]).
-record(st, {
reads = 0,
writes = 0,
- rows = 0
+ rows = 0,
+ reporter,
+ last_report_ts = 0,
+ interval,
+ request
}).
-define(KEY, chttpd_stats).
+-define(INTERVAL_IN_SEC, 60).
-
-init() ->
- put(?KEY, #st{}).
+init(Request) ->
+ Reporter = config:get("chttpd", "stats_reporter"),
+ Time = erlang:monotonic_time(second),
+ Interval = config:get_integer("chttpd", "stats_reporting_interval",
+ ?INTERVAL_IN_SEC),
+ put(?KEY, #st{reporter = Reporter, last_report_ts = Time,
+ interval = Interval, request = Request}).
-report(HttpReq, HttpResp) ->
+report(HttpResp) ->
try
case get(?KEY) of
#st{} = St ->
- report(HttpReq, HttpResp, St);
+ report(HttpResp, St);
_ ->
ok
end
@@ -57,19 +68,18 @@ report(HttpReq, HttpResp) ->
end.
-report(HttpReq, HttpResp, St) ->
- case config:get("chttpd", "stats_reporter") of
- undefined ->
- ok;
- ModStr ->
- Mod = list_to_existing_atom(ModStr),
- #st{
- reads = Reads,
- writes = Writes,
- rows = Rows
- } = St,
- Mod:report(HttpReq, HttpResp, Reads, Writes, Rows)
- end.
+report(_HttpResp, #st{reporter = undefined}) ->
+ ok;
+
+report(HttpResp, #st{reporter = Reporter} = St) ->
+ Mod = list_to_existing_atom(Reporter),
+ #st{
+ reads = Reads,
+ writes = Writes,
+ rows = Rows,
+ request = HttpReq
+ } = St,
+ Mod:report(HttpReq, HttpResp, Reads, Writes, Rows).
incr_reads() ->
@@ -101,7 +111,47 @@ incr(Idx, Count) ->
#st{} = St ->
Total = element(Idx, St) + Count,
NewSt = setelement(Idx, St, Total),
- put(?KEY, NewSt);
+ put(?KEY, NewSt),
+ maybe_report_intermittent(St);
+ _ ->
+ ok
+ end.
+
+
+maybe_report_intermittent(State) ->
+ #st{last_report_ts = LastTime, interval = Interval} = State,
+ CurrentTime = erlang:monotonic_time(second),
+ case CurrentTime - LastTime of
+ Change when Change >= Interval ->
+ % Since the response is not available while the request is
+ % still being processed, we pass undefined here. Modules that
+ % implement Mod:report(HttpReq, HttpResp, Reads, Writes, Rows)
+ % should be aware of this. Mod:report should also return a
+ % boolean indicating whether the counters should be reset
+ case ?MODULE:report(undefined) of
+ true ->
+ reset_stats(State, CurrentTime);
+ _ ->
+ ok
+ end;
_ ->
ok
end.
+
+
+update_interval(Interval) ->
+ case get(?KEY) of
+ #st{} = St ->
+ put(?KEY, St#st{interval = Interval});
+ _ ->
+ ok
+ end.
+
+
+reset_stats(State, NewTime) ->
+ put(?KEY, State#st{
+ reads = 0,
+ writes = 0,
+ rows = 0,
+ last_report_ts = NewTime
+ }).
diff --git a/src/chttpd/src/chttpd_sup.erl b/src/chttpd/src/chttpd_sup.erl
index d4bdb118c..8b51e6c40 100644
--- a/src/chttpd/src/chttpd_sup.erl
+++ b/src/chttpd/src/chttpd_sup.erl
@@ -18,17 +18,25 @@
-export([init/1]).
--export([start_link/1]).
+-export([start_link/0]).
-export([handle_config_change/5, handle_config_terminate/3]).
%% Helper macro for declaring children of supervisor
-define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 100, Type, [I]}).
-start_link(Args) ->
- supervisor:start_link({local,?MODULE}, ?MODULE, Args).
+start_link() ->
+ Arg = case fabric2_node_types:is_type(api_frontend) of
+ true -> normal;
+ false -> disabled
+ end,
+ supervisor:start_link({local,?MODULE}, ?MODULE, Arg).
-init([]) ->
+init(disabled) ->
+ couch_log:notice("~p : api_frontend disabled", [?MODULE]),
+ {ok, {{one_for_one, 3, 10}, []}};
+
+init(normal) ->
Children = [
{
config_listener_mon,
diff --git a/src/chttpd/src/chttpd_test_util.erl b/src/chttpd/src/chttpd_test_util.erl
index a1a08eff4..8930a5a5e 100644
--- a/src/chttpd/src/chttpd_test_util.erl
+++ b/src/chttpd/src/chttpd_test_util.erl
@@ -21,7 +21,7 @@ start_couch() ->
start_couch(?CONFIG_CHAIN).
start_couch(IniFiles) ->
- test_util:start_couch(IniFiles, [chttpd]).
+ test_util:start_couch(IniFiles, [couch_js, couch_views, chttpd]).
stop_couch(Ctx) ->
test_util:stop_couch(Ctx).
diff --git a/src/chttpd/src/chttpd_view.erl b/src/chttpd/src/chttpd_view.erl
index f73a8b7b1..8d401013c 100644
--- a/src/chttpd/src/chttpd_view.erl
+++ b/src/chttpd/src/chttpd_view.erl
@@ -14,58 +14,120 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_mrview/include/couch_mrview.hrl").
--export([handle_view_req/3, handle_temp_view_req/2]).
+-export([
+ handle_view_req/3,
+ validate_args/2,
+ parse_queries/4,
+ view_cb/2
+]).
+
+-define(DEFAULT_ALL_DOCS_PAGE_SIZE, 2000).
+-define(DEFAULT_VIEWS_PAGE_SIZE, 2000).
multi_query_view(Req, Db, DDoc, ViewName, Queries) ->
- Args0 = couch_mrview_http:parse_params(Req, undefined),
+ Args = couch_views_http:parse_params(Req, undefined),
+ case couch_views_util:is_paginated(Args) of
+ false ->
+ stream_multi_query_view(Req, Db, DDoc, ViewName, Args, Queries);
+ true ->
+ paginate_multi_query_view(Req, Db, DDoc, ViewName, Args, Queries)
+ end.
+
+
+stream_multi_query_view(Req, Db, DDoc, ViewName, Args0, Queries) ->
{ok, #mrst{views=Views}} = couch_mrview_util:ddoc_to_mrst(Db, DDoc),
Args1 = couch_mrview_util:set_view_type(Args0, ViewName, Views),
- ArgQueries = lists:map(fun({Query}) ->
- QueryArg = couch_mrview_http:parse_params(Query, undefined,
- Args1, [decoded]),
- QueryArg1 = couch_mrview_util:set_view_type(QueryArg, ViewName, Views),
- fabric_util:validate_args(Db, DDoc, QueryArg1)
- end, Queries),
- Options = [{user_ctx, Req#httpd.user_ctx}],
+ ArgQueries = parse_queries(Req, Args1, Queries, fun(QueryArg) ->
+ couch_mrview_util:set_view_type(QueryArg, ViewName, Views)
+ end),
VAcc0 = #vacc{db=Db, req=Req, prepend="\r\n"},
FirstChunk = "{\"results\":[",
{ok, Resp0} = chttpd:start_delayed_json_response(VAcc0#vacc.req, 200, [], FirstChunk),
VAcc1 = VAcc0#vacc{resp=Resp0},
VAcc2 = lists:foldl(fun(Args, Acc0) ->
- {ok, Acc1} = fabric:query_view(Db, Options, DDoc, ViewName,
- fun view_cb/2, Acc0, Args),
+ Fun = fun view_cb/2,
+ {ok, Acc1} = couch_views:query(Db, DDoc, ViewName, Fun, Acc0, Args),
Acc1
end, VAcc1, ArgQueries),
{ok, Resp1} = chttpd:send_delayed_chunk(VAcc2#vacc.resp, "\r\n]}"),
chttpd:end_delayed_json_response(Resp1).
+
+paginate_multi_query_view(Req, Db, DDoc, ViewName, Args0, Queries) ->
+ {ok, #mrst{views=Views}} = couch_mrview_util:ddoc_to_mrst(Db, DDoc),
+ ArgQueries = parse_queries(Req, Args0, Queries, fun(QueryArg) ->
+ couch_mrview_util:set_view_type(QueryArg, ViewName, Views)
+ end),
+ KeyFun = fun({Props}) ->
+ {couch_util:get_value(id, Props), couch_util:get_value(key, Props)}
+ end,
+ #mrargs{page_size = PageSize} = Args0,
+ #httpd{path_parts = Parts} = Req,
+ UpdateSeq = fabric2_db:get_update_seq(Db),
+ EtagTerm = {Parts, UpdateSeq, Args0},
+ Response = couch_views_http:paginated(
+ Req, EtagTerm, PageSize, ArgQueries, KeyFun,
+ fun(Args) ->
+ {ok, #vacc{meta=MetaMap, buffer=Items}} = couch_views:query(
+ Db, DDoc, ViewName, fun view_cb/2, #vacc{paginated=true}, Args),
+ {MetaMap, Items}
+ end),
+ chttpd:send_json(Req, Response).
+
+
design_doc_post_view(Req, Props, Db, DDoc, ViewName, Keys) ->
Args = couch_mrview_http:parse_body_and_query(Req, Props, Keys),
fabric_query_view(Db, Req, DDoc, ViewName, Args).
design_doc_view(Req, Db, DDoc, ViewName, Keys) ->
- Args = couch_mrview_http:parse_params(Req, Keys),
+ Args = couch_views_http:parse_params(Req, Keys),
fabric_query_view(Db, Req, DDoc, ViewName, Args).
+
fabric_query_view(Db, Req, DDoc, ViewName, Args) ->
+ case couch_views_util:is_paginated(Args) of
+ false ->
+ stream_fabric_query_view(Db, Req, DDoc, ViewName, Args);
+ true ->
+ paginate_fabric_query_view(Db, Req, DDoc, ViewName, Args)
+ end.
+
+
+stream_fabric_query_view(Db, Req, DDoc, ViewName, Args) ->
Max = chttpd:chunked_response_buffer_size(),
+ Fun = fun view_cb/2,
VAcc = #vacc{db=Db, req=Req, threshold=Max},
- Options = [{user_ctx, Req#httpd.user_ctx}],
- {ok, Resp} = fabric:query_view(Db, Options, DDoc, ViewName,
- fun view_cb/2, VAcc, Args),
+ {ok, Resp} = couch_views:query(Db, DDoc, ViewName, Fun, VAcc, Args),
{ok, Resp#vacc.resp}.
+paginate_fabric_query_view(Db, Req, DDoc, ViewName, Args0) ->
+ KeyFun = fun({Props}) ->
+ {couch_util:get_value(id, Props), couch_util:get_value(key, Props)}
+ end,
+ #httpd{path_parts = Parts} = Req,
+ UpdateSeq = fabric2_db:get_update_seq(Db),
+ ETagTerm = {Parts, UpdateSeq, Args0},
+ Response = couch_views_http:paginated(
+ Req, ETagTerm, Args0, KeyFun,
+ fun(Args) ->
+ VAcc0 = #vacc{paginated=true},
+ {ok, VAcc1} = couch_views:query(Db, DDoc, ViewName, fun view_cb/2, VAcc0, Args),
+ #vacc{meta=Meta, buffer=Items} = VAcc1,
+ {Meta, Items}
+ end),
+ chttpd:send_json(Req, Response).
+
view_cb({row, Row} = Msg, Acc) ->
case lists:keymember(doc, 1, Row) of
true -> chttpd_stats:incr_reads();
false -> ok
end,
chttpd_stats:incr_rows(),
- couch_mrview_http:view_cb(Msg, Acc);
+ couch_views_http:view_cb(Msg, Acc);
view_cb(Msg, Acc) ->
- couch_mrview_http:view_cb(Msg, Acc).
+ couch_views_http:view_cb(Msg, Acc).
handle_view_req(#httpd{method='POST',
@@ -102,9 +164,6 @@ handle_view_req(#httpd{method='POST',
handle_view_req(Req, _Db, _DDoc) ->
chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-handle_temp_view_req(Req, _Db) ->
- Msg = <<"Temporary views are not supported in CouchDB">>,
- chttpd:send_error(Req, 410, gone, Msg).
% See https://github.com/apache/couchdb/issues/2168
assert_no_queries_param(undefined) ->
@@ -116,6 +175,86 @@ assert_no_queries_param(_) ->
}).
+validate_args(Req, #mrargs{page_size = PageSize} = Args) when is_integer(PageSize) ->
+ MaxPageSize = max_page_size(Req),
+ couch_views_util:validate_args(Args, [{page_size, MaxPageSize}]);
+
+validate_args(_Req, #mrargs{} = Args) ->
+ couch_views_util:validate_args(Args, []).
+
+
+max_page_size(#httpd{path_parts=[_Db, <<"_all_docs">>, <<"queries">>]}) ->
+ config:get_integer(
+ "request_limits", "_all_docs/queries", ?DEFAULT_ALL_DOCS_PAGE_SIZE);
+
+max_page_size(#httpd{path_parts=[_Db, <<"_all_docs">>]}) ->
+ config:get_integer(
+ "request_limits", "_all_docs", ?DEFAULT_ALL_DOCS_PAGE_SIZE);
+
+max_page_size(#httpd{path_parts=[_Db, <<"_local_docs">>, <<"queries">>]}) ->
+ config:get_integer(
+ "request_limits", "_all_docs/queries", ?DEFAULT_ALL_DOCS_PAGE_SIZE);
+
+max_page_size(#httpd{path_parts=[_Db, <<"_local_docs">>]}) ->
+ config:get_integer(
+ "request_limits", "_all_docs", ?DEFAULT_ALL_DOCS_PAGE_SIZE);
+
+max_page_size(#httpd{path_parts=[_Db, <<"_design_docs">>, <<"queries">>]}) ->
+ config:get_integer(
+ "request_limits", "_all_docs/queries", ?DEFAULT_ALL_DOCS_PAGE_SIZE);
+
+max_page_size(#httpd{path_parts=[_Db, <<"_design_docs">>]}) ->
+ config:get_integer(
+ "request_limits", "_all_docs", ?DEFAULT_ALL_DOCS_PAGE_SIZE);
+
+max_page_size(#httpd{path_parts=[
+ _Db, <<"_design">>, _DDocName, <<"_view">>, _View, <<"queries">>]}) ->
+ config:get_integer(
+ "request_limits", "_view/queries", ?DEFAULT_VIEWS_PAGE_SIZE);
+
+max_page_size(#httpd{path_parts=[
+ _Db, <<"_design">>, _DDocName, <<"_view">>, _View]}) ->
+ config:get_integer(
+ "request_limits", "_view", ?DEFAULT_VIEWS_PAGE_SIZE).
+
+
+parse_queries(Req, #mrargs{page_size = PageSize} = Args0, Queries, Fun)
+ when is_integer(PageSize) ->
+ MaxPageSize = max_page_size(Req),
+ if length(Queries) < PageSize -> ok; true ->
+ throw({
+ query_parse_error,
+ <<"Provided number of queries is more than given page_size">>
+ })
+ end,
+ couch_views_util:validate_args(Fun(Args0), [{page_size, MaxPageSize}]),
+ Args = Args0#mrargs{page_size = undefined},
+ lists:map(fun({Query}) ->
+ Args1 = couch_views_http:parse_params(Query, undefined, Args, [decoded]),
+ if not is_integer(Args1#mrargs.page_size) -> ok; true ->
+ throw({
+ query_parse_error,
+ <<"You cannot specify `page_size` inside the query">>
+ })
+ end,
+ Args2 = maybe_set_page_size(Args1, MaxPageSize),
+ couch_views_util:validate_args(Fun(Args2), [{page_size, MaxPageSize}])
+ end, Queries);
+
+parse_queries(_Req, #mrargs{} = Args, Queries, Fun) ->
+ lists:map(fun({Query}) ->
+ Args1 = couch_views_http:parse_params(Query, undefined, Args, [decoded]),
+ couch_views_util:validate_args(Fun(Args1))
+ end, Queries).
+
+
+maybe_set_page_size(#mrargs{page_size = undefined} = Args, MaxPageSize) ->
+ Args#mrargs{page_size = MaxPageSize};
+
+maybe_set_page_size(#mrargs{} = Args, _MaxPageSize) ->
+ Args.
+
+
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -141,7 +280,7 @@ check_multi_query_reduce_view_overrides_test_() ->
t_check_include_docs_throw_validation_error() ->
?_test(begin
Req = #httpd{qs = []},
- Db = test_util:fake_db([{name, <<"foo">>}]),
+ Db = #{name => <<"foo">>},
Query = {[{<<"include_docs">>, true}]},
Throw = {query_parse_error, <<"`include_docs` is invalid for reduce">>},
?assertThrow(Throw, multi_query_view(Req, Db, ddoc, <<"v">>, [Query]))
@@ -151,7 +290,7 @@ t_check_include_docs_throw_validation_error() ->
t_check_user_can_override_individual_query_type() ->
?_test(begin
Req = #httpd{qs = []},
- Db = test_util:fake_db([{name, <<"foo">>}]),
+ Db = #{name => <<"foo">>},
Query = {[{<<"include_docs">>, true}, {<<"reduce">>, false}]},
multi_query_view(Req, Db, ddoc, <<"v">>, [Query]),
?assertEqual(1, meck:num_calls(chttpd, start_delayed_json_response, '_'))
@@ -162,7 +301,7 @@ setup_all() ->
Views = [#mrview{reduce_funs = [{<<"v">>, <<"_count">>}]}],
meck:expect(couch_mrview_util, ddoc_to_mrst, 2, {ok, #mrst{views = Views}}),
meck:expect(chttpd, start_delayed_json_response, 4, {ok, resp}),
- meck:expect(fabric, query_view, 7, {ok, #vacc{}}),
+ meck:expect(couch_views, query, 6, {ok, #vacc{}}),
meck:expect(chttpd, send_delayed_chunk, 2, {ok, resp}),
meck:expect(chttpd, end_delayed_json_response, 1, ok).
@@ -174,8 +313,8 @@ teardown_all(_) ->
setup() ->
meck:reset([
chttpd,
- couch_mrview_util,
- fabric
+ couch_views,
+ couch_mrview_util
]).
diff --git a/src/chttpd/test/eunit/chttpd_db_bulk_get_multipart_test.erl b/src/chttpd/test/eunit/chttpd_db_bulk_get_multipart_test.erl
index 86a8eab1a..c0095d59d 100644
--- a/src/chttpd/test/eunit/chttpd_db_bulk_get_multipart_test.erl
+++ b/src/chttpd/test/eunit/chttpd_db_bulk_get_multipart_test.erl
@@ -39,7 +39,7 @@ setup() ->
couch_epi,
couch_httpd,
couch_stats,
- fabric,
+ fabric2_db,
mochireq
]),
spawn_accumulator().
@@ -78,13 +78,13 @@ bulk_get_test_() ->
should_require_docs_field(_) ->
Req = fake_request({[{}]}),
- Db = test_util:fake_db([{name, <<"foo">>}]),
+ Db = #{name => <<"foo">>},
?_assertThrow({bad_request, _}, chttpd_db:db_req(Req, Db)).
should_not_accept_specific_query_params(_) ->
Req = fake_request({[{<<"docs">>, []}]}),
- Db = test_util:fake_db([{name, <<"foo">>}]),
+ Db = #{name => <<"foo">>},
lists:map(fun (Param) ->
{Param, ?_assertThrow({bad_request, _}, begin
BadReq = Req#httpd{qs = [{Param, ""}]},
@@ -95,7 +95,7 @@ should_not_accept_specific_query_params(_) ->
should_return_empty_results_on_no_docs(Pid) ->
Req = fake_request({[{<<"docs">>, []}]}),
- Db = test_util:fake_db([{name, <<"foo">>}]),
+ Db = #{name => <<"foo">>},
chttpd_db:db_req(Req, Db),
Results = get_results_from_response(Pid),
?_assertEqual([], Results).
@@ -104,7 +104,7 @@ should_return_empty_results_on_no_docs(Pid) ->
should_get_doc_with_all_revs(Pid) ->
DocId = <<"docudoc">>,
Req = fake_request(DocId),
- Db = test_util:fake_db([{name, <<"foo">>}]),
+ Db = #{name => <<"foo">>},
DocRevA = #doc{id = DocId, body = {[{<<"_rev">>, <<"1-ABC">>}]}},
DocRevB = #doc{id = DocId, body = {[{<<"_rev">>, <<"1-CDE">>}]}},
@@ -120,7 +120,7 @@ should_validate_doc_with_bad_id(Pid) ->
DocId = <<"_docudoc">>,
Req = fake_request(DocId),
- Db = test_util:fake_db([{name, <<"foo">>}]),
+ Db = #{name => <<"foo">>},
chttpd_db:db_req(Req, Db),
Result = get_results_from_response(Pid),
@@ -137,7 +137,7 @@ should_validate_doc_with_bad_rev(Pid) ->
Rev = <<"revorev">>,
Req = fake_request(DocId, Rev),
- Db = test_util:fake_db([{name, <<"foo">>}]),
+ Db = #{name => <<"foo">>},
chttpd_db:db_req(Req, Db),
Result = get_results_from_response(Pid),
@@ -154,7 +154,7 @@ should_validate_missing_doc(Pid) ->
Rev = <<"1-revorev">>,
Req = fake_request(DocId, Rev),
- Db = test_util:fake_db([{name, <<"foo">>}]),
+ Db = #{name => <<"foo">>},
mock_open_revs([{1,<<"revorev">>}], {ok, []}),
chttpd_db:db_req(Req, Db),
@@ -172,7 +172,7 @@ should_validate_bad_atts_since(Pid) ->
Rev = <<"1-revorev">>,
Req = fake_request(DocId, Rev, <<"badattsince">>),
- Db = test_util:fake_db([{name, <<"foo">>}]),
+ Db = #{name => <<"foo">>},
mock_open_revs([{1,<<"revorev">>}], {ok, []}),
chttpd_db:db_req(Req, Db),
@@ -190,14 +190,13 @@ should_include_attachments_when_atts_since_specified(_) ->
Rev = <<"1-revorev">>,
Req = fake_request(DocId, Rev, [<<"1-abc">>]),
- Db = test_util:fake_db([{name, <<"foo">>}]),
+ Db = #{name => <<"foo">>},
mock_open_revs([{1,<<"revorev">>}], {ok, []}),
chttpd_db:db_req(Req, Db),
- ?_assert(meck:called(fabric, open_revs,
- ['_', DocId, [{1, <<"revorev">>}],
- [{atts_since, [{1, <<"abc">>}]}, attachments,
- {user_ctx, undefined}]])).
+ Options = [{atts_since, [{1, <<"abc">>}]}, attachments],
+ ?_assert(meck:called(fabric2_db, open_doc_revs, ['_', DocId,
+ [{1, <<"revorev">>}], Options])).
%% helpers
@@ -217,7 +216,7 @@ fake_request(DocId, Rev, AttsSince) ->
mock_open_revs(RevsReq0, RevsResp) ->
- ok = meck:expect(fabric, open_revs,
+ ok = meck:expect(fabric2_db, open_doc_revs,
fun(_, _, RevsReq1, _) ->
?assertEqual(RevsReq0, RevsReq1),
RevsResp
@@ -259,7 +258,7 @@ mock(couch_stats) ->
ok = meck:expect(couch_stats, update_gauge, fun(_, _) -> ok end),
ok;
mock(fabric) ->
- ok = meck:new(fabric, [passthrough]),
+ ok = meck:new(fabric2_db, [passthrough]),
ok;
mock(config) ->
ok = meck:new(config, [passthrough]),
diff --git a/src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl b/src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl
index 1a3411254..0e4778371 100644
--- a/src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl
+++ b/src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl
@@ -99,7 +99,8 @@ should_get_doc_with_all_revs(Pid) ->
DocRevB = #doc{id = DocId, body = {[{<<"_rev">>, <<"1-CDE">>}]}},
mock_open_revs(all, {ok, [{ok, DocRevA}, {ok, DocRevB}]}),
- chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])),
+ Db = #{name => <<"foo">>},
+ chttpd_db:db_req(Req, Db),
[{Result}] = get_results_from_response(Pid),
?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)),
@@ -119,7 +120,8 @@ should_validate_doc_with_bad_id(Pid) ->
DocId = <<"_docudoc">>,
Req = fake_request(DocId),
- chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])),
+ Db = #{name => <<"foo">>},
+ chttpd_db:db_req(Req, Db),
[{Result}] = get_results_from_response(Pid),
?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)),
@@ -142,7 +144,8 @@ should_validate_doc_with_bad_rev(Pid) ->
Rev = <<"revorev">>,
Req = fake_request(DocId, Rev),
- chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])),
+ Db = #{name => <<"foo">>},
+ chttpd_db:db_req(Req, Db),
[{Result}] = get_results_from_response(Pid),
?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)),
@@ -166,7 +169,8 @@ should_validate_missing_doc(Pid) ->
Req = fake_request(DocId, Rev),
mock_open_revs([{1,<<"revorev">>}], {ok, []}),
- chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])),
+ Db = #{name => <<"foo">>},
+ chttpd_db:db_req(Req, Db),
[{Result}] = get_results_from_response(Pid),
?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)),
@@ -190,7 +194,8 @@ should_validate_bad_atts_since(Pid) ->
Req = fake_request(DocId, Rev, <<"badattsince">>),
mock_open_revs([{1,<<"revorev">>}], {ok, []}),
- chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])),
+ Db = #{name => <<"foo">>},
+ chttpd_db:db_req(Req, Db),
[{Result}] = get_results_from_response(Pid),
?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)),
@@ -214,12 +219,13 @@ should_include_attachments_when_atts_since_specified(_) ->
Req = fake_request(DocId, Rev, [<<"1-abc">>]),
mock_open_revs([{1,<<"revorev">>}], {ok, []}),
- chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])),
+ Db = #{name => <<"foo">>},
+ chttpd_db:db_req(Req, Db),
+
+ Options = [{atts_since, [{1, <<"abc">>}]}, attachments],
+ ?_assert(meck:called(fabric2_db, open_doc_revs, ['_', DocId,
+ [{1, <<"revorev">>}], Options])).
- ?_assert(meck:called(fabric, open_revs,
- ['_', DocId, [{1, <<"revorev">>}],
- [{atts_since, [{1, <<"abc">>}]}, attachments,
- {user_ctx, undefined}]])).
%% helpers
@@ -239,7 +245,7 @@ fake_request(DocId, Rev, AttsSince) ->
mock_open_revs(RevsReq0, RevsResp) ->
- ok = meck:expect(fabric, open_revs,
+ ok = meck:expect(fabric2_db, open_doc_revs,
fun(_, _, RevsReq1, _) ->
?assertEqual(RevsReq0, RevsReq1),
RevsResp
@@ -276,7 +282,7 @@ mock(couch_stats) ->
ok = meck:expect(couch_stats, update_gauge, fun(_, _) -> ok end),
ok;
mock(fabric) ->
- ok = meck:new(fabric, [passthrough]),
+ ok = meck:new(fabric2_db, [passthrough]),
ok;
mock(config) ->
ok = meck:new(config, [passthrough]),
diff --git a/src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl b/src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl
index 88e2797a3..2826cda24 100644
--- a/src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl
+++ b/src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl
@@ -29,6 +29,8 @@ setup() ->
Hashed = couch_passwords:hash_admin_password(?PASS),
ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
ok = config:set("couchdb", "max_document_size", "50"),
+ ok = config:set("couchdb", "max_bulk_docs_count", "2"),
+ ok = config:set("couchdb", "max_bulk_get_count", "2"),
TmpDb = ?tempdb(),
Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(chttpd, port),
@@ -39,7 +41,10 @@ setup() ->
teardown(Url) ->
delete_db(Url),
ok = config:delete("admins", ?USER, _Persist=false),
- ok = config:delete("couchdb", "max_document_size").
+ ok = config:delete("couchdb", "max_document_size"),
+ ok = config:delete("couchdb", "max_bulk_docs_count"),
+ ok = config:delete("couchdb", "max_bulk_get_count"),
+ ok.
create_db(Url) ->
{ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
@@ -67,6 +72,8 @@ all_test_() ->
fun post_single_doc/1,
fun put_single_doc/1,
fun bulk_doc/1,
+ fun bulk_docs_too_many_docs/1,
+ fun bulk_get_too_many_docs/1,
fun put_post_doc_attach_inline/1,
fun put_multi_part_related/1,
fun post_multi_part_form/1
@@ -100,6 +107,41 @@ bulk_doc(Url) ->
Expect = {[{<<"error">>,<<"document_too_large">>},{<<"reason">>,<<>>}]},
?_assertEqual(Expect, ResultJson).
+
+bulk_docs_too_many_docs(Url) ->
+ Docs = "{\"docs\": ["
+ "{\"doc1\": \"{}\"}, "
+ "{\"doc2\": \"{}\"}, "
+ "{\"doc3\": \"{}\"}"
+ "]}",
+ {ok, Code, _, ResultBody} = test_request:post(Url ++ "/_bulk_docs/",
+ [?CONTENT_JSON, ?AUTH], Docs),
+ ResultJson = ?JSON_DECODE(ResultBody),
+ ExpectJson = {[
+ {<<"error">>,<<"max_bulk_docs_count_exceeded">>},
+ {<<"reason">>,<<"2">>}
+ ]},
+ ?_assertEqual({413, ExpectJson}, {Code, ResultJson}).
+
+
+bulk_get_too_many_docs(Url) ->
+ Docs = lists:map(fun(_) ->
+ {ok, 201, _, Body} = test_request:post(Url,
+ [?CONTENT_JSON, ?AUTH], "{}"),
+ {Props} = ?JSON_DECODE(Body),
+ {lists:keydelete(<<"ok">>, 1, Props)}
+ end, [1, 2, 3, 4]),
+
+ {ok, Code, _, ResultBody} = test_request:post(Url ++ "/_bulk_get/",
+ [?CONTENT_JSON, ?AUTH], ?JSON_ENCODE({[{<<"docs">>, Docs}]})),
+ ResultJson = ?JSON_DECODE(ResultBody),
+ ExpectJson = {[
+ {<<"error">>,<<"max_bulk_get_count_exceeded">>},
+ {<<"reason">>,<<"2">>}
+ ]},
+ ?_assertEqual({413, ExpectJson}, {Code, ResultJson}).
+
+
put_post_doc_attach_inline(Url) ->
Body1 = "{\"body\":\"This is a body.\",",
Body2 = lists:concat(["{\"body\":\"This is a body it should fail",
diff --git a/src/chttpd/test/eunit/chttpd_db_test.erl b/src/chttpd/test/eunit/chttpd_db_test.erl
index 204332d7f..cebc3b6d6 100644
--- a/src/chttpd/test/eunit/chttpd_db_test.erl
+++ b/src/chttpd/test/eunit/chttpd_db_test.erl
@@ -73,8 +73,8 @@ all_test_() ->
fun should_return_update_seq_when_set_on_all_docs/1,
fun should_not_return_update_seq_when_unset_on_all_docs/1,
fun should_return_correct_id_on_doc_copy/1,
- fun should_return_400_for_bad_engine/1,
- fun should_not_change_db_proper_after_rewriting_shardmap/1,
+ fun should_ignore_engine_parameter/1,
+ fun should_return_only_one_ok_on_doc_copy/1,
fun should_succeed_on_all_docs_with_queries_keys/1,
fun should_succeed_on_all_docs_with_queries_limit_skip/1,
fun should_succeed_on_all_docs_with_multiple_queries/1,
@@ -269,6 +269,17 @@ should_return_correct_id_on_doc_copy(Url) ->
]
end)}.
+should_return_only_one_ok_on_doc_copy(Url) ->
+ {timeout, ?TIMEOUT, ?_test(begin
+ {ok, _, _, _} = create_doc(Url, "testdoc"),
+ {_, _, _, ResultBody} = test_request:copy(Url ++ "/testdoc",
+ [?CONTENT_JSON, ?AUTH, ?DESTHEADER1]),
+ {ResultJson} = jiffy:decode(ResultBody),
+ NumOks = length(lists:filter(fun({Key, _Value}) -> Key == <<"ok">> end, ResultJson)),
+ [
+ ?assertEqual(1, NumOks)
+ ]
+ end)}.
attachment_doc() ->
{ok, Data} = file:read_file(?FIXTURE_TXT),
@@ -282,7 +293,7 @@ attachment_doc() ->
]}.
-should_return_400_for_bad_engine(_) ->
+should_ignore_engine_parameter(_) ->
{timeout, ?TIMEOUT, ?_test(begin
TmpDb = ?tempdb(),
Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
@@ -290,30 +301,7 @@ should_return_400_for_bad_engine(_) ->
BaseUrl = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
Url = BaseUrl ++ "?engine=cowabunga",
{ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
- ?assertEqual(400, Status)
- end)}.
-
-
-should_not_change_db_proper_after_rewriting_shardmap(_) ->
- {timeout, ?TIMEOUT, ?_test(begin
- TmpDb = ?tempdb(),
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(chttpd, port),
- AdmPort = mochiweb_socket_server:get(couch_httpd, port),
-
- BaseUrl = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
- Url = BaseUrl ++ "?partitioned=true&q=1",
- {ok, 201, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
-
- ShardDbName = ?l2b(config:get("mem3", "shards_db", "_dbs")),
- {ok, ShardDb} = mem3_util:ensure_exists(ShardDbName),
- {ok, #doc{body = {Props}}} = couch_db:open_doc(
- ShardDb, TmpDb, [ejson_body]),
- Shards = mem3_util:build_shards(TmpDb, Props),
-
- {Prop2} = ?JSON_DECODE(?JSON_ENCODE({Props})),
- Shards2 = mem3_util:build_shards(TmpDb, Prop2),
- ?assertEqual(Shards2, Shards)
+ ?assertEqual(201, Status)
end)}.
@@ -341,7 +329,7 @@ should_succeed_on_all_docs_with_queries_limit_skip(Url) ->
{ResultJson} = ?JSON_DECODE(RespBody),
ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
{InnerJson} = lists:nth(1, ResultJsonBody),
- ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)),
+ ?assertEqual(null, couch_util:get_value(<<"offset">>, InnerJson)),
?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson)))
end)}.
@@ -359,7 +347,7 @@ should_succeed_on_all_docs_with_multiple_queries(Url) ->
{InnerJson1} = lists:nth(1, ResultJsonBody),
?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))),
{InnerJson2} = lists:nth(2, ResultJsonBody),
- ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)),
+ ?assertEqual(null, couch_util:get_value(<<"offset">>, InnerJson2)),
?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2)))
end)}.
@@ -389,7 +377,7 @@ should_succeed_on_design_docs_with_queries_limit_skip(Url) ->
{ResultJson} = ?JSON_DECODE(RespBody),
ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
{InnerJson} = lists:nth(1, ResultJsonBody),
- ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)),
+ ?assertEqual(null, couch_util:get_value(<<"offset">>, InnerJson)),
?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson)))
end)}.
@@ -407,7 +395,7 @@ should_succeed_on_design_docs_with_multiple_queries(Url) ->
{InnerJson1} = lists:nth(1, ResultJsonBody),
?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))),
{InnerJson2} = lists:nth(2, ResultJsonBody),
- ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)),
+ ?assertEqual(null, couch_util:get_value(<<"offset">>, InnerJson2)),
?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2)))
end)}.
diff --git a/src/chttpd/test/eunit/chttpd_dbs_info_test.erl b/src/chttpd/test/eunit/chttpd_dbs_info_test.erl
index 5b61d8831..6e11f3245 100644
--- a/src/chttpd/test/eunit/chttpd_dbs_info_test.erl
+++ b/src/chttpd/test/eunit/chttpd_dbs_info_test.erl
@@ -57,7 +57,7 @@ dbs_info_test_() ->
foreach,
fun setup/0, fun teardown/1,
[
- fun should_return_error_for_get_db_info/1,
+ fun should_return_for_get_db_info/1,
fun should_return_dbs_info_for_single_db/1,
fun should_return_dbs_info_for_multiple_dbs/1,
fun should_return_error_for_exceeded_keys/1,
@@ -69,15 +69,14 @@ dbs_info_test_() ->
}.
-should_return_error_for_get_db_info(Url) ->
+should_return_for_get_db_info(Url) ->
?_test(begin
{ok, Code, _, ResultBody} = test_request:get(Url ++ "/_dbs_info?"
- ++ "keys=[\"db1\"]", [?CONTENT_JSON, ?AUTH]),
- {Body} = jiffy:decode(ResultBody),
+ ++ "start_key=\"db1\"&end_key=\"db1\"", [?CONTENT_JSON, ?AUTH]),
+ Body = jiffy:decode(ResultBody, [return_maps]),
[
- ?assertEqual(<<"method_not_allowed">>,
- couch_util:get_value(<<"error">>, Body)),
- ?assertEqual(405, Code)
+ ?assertEqual(200, Code),
+ ?assertMatch([#{<<"db_name">> := <<"db1">>}], Body)
]
end).
diff --git a/src/chttpd/test/eunit/chttpd_delayed_test.erl b/src/chttpd/test/eunit/chttpd_delayed_test.erl
new file mode 100644
index 000000000..64232dcf8
--- /dev/null
+++ b/src/chttpd/test/eunit/chttpd_delayed_test.erl
@@ -0,0 +1,73 @@
+-module(chttpd_delayed_test).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-define(USER, "chttpd_view_test_admin").
+-define(PASS, "pass").
+-define(AUTH, {basic_auth, {?USER, ?PASS}}).
+-define(CONTENT_JSON, {"Content-Type", "application/json"}).
+-define(DDOC, "{\"_id\": \"_design/bar\", \"views\": {\"baz\":
+ {\"map\": \"function(doc) {emit(doc._id, doc._id);}\"}}}").
+
+-define(FIXTURE_TXT, ?ABS_PATH(?FILE)).
+-define(i2l(I), integer_to_list(I)).
+-define(TIMEOUT, 60). % seconds
+
+setup() ->
+ Hashed = couch_passwords:hash_admin_password(?PASS),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
+ ok = config:set("chttpd", "buffer_response", "true"),
+ TmpDb = ?tempdb(),
+ Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
+ Port = mochiweb_socket_server:get(chttpd, port),
+ Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
+ create_db(Url),
+ Url.
+
+teardown(Url) ->
+ delete_db(Url),
+ ok = config:delete("admins", ?USER, _Persist=false).
+
+create_db(Url) ->
+ {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
+ ?assert(Status =:= 201 orelse Status =:= 202).
+
+
+delete_db(Url) ->
+ {ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
+
+
+all_test_() ->
+ {
+ "chttpd delay tests",
+ {
+ setup,
+ fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun test_buffer_response_all_docs/1,
+ fun test_buffer_response_changes/1
+ ]
+ }
+ }
+ }.
+
+
+test_buffer_response_all_docs(Url) ->
+ assert_has_content_length(Url ++ "/_all_docs").
+
+
+test_buffer_response_changes(Url) ->
+ assert_has_content_length(Url ++ "/_changes").
+
+
+assert_has_content_length(Url) ->
+ {timeout, ?TIMEOUT, ?_test(begin
+ {ok, Code, Headers, _Body} = test_request:get(Url, [?AUTH]),
+ ?assertEqual(200, Code),
+ ?assert(lists:keymember("Content-Length", 1, Headers))
+ end)}.
+ 
\ No newline at end of file
diff --git a/src/chttpd/test/eunit/chttpd_deleted_dbs_test.erl b/src/chttpd/test/eunit/chttpd_deleted_dbs_test.erl
new file mode 100644
index 000000000..d6375c048
--- /dev/null
+++ b/src/chttpd/test/eunit/chttpd_deleted_dbs_test.erl
@@ -0,0 +1,234 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(chttpd_deleted_dbs_test).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-define(USER, "chttpd_db_test_admin").
+-define(PASS, "pass").
+-define(AUTH, {basic_auth, {?USER, ?PASS}}).
+-define(CONTENT_JSON, {"Content-Type", "application/json"}).
+
+
+setup() ->
+ Hashed = couch_passwords:hash_admin_password(?PASS),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
+ Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
+ Port = mochiweb_socket_server:get(chttpd, port),
+ lists:concat(["http://", Addr, ":", Port, "/"]).
+
+
+teardown(_Url) ->
+ ok = config:delete("couchdb", "enable_database_recovery", false),
+ ok = config:delete("admins", ?USER, _Persist=false).
+
+
+create_db(Url) ->
+ {ok, Status, _, _} = http(put, Url, ""),
+ ?assert(Status =:= 201 orelse Status =:= 202).
+
+
+delete_db(Url) ->
+ {ok, 200, _, _} = http(delete, Url).
+
+
+deleted_dbs_test_() ->
+ {
+ "chttpd deleted dbs tests",
+ {
+ setup,
+ fun chttpd_test_util:start_couch/0,
+ fun chttpd_test_util:stop_couch/1,
+ {
+ foreach,
+ fun setup/0,
+ fun teardown/1,
+ [
+ fun should_return_error_for_unsupported_method/1,
+ fun should_list_deleted_dbs/1,
+ fun should_list_deleted_dbs_info/1,
+ fun should_undelete_db/1,
+ fun should_remove_deleted_db/1,
+ fun should_undelete_db_to_target_db/1,
+ fun should_not_undelete_db_to_existing_db/1
+ ]
+ }
+ }
+ }.
+
+
+should_return_error_for_unsupported_method(Url) ->
+ ?_test(begin
+ {ok, Code, _, Body} = http(delete, mk_url(Url)),
+
+ ?assertEqual(405, Code),
+ ?assertEqual(<<"method_not_allowed">>, get_json(<<"error">>, Body))
+ end).
+
+
+should_list_deleted_dbs(Url) ->
+ ?_test(begin
+ DbName1 = create_and_delete_db(Url),
+ DbName2 = create_and_delete_db(Url),
+ {ok, Code, _, Body} = http(get, mk_url(Url)),
+ DeletedDbs = get_db_names(Body),
+
+ ?assertEqual(200, Code),
+ ?assertEqual(true, lists:member(DbName1, DeletedDbs)),
+ ?assertEqual(true, lists:member(DbName2, DeletedDbs))
+ end).
+
+
+should_list_deleted_dbs_info(Url) ->
+ ?_test(begin
+ DbName = create_and_delete_db(Url),
+ {ok, _, _, Body} = http(get, mk_url(Url, DbName)),
+ [{Props}] = jiffy:decode(Body),
+
+ ?assertEqual(DbName, couch_util:get_value(<<"db_name">>, Props))
+ end).
+
+
+should_undelete_db(Url) ->
+ ?_test(begin
+ DbName = create_and_delete_db(Url),
+ {ok, _, _, ResultBody} = http(get, mk_url(Url, DbName)),
+ [{Props}] = jiffy:decode(ResultBody),
+ TimeStamp = couch_util:get_value(<<"timestamp">>, Props),
+
+ ErlJSON = {[
+ {undelete, {[
+ {source, DbName},
+ {timestamp, TimeStamp}
+ ]}}
+ ]},
+
+ {ok, Code1, _, _} = http(get, Url ++ DbName),
+ ?assertEqual(404, Code1),
+
+ {ok, Code2, _, _} = http(post, mk_url(Url), ErlJSON),
+ ?assertEqual(200, Code2),
+
+ {ok, Code3, _, _} = http(get, Url ++ DbName),
+ ?assertEqual(200, Code3)
+ end).
+
+
+should_remove_deleted_db(Url) ->
+ ?_test(begin
+ DbName = create_and_delete_db(Url),
+ {ok, _, _, Body1} = http(get, mk_url(Url, DbName)),
+ [{Props}] = jiffy:decode(Body1),
+ TimeStamp = couch_util:get_value(<<"timestamp">>, Props),
+
+ {ok, Code, _, _} = http(delete, mk_url(Url, DbName, TimeStamp)),
+ ?assertEqual(200, Code),
+
+ {ok, _, _, Body2} = http(get, mk_url(Url, DbName)),
+ ?assertEqual([], jiffy:decode(Body2))
+ end).
+
+
+should_undelete_db_to_target_db(Url) ->
+ ?_test(begin
+ DbName = create_and_delete_db(Url),
+ {ok, _, _, Body} = http(get, mk_url(Url, DbName)),
+ [{Props}] = jiffy:decode(Body),
+ TimeStamp = couch_util:get_value(<<"timestamp">>, Props),
+
+ NewDbName = ?tempdb(),
+ ErlJSON = {[
+ {undelete, {[
+ {source, DbName},
+ {timestamp, TimeStamp},
+ {target, NewDbName}
+ ]}}
+ ]},
+
+ {ok, Code1, _, _} = http(get, Url ++ NewDbName),
+ ?assertEqual(404, Code1),
+
+ {ok, Code2, _, _} = http(post, mk_url(Url), ErlJSON),
+ ?assertEqual(200, Code2),
+
+ {ok, Code3, _, _} = http(get, Url ++ NewDbName),
+ ?assertEqual(200, Code3)
+ end).
+
+
+should_not_undelete_db_to_existing_db(Url) ->
+ ?_test(begin
+ DbName = create_and_delete_db(Url),
+ {ok, _, _, ResultBody} = http(get, mk_url(Url, DbName)),
+ [{Props}] = jiffy:decode(ResultBody),
+ TimeStamp = couch_util:get_value(<<"timestamp">>, Props),
+
+ NewDbName = ?tempdb(),
+ create_db(Url ++ NewDbName),
+ ErlJSON = {[
+ {undelete, {[
+ {source, DbName},
+ {timestamp, TimeStamp},
+ {target, NewDbName}
+ ]}}
+ ]},
+ {ok, Code2, _, ResultBody2} = http(post, mk_url(Url), ErlJSON),
+ ?assertEqual(412, Code2),
+ ?assertEqual(<<"file_exists">>, get_json(<<"error">>, ResultBody2))
+ end).
+
+
+create_and_delete_db(BaseUrl) ->
+ DbName = ?tempdb(),
+ DbUrl = BaseUrl ++ DbName,
+ create_db(DbUrl),
+ ok = config:set("couchdb", "enable_database_recovery", "true", false),
+ delete_db(DbUrl),
+ DbName.
+
+
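+% Dispatch dynamically to test_request:get/put/post/delete based on Verb.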
+http(Verb, Url) ->
+ Headers = [?CONTENT_JSON, ?AUTH],
+ test_request:Verb(Url, Headers).
+
+
+http(Verb, Url, Body) ->
+ Headers = [?CONTENT_JSON, ?AUTH],
+ test_request:Verb(Url, Headers, jiffy:encode(Body)).
+
+
+mk_url(Url) ->
+ Url ++ "/_deleted_dbs".
+
+
+mk_url(Url, DbName) ->
+ Url ++ "/_deleted_dbs?key=\"" ++ ?b2l(DbName) ++ "\"".
+
+
+mk_url(Url, DbName, TimeStamp) ->
+ Url ++ "/_deleted_dbs/" ++ ?b2l(DbName) ++ "?timestamp=\"" ++
+ ?b2l(TimeStamp) ++ "\"".
+
+
+get_json(Key, Body) ->
+ {Props} = jiffy:decode(Body),
+ couch_util:get_value(Key, Props).
+
+
+get_db_names(Body) ->
+ RevDbNames = lists:foldl(fun({DbInfo}, Acc) ->
+ DbName = couch_util:get_value(<<"db_name">>, DbInfo),
+ [DbName | Acc]
+ end, [], jiffy:decode(Body)),
+ lists:reverse(RevDbNames).
diff --git a/src/chttpd/test/eunit/chttpd_handlers_tests.erl b/src/chttpd/test/eunit/chttpd_handlers_tests.erl
index f3e8f5dcd..649d82e86 100644
--- a/src/chttpd/test/eunit/chttpd_handlers_tests.erl
+++ b/src/chttpd/test/eunit/chttpd_handlers_tests.erl
@@ -70,7 +70,7 @@ request_replicate(Url, Body) ->
Headers = [{"Content-Type", "application/json"}],
Handler = {chttpd_misc, handle_replicate_req},
request(post, Url, Headers, Body, Handler, fun(Req) ->
- chttpd:send_json(Req, 200, get(post_body))
+ chttpd:send_json(Req, 200, Req#httpd.req_body)
end).
request(Method, Url, Headers, Body, {M, F}, MockFun) ->
diff --git a/src/chttpd/test/eunit/chttpd_open_revs_error_test.erl b/src/chttpd/test/eunit/chttpd_open_revs_error_test.erl
deleted file mode 100644
index d53d370f8..000000000
--- a/src/chttpd/test/eunit/chttpd_open_revs_error_test.erl
+++ /dev/null
@@ -1,112 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_open_revs_error_test).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(USER, "chttpd_db_test_admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
--define(CONTENT_JSON, {"Content-Type", "application/json"}).
--define(CONTENT_MULTI_FORM, {"Content-Type",
- "multipart/form-data;boundary=\"bound\""}).
-
-setup() ->
- Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
- TmpDb = ?tempdb(),
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(chttpd, port),
- Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
- mock(fabric),
- create_db(Url),
- Url.
-
-teardown(Url) ->
- delete_db(Url),
- (catch meck:unload(fabric)),
- ok = config:delete("admins", ?USER, _Persist=false).
-
-create_db(Url) ->
- {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
- ?assert(Status =:= 201 orelse Status =:= 202).
-
-
-create_doc(Url, Id) ->
- test_request:put(Url ++ "/" ++ Id,
- [?CONTENT_JSON, ?AUTH], "{\"mr\": \"rockoartischocko\"}").
-
-delete_db(Url) ->
- {ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
-
-open_revs_error_test_() ->
- {
- "open revs error tests",
- {
- setup,
- fun chttpd_test_util:start_couch/0,
- fun chttpd_test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_return_503_error_for_open_revs_get/1,
- fun should_return_503_error_for_open_revs_post_form/1
- ]
- }
- }
- }.
-
-should_return_503_error_for_open_revs_get(Url) ->
- {ok, _, _, Body} = create_doc(Url, "testdoc"),
- {Json} = ?JSON_DECODE(Body),
- Ref = couch_util:get_value(<<"rev">>, Json, undefined),
- mock_open_revs({error, all_workers_died}),
- {ok, Code, _, _} = test_request:get(Url ++
- "/testdoc?rev=" ++ ?b2l(Ref), [?AUTH]),
- ?_assertEqual(503, Code).
-
-should_return_503_error_for_open_revs_post_form(Url) ->
- Port = mochiweb_socket_server:get(chttpd, port),
- Host = lists:concat([ "http://127.0.0.1:", Port]),
- Referer = {"Referer", Host},
- Body1 = "{\"body\":\"This is a body.\"}",
- DocBeg = "--bound\r\nContent-Disposition: form-data; name=\"_doc\"\r\n\r\n",
- DocRev = "--bound\r\nContent-Disposition: form-data; name=\"_rev\"\r\n\r\n",
- DocRest = "\r\n--bound\r\nContent-Disposition:"
- "form-data; name=\"_attachments\"; filename=\"file.txt\"\r\n"
- "Content-Type: text/plain\r\n\r\ncontents of file.txt\r\n\r\n"
- "--bound--",
- Doc1 = lists:concat([DocBeg, Body1, DocRest]),
- {ok, _, _, ResultBody} = test_request:post(Url ++ "/" ++ "RevDoc",
- [?CONTENT_MULTI_FORM, ?AUTH, Referer], Doc1),
- {Json} = ?JSON_DECODE(ResultBody),
- Ref = couch_util:get_value(<<"rev">>, Json, undefined),
- Doc2 = lists:concat([DocRev, ?b2l(Ref) , DocRest]),
-
- mock_open_revs({error, all_workers_died}),
- {ok, Code, _, ResultBody1} = test_request:post(Url ++ "/" ++ "RevDoc",
- [?CONTENT_MULTI_FORM, ?AUTH, Referer], Doc2),
- {Json1} = ?JSON_DECODE(ResultBody1),
- ErrorMessage = couch_util:get_value(<<"error">>, Json1),
- [
- ?_assertEqual(503, Code),
- ?_assertEqual(<<"service unvailable">>, ErrorMessage)
- ].
-
-mock_open_revs(RevsResp) ->
- ok = meck:expect(fabric, open_revs, fun(_, _, _, _) -> RevsResp end).
-
-mock(fabric) ->
- ok = meck:new(fabric, [passthrough]).
diff --git a/src/chttpd/test/eunit/chttpd_purge_tests.erl b/src/chttpd/test/eunit/chttpd_purge_tests.erl
index ab435682a..bc1fce0cd 100644
--- a/src/chttpd/test/eunit/chttpd_purge_tests.erl
+++ b/src/chttpd/test/eunit/chttpd_purge_tests.erl
@@ -13,6 +13,10 @@
-module(chttpd_purge_tests).
+% Remove when purge is implemented
+-compile(nowarn_unused_function).
+
+
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
@@ -62,7 +66,7 @@ delete_db(Url) ->
{ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
-purge_test_() ->
+purge_test_disabled() ->
{
"chttpd db tests",
{
diff --git a/src/chttpd/test/eunit/chttpd_security_tests.erl b/src/chttpd/test/eunit/chttpd_security_tests.erl
index 0bea9dbcd..8085f82a0 100644
--- a/src/chttpd/test/eunit/chttpd_security_tests.erl
+++ b/src/chttpd/test/eunit/chttpd_security_tests.erl
@@ -12,6 +12,9 @@
-module(chttpd_security_tests).
+% Remove when purge is implemented
+-compile(nowarn_unused_function).
+
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
@@ -38,14 +41,13 @@ setup() ->
ok = config:set("admins", ?USER, ?b2l(Hashed), Persist),
UserDb = ?tempdb(),
TmpDb = ?tempdb(),
- ok = config:set("chttpd_auth", "authentication_db", ?b2l(UserDb), Persist),
-
Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(chttpd, port),
BaseUrl = lists:concat(["http://", Addr, ":", Port, "/"]),
- Url = lists:concat([BaseUrl, ?b2l(TmpDb)]),
UsersUrl = lists:concat([BaseUrl, ?b2l(UserDb)]),
create_db(UsersUrl),
+ ok = config:set("chttpd_auth", "authentication_db", ?b2l(UserDb), Persist),
+ Url = lists:concat([BaseUrl, ?b2l(TmpDb)]),
create_db(Url),
create_design_doc(Url),
create_user(UsersUrl,?TEST_MEMBER,?TEST_MEMBER_PASS,[<<?TEST_MEMBER>>]),
@@ -56,6 +58,7 @@ setup() ->
teardown([Url,UsersUrl]) ->
delete_db(Url),
delete_db(UsersUrl),
+ ok = config:delete("chttpd_auth", "authentication_db", _Persist=false),
ok = config:delete("admins", ?USER, _Persist=false).
create_db(Url) ->
@@ -108,15 +111,21 @@ all_test_() ->
fun should_disallow_db_member_db_compaction/1,
fun should_allow_db_admin_db_compaction/1,
fun should_allow_admin_view_compaction/1,
- fun should_disallow_anonymous_view_compaction/1,
- fun should_allow_admin_db_view_cleanup/1,
- fun should_disallow_anonymous_db_view_cleanup/1,
- fun should_allow_admin_purge/1,
- fun should_disallow_anonymous_purge/1,
- fun should_disallow_db_member_purge/1,
- fun should_allow_admin_purged_infos_limit/1,
- fun should_disallow_anonymous_purged_infos_limit/1,
- fun should_disallow_db_member_purged_infos_limit/1
+ fun should_disallow_anonymous_view_compaction/1
+
+ % Re-enable when _view_cleanup is implemented
+ %
+ %fun should_allow_admin_db_view_cleanup/1,
+ %fun should_disallow_anonymous_db_view_cleanup/1,
+
+ % Re-enable when purge is implemented
+ %
+ %fun should_allow_admin_purge/1,
+ %fun should_disallow_anonymous_purge/1,
+ %fun should_disallow_db_member_purge/1,
+ %fun should_allow_admin_purged_infos_limit/1,
+ %fun should_disallow_anonymous_purged_infos_limit/1,
+ %fun should_disallow_db_member_purged_infos_limit/1
]
}
}
@@ -337,13 +346,11 @@ should_return_error_for_sec_obj_with_incorrect_roles_and_names(
Body = jiffy:encode({SecurityProperties}),
{ok, Status, _, RespBody} = test_request:put(SecurityUrl,
[?CONTENT_JSON, ?AUTH], Body),
- ResultJson = ?JSON_DECODE(RespBody),
+ ResultJson = couch_util:json_decode(RespBody, [return_maps]),
+ ExpectReason = <<"names must be a JSON list of strings">>,
[
?_assertEqual(500, Status),
- ?_assertEqual({[
- {<<"error">>,<<"error">>},
- {<<"reason">>,<<"no_majority">>}
- ]}, ResultJson)
+ ?_assertMatch(#{<<"reason">> := ExpectReason}, ResultJson)
].
should_return_error_for_sec_obj_with_incorrect_roles([Url,_UsersUrl]) ->
@@ -356,13 +363,11 @@ should_return_error_for_sec_obj_with_incorrect_roles([Url,_UsersUrl]) ->
Body = jiffy:encode({SecurityProperties}),
{ok, Status, _, RespBody} = test_request:put(SecurityUrl,
[?CONTENT_JSON, ?AUTH], Body),
- ResultJson = ?JSON_DECODE(RespBody),
+ ResultJson = couch_util:json_decode(RespBody, [return_maps]),
+ ExpectReason = <<"roles must be a JSON list of strings">>,
[
?_assertEqual(500, Status),
- ?_assertEqual({[
- {<<"error">>,<<"error">>},
- {<<"reason">>,<<"no_majority">>}
- ]}, ResultJson)
+ ?_assertMatch(#{<<"reason">> := ExpectReason}, ResultJson)
].
should_return_error_for_sec_obj_with_incorrect_names([Url,_UsersUrl]) ->
@@ -375,13 +380,11 @@ should_return_error_for_sec_obj_with_incorrect_names([Url,_UsersUrl]) ->
Body = jiffy:encode({SecurityProperties}),
{ok, Status, _, RespBody} = test_request:put(SecurityUrl,
[?CONTENT_JSON, ?AUTH], Body),
- ResultJson = ?JSON_DECODE(RespBody),
+ ResultJson = couch_util:json_decode(RespBody, [return_maps]),
+ ExpectReason = <<"names must be a JSON list of strings">>,
[
?_assertEqual(500, Status),
- ?_assertEqual({[
- {<<"error">>,<<"error">>},
- {<<"reason">>,<<"no_majority">>}
- ]}, ResultJson)
+ ?_assertMatch(#{<<"reason">> := ExpectReason}, ResultJson)
].
should_return_error_for_sec_obj_in_user_db([_,_UsersUrl]) ->
diff --git a/src/chttpd/test/eunit/chttpd_session_tests.erl b/src/chttpd/test/eunit/chttpd_session_tests.erl
new file mode 100644
index 000000000..a802d9ec2
--- /dev/null
+++ b/src/chttpd/test/eunit/chttpd_session_tests.erl
@@ -0,0 +1,74 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(chttpd_session_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include("chttpd_test.hrl").
+
+-define(USER, "chttpd_test_admin").
+-define(PASS, "pass").
+
+
+setup() ->
+ ok = config:delete("chttpd_auth", "authentication_db", _Persist=false),
+ Hashed = couch_passwords:hash_admin_password(?PASS),
+ ok = config:set("admins", ?USER, binary_to_list(Hashed), _Persist=false),
+ root_url() ++ "/_session".
+
+
+cleanup(_) ->
+ ok = config:delete("chttpd_auth", "authentication_db", _Persist=false),
+ ok = config:delete("admins", ?USER, _Persist=false).
+
+
+session_test_() ->
+ {
+ "Session tests",
+ {
+ setup,
+ fun() -> test_util:start_couch([fabric, chttpd]) end,
+ fun test_util:stop_couch/1,
+ {
+ foreach,
+ fun setup/0,
+ fun cleanup/1,
+ [
+ ?TDEF_FE(session_authentication_db_absent),
+ ?TDEF_FE(session_authentication_db_present)
+ ]
+ }
+ }
+ }.
+
+
+session_authentication_db_absent(Url) ->
+ ok = config:delete("chttpd_auth", "authentication_db", _Persist=false),
+ ?assertThrow({not_found, _}, session_authentication_db(Url)).
+
+
+session_authentication_db_present(Url) ->
+ Name = "_users",
+ ok = config:set("chttpd_auth", "authentication_db", Name, false),
+ ?assertEqual(list_to_binary(Name), session_authentication_db(Url)).
+
+
+session_authentication_db(Url) ->
+ {ok, 200, _, Body} = test_request:get(Url, [{basic_auth, {?USER, ?PASS}}]),
+ couch_util:get_nested_json_value(
+ jiffy:decode(Body), [<<"info">>, <<"authentication_db">>]).
+
+
+root_url() ->
+ Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
+ Port = mochiweb_socket_server:get(chttpd, port),
+ lists:concat(["http://", Addr, ":", Port]).
diff --git a/src/chttpd/test/eunit/chttpd_stats_tests.erl b/src/chttpd/test/eunit/chttpd_stats_tests.erl
new file mode 100644
index 000000000..1742285a1
--- /dev/null
+++ b/src/chttpd/test/eunit/chttpd_stats_tests.erl
@@ -0,0 +1,77 @@
+-module(chttpd_stats_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+
+start() ->
+ ok = application:start(config),
+ ok = application:start(couch_log).
+
+
+stop(_) ->
+ ok = application:stop(config),
+ ok = application:stop(couch_log).
+
+
+setup() ->
+ ok = meck:new(chttpd_stats, [passthrough]).
+
+
+teardown(_) ->
+ meck:unload(),
+ ok.
+
+
+
+chttpd_stats_test_() ->
+ {
+ "chttpd_stats tests",
+ {
+ setup,
+ fun start/0,
+ fun stop/1,
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun test_reset/1,
+ fun test_no_reset/1
+ ]
+ }
+ }
+ }.
+
+
+test_reset(_) ->
+ ?_test(begin
+ chttpd_stats:init(undefined),
+ chttpd_stats:incr_rows(3),
+ chttpd_stats:incr_rows(),
+ chttpd_stats:incr_writes(5),
+ chttpd_stats:incr_writes(),
+ chttpd_stats:incr_reads(),
+ chttpd_stats:incr_reads(2),
+ State1 = get(chttpd_stats),
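+ % after the increments above: reads 1+2=3, writes 5+1=6, rows 3+1=4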
+ ?assertMatch({st, 3, 6, 4, _, _, _, _}, State1),
+
+ ok = meck:expect(chttpd_stats, report, fun(_) -> true end),
+ % force a reset with 0 interval
+ chttpd_stats:update_interval(0),
+ % after this is called, the report should happen and rows should
+ % reset to 0
+ chttpd_stats:incr_rows(),
+ ResetState = get(chttpd_stats),
+ ?assertMatch({st, 0, 0, 0, _, _, _, _}, ResetState)
+ end).
+
+
+test_no_reset(_) ->
+ ?_test(begin
+ ok = meck:expect(chttpd_stats, report, fun(_) -> false end),
+ chttpd_stats:init(undefined),
+ chttpd_stats:update_interval(0),
+ chttpd_stats:incr_rows(),
+ State = get(chttpd_stats),
+ ?assertMatch({st, 0, 0, 1, _, _, _, _}, State)
+ end).
diff --git a/src/chttpd/test/eunit/chttpd_test.hrl b/src/chttpd/test/eunit/chttpd_test.hrl
new file mode 100644
index 000000000..6db97ec2b
--- /dev/null
+++ b/src/chttpd/test/eunit/chttpd_test.hrl
@@ -0,0 +1,35 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+% Borrowed from fabric2_test.hrl
+
+% Some test modules do not use with, so squash the unused fun compiler warning
+-compile([{nowarn_unused_function, [{with, 1}]}]).
+
+
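+% ?TDEF wraps a named test function into a {Name, Fun} pair for use with with/1;
+% ?TDEF_FE builds a foreach-compatible instantiator that passes the setup result
+% to the named test function, optionally under a timeout.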
+-define(TDEF(Name), {atom_to_list(Name), fun Name/1}).
+-define(TDEF(Name, Timeout), {atom_to_list(Name), Timeout, fun Name/1}).
+
+-define(TDEF_FE(Name), fun(Arg) -> {atom_to_list(Name), ?_test(Name(Arg))} end).
+-define(TDEF_FE(Name, Timeout), fun(Arg) -> {atom_to_list(Name), {timeout, Timeout, ?_test(Name(Arg))}} end).
+
+
+with(Tests) ->
+ fun(ArgsTuple) ->
+ lists:map(fun
+ ({Name, Fun}) ->
+ {Name, ?_test(Fun(ArgsTuple))};
+ ({Name, Timeout, Fun}) ->
+ {Name, {timeout, Timeout, ?_test(Fun(ArgsTuple))}}
+ end, Tests)
+ end.
diff --git a/src/chttpd/test/eunit/chttpd_view_test.erl b/src/chttpd/test/eunit/chttpd_view_test.erl
index 4c224bb4e..1744f97a1 100644
--- a/src/chttpd/test/eunit/chttpd_view_test.erl
+++ b/src/chttpd/test/eunit/chttpd_view_test.erl
@@ -99,7 +99,7 @@ should_succeed_on_view_with_queries_limit_skip(Url) ->
{ResultJson} = ?JSON_DECODE(RespBody),
ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
{InnerJson} = lists:nth(1, ResultJsonBody),
- ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)),
+ ?assertEqual(null, couch_util:get_value(<<"offset">>, InnerJson)),
?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson)))
end)}.
@@ -119,6 +119,6 @@ should_succeed_on_view_with_multiple_queries(Url) ->
{InnerJson1} = lists:nth(1, ResultJsonBody),
?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))),
{InnerJson2} = lists:nth(2, ResultJsonBody),
- ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)),
+ ?assertEqual(null, couch_util:get_value(<<"offset">>, InnerJson2)),
?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2)))
end)}.
diff --git a/src/chttpd/test/exunit/pagination_test.exs b/src/chttpd/test/exunit/pagination_test.exs
new file mode 100644
index 000000000..6544017df
--- /dev/null
+++ b/src/chttpd/test/exunit/pagination_test.exs
@@ -0,0 +1,1393 @@
+defmodule Couch.Test.Pagination do
+ use ExUnit.Case
+ import Couch.DBTest, only: [retry_until: 1]
+ alias Couch.DBTest, as: Utils
+
+ defp create_admin(user_name, password) do
+ hashed = String.to_charlist(:couch_passwords.hash_admin_password(password))
+ :config.set('admins', String.to_charlist(user_name), hashed, false)
+ end
+
+ defp base_url() do
+ addr = :config.get('chttpd', 'bind_address', '127.0.0.1')
+ port = :mochiweb_socket_server.get(:chttpd, :port)
+ "http://#{addr}:#{port}"
+ end
+
+ setup_all do
+ test_ctx =
+ :test_util.start_couch([:chttpd, :couch_jobs, :couch_views, :couch_eval, :couch_js])
+
+ :ok = create_admin("adm", "pass")
+
+ on_exit(fn ->
+ :test_util.stop_couch(test_ctx)
+ end)
+
+ %{
+ base_url: base_url(),
+ user: "adm",
+ pass: "pass"
+ }
+ end
+
+ defp with_session(context) do
+ session = Couch.login(context.user, context.pass, base_url: context.base_url)
+ %{session: session}
+ end
+
+ defp random_db(context) do
+ db_name = Utils.random_db_name("db")
+
+ on_exit(fn ->
+ delete_db(context.session, db_name)
+ end)
+
+ create_db(context.session, db_name)
+ %{db_name: db_name}
+ end
+
+ defp with_docs(context) do
+ assert Map.has_key?(context, :n_docs), "Please define '@describetag n_docs: 10'"
+ %{docs: create_docs(context.session, context.db_name, 1..context.n_docs)}
+ end
+
+ defp with_view(context) do
+ ddoc_id = "simple"
+
+ ddoc = %{
+ _id: "_design/#{ddoc_id}",
+ views: %{
+ all: %{
+ map: "function(doc) { emit(doc.string, doc) }"
+ }
+ }
+ }
+
+ create_doc(context.session, context.db_name, ddoc)
+ %{view_name: "all", ddoc_id: ddoc_id}
+ end
+
+ defp with_same_key_docs(context) do
+ assert Map.has_key?(context, :n_docs), "Please define '@describetag n_docs: 10'"
+ assert Map.has_key?(context, :page_size), "Please define '@describetag page_size: 4'"
+
+ docs =
+ for id <- 1..context.n_docs do
+ str_id = docid(id)
+ %{"_id" => str_id, "integer" => id, "string" => docid(div(id, context.page_size))}
+ end
+
+ docs =
+ docs
+ |> Enum.map(fn doc ->
+ created_doc = create_doc(context.session, context.db_name, doc)
+ Map.merge(doc, created_doc)
+ end)
+
+ %{docs: docs}
+ end
+
+ defp all_docs(context) do
+ assert Map.has_key?(context, :page_size), "Please define '@describetag page_size: 4'"
+
+ assert Map.has_key?(context, :descending),
+ "Please define '@describetag descending: false'"
+
+ resp =
+ Couch.Session.get(context.session, "/#{context.db_name}/_all_docs",
+ query: %{page_size: context.page_size, descending: context.descending}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+
+ %{
+ response: resp.body
+ }
+ end
+
+ defp paginate_queries(context, opts) do
+ paginate_queries(context, [], opts)
+ end
+
+ defp paginate_queries(context, _acc, opts) do
+ {paginate_opts, client_opts} = Keyword.split(opts, [:url, :direction])
+
+ resp =
+ Couch.Session.post(context.session, Keyword.get(paginate_opts, :url), client_opts)
+
+ results = resp.body["results"]
+ view_url = String.replace_suffix(Keyword.get(paginate_opts, :url), "/queries", "")
+
+ opts =
+ opts
+ |> Keyword.replace!(:url, view_url)
+ |> Keyword.delete(:body)
+
+ final =
+ Enum.map(results, fn result ->
+ paginate(context, result, [Map.get(result, "rows")], opts)
+ end)
+
+ final
+ end
+
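+ # Follow the bookmark named by :direction ("next" by default) from page to
+ # page, collecting each page's rows until the response no longer carries one.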
+ defp paginate(context, current, acc, opts) do
+ {paginate_opts, client_opts} = Keyword.split(opts, [:url, :direction])
+ direction_key = Keyword.get(paginate_opts, :direction, "next")
+
+ if Map.has_key?(current, direction_key) do
+ bookmark = current[direction_key]
+ client_opts = Keyword.replace!(client_opts, :query, %{bookmark: bookmark})
+
+ resp =
+ Couch.Session.get(context.session, Keyword.get(paginate_opts, :url), client_opts)
+
+ result = resp.body
+ paginate(context, result, [Map.get(result, "rows") | acc], opts)
+ else
+ Enum.reverse(acc)
+ end
+ end
+
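+ # Page through _all_docs following 'next' bookmarks, accumulating each
+ # response under :pages and bounding the page count to avoid looping forever.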
+ defp paginate(context) do
+ if Map.has_key?(context.response, "next") do
+ bookmark = context.response["next"]
+ pages = Map.get(context, :pages, [context.response])
+ assert length(pages) < div(context.n_docs, context.page_size) + 1
+
+ resp =
+ Couch.Session.get(context.session, "/#{context.db_name}/_all_docs",
+ query: %{bookmark: bookmark}
+ )
+
+ context =
+ Map.merge(context, %{
+ pages: [resp.body | pages],
+ response: resp.body
+ })
+
+ paginate(context)
+ else
+ context =
+ Map.update(context, :pages, [], fn acc ->
+ Enum.reverse(acc)
+ end)
+
+ context
+ end
+ end
+
+ def create_db(session, db_name, opts \\ []) do
+ retry_until(fn ->
+ resp = Couch.Session.put(session, "/#{db_name}", opts)
+ assert resp.status_code in [201, 202], "got error #{inspect(resp.body)}"
+ assert resp.body == %{"ok" => true}
+ {:ok, resp}
+ end)
+ end
+
+ defp delete_db(session, db_name) do
+ retry_until(fn ->
+ resp = Couch.Session.delete(session, "/#{db_name}")
+ assert resp.status_code in [200, 202, 404], "got error #{inspect(resp.body)}"
+ {:ok, resp}
+ end)
+ end
+
+ defp create_doc(session, db_name, body) do
+ {:ok, body} =
+ retry_until(fn ->
+ resp = Couch.Session.post(session, "/#{db_name}", body: body)
+ assert resp.status_code in [201, 202], "got error #{inspect(resp.body)}"
+ assert resp.body["ok"]
+ {:ok, resp.body}
+ end)
+
+ Map.delete(body, "ok")
+ end
+
+ defp create_docs(session, db_name, range) do
+ docs = make_docs(range)
+
+ docs
+ |> Enum.map(fn doc ->
+ created_doc = create_doc(session, db_name, doc)
+ Map.merge(doc, created_doc)
+ end)
+ end
+
+ defp docid(id) do
+ id |> Integer.to_string() |> String.pad_leading(3, "0")
+ end
+
+ defp make_docs(id_range) do
+ max = Enum.max(id_range)
+
+ for id <- id_range do
+ str_id = docid(id)
+ %{"_id" => str_id, "integer" => id, "string" => docid(max - id)}
+ end
+ end
+
+ describe "Legacy API (10 docs)" do
+ @describetag n_docs: 10
+ setup [:with_session, :random_db, :with_docs]
+
+ test ": _all_docs/queries", ctx do
+ queries = %{
+ queries: [%{descending: false}, %{descending: true}]
+ }
+
+ resp =
+ Couch.Session.post(ctx.session, "/#{ctx.db_name}/_all_docs/queries",
+ body: :jiffy.encode(queries)
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ [q1, q2] = resp.body["results"]
+ assert q1["rows"] == Enum.reverse(q2["rows"])
+ end
+ end
+
+ for descending <- [false, true] do
+ describe "Legacy API (10 docs) : _all_docs?descending=#{descending}" do
+ @describetag n_docs: 10
+ @describetag descending: descending
+ setup [:with_session, :random_db, :with_docs]
+
+ test "total_rows matches the length of rows array", ctx do
+ resp =
+ Couch.Session.get(ctx.session, "/#{ctx.db_name}/_all_docs",
+ query: %{descending: ctx.descending}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ body = resp.body
+ assert body["total_rows"] == length(body["rows"])
+ end
+
+ test "the rows are correctly sorted", ctx do
+ resp =
+ Couch.Session.get(ctx.session, "/#{ctx.db_name}/_all_docs",
+ query: %{descending: ctx.descending}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ body = resp.body
+ ids = Enum.map(body["rows"], fn row -> row["id"] end)
+
+ if ctx.descending do
+ assert Enum.reverse(Enum.sort(ids)) == ids
+ else
+ assert Enum.sort(ids) == ids
+ end
+ end
+
+ test "start_key is respected", ctx do
+ head_pos = 2
+ tail_pos = ctx.n_docs - head_pos
+ doc_ids = Enum.map(ctx.docs, fn doc -> doc["id"] end)
+
+ {start_pos, doc_ids} =
+ if ctx.descending do
+ {head_pos, Enum.reverse(Enum.drop(Enum.sort(doc_ids), -tail_pos))}
+ else
+ {tail_pos, Enum.drop(Enum.sort(doc_ids), tail_pos - 1)}
+ end
+
+ start_key = ~s("#{docid(start_pos)}")
+
+ resp =
+ Couch.Session.get(ctx.session, "/#{ctx.db_name}/_all_docs",
+ query: %{descending: ctx.descending, start_key: start_key}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ ids = Enum.map(resp.body["rows"], fn row -> row["id"] end)
+ assert doc_ids == ids
+ end
+
+ test "end_key is respected", ctx do
+ head_pos = 2
+ tail_pos = ctx.n_docs - head_pos
+ doc_ids = Enum.map(ctx.docs, fn doc -> doc["id"] end)
+
+ {end_pos, doc_ids} =
+ if ctx.descending do
+ {tail_pos, Enum.reverse(Enum.drop(Enum.sort(doc_ids), tail_pos - 1))}
+ else
+ {head_pos, Enum.drop(Enum.sort(doc_ids), -tail_pos)}
+ end
+
+ end_key = ~s("#{docid(end_pos)}")
+
+ resp =
+ Couch.Session.get(ctx.session, "/#{ctx.db_name}/_all_docs",
+ query: %{descending: ctx.descending, end_key: end_key}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ ids = Enum.map(resp.body["rows"], fn row -> row["id"] end)
+ assert doc_ids == ids
+ end
+
+ test "range between start_key and end_key works", ctx do
+ head_pos = 2
+ slice_size = 3
+ doc_ids = Enum.sort(Enum.map(ctx.docs, fn doc -> doc["id"] end))
+ # -1 due to 0 based indexing
+ # -2 is due to 0 based indexing and inclusive end
+ slice = Enum.slice(doc_ids, (head_pos - 1)..(head_pos + slice_size - 2))
+
+ {start_key, end_key, doc_ids} =
+ if ctx.descending do
+ reversed = Enum.reverse(slice)
+ [first | _] = reversed
+ [last | _] = slice
+ {~s("#{first}"), ~s("#{last}"), reversed}
+ else
+ [first | _] = slice
+ [last | _] = Enum.reverse(slice)
+ {~s("#{first}"), ~s("#{last}"), slice}
+ end
+
+ assert length(doc_ids) == slice_size
+
+ resp =
+ Couch.Session.get(ctx.session, "/#{ctx.db_name}/_all_docs",
+ query: %{descending: ctx.descending, start_key: start_key, end_key: end_key}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ ids = Enum.map(resp.body["rows"], fn row -> row["id"] end)
+ assert doc_ids == ids
+ end
+ end
+ end
+
+ describe "Legacy API (10 docs) : /{db}/_design/{ddoc}/_view" do
+ @describetag n_docs: 10
+ @describetag descending: false
+ @describetag page_size: 4
+ setup [:with_session, :random_db, :with_view, :with_docs]
+
+ test "total_rows matches the length of rows array", ctx do
+ resp =
+ Couch.Session.get(
+ ctx.session,
+ "/#{ctx.db_name}/_design/#{ctx.ddoc_id}/_view/#{ctx.view_name}",
+ query: %{descending: ctx.descending}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ body = resp.body
+ assert body["total_rows"] == length(body["rows"])
+ end
+ end
+
+ describe "Legacy API (10 docs) : /{db}/_design/{ddoc}/_view/queries" do
+ @describetag n_docs: 10
+ @describetag page_size: 4
+ setup [:with_session, :random_db, :with_view, :with_docs]
+
+ test "descending is respected", ctx do
+ queries = %{
+ queries: [%{descending: false}, %{descending: true}]
+ }
+
+ resp =
+ Couch.Session.post(
+ ctx.session,
+ "/#{ctx.db_name}/_design/#{ctx.ddoc_id}/_view/#{ctx.view_name}/queries",
+ body: :jiffy.encode(queries)
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+
+ [q1, q2] = resp.body["results"]
+ q1 = Enum.map(q1["rows"], fn row -> row["key"] end)
+ q2 = Enum.map(q2["rows"], fn row -> row["key"] end)
+ assert q1 == Enum.reverse(q2)
+ assert q1 == Enum.sort(q1)
+ end
+
+ test "ensure we paginate starting from first query", ctx do
+ queries = %{
+ queries: [%{descending: false}, %{descending: true}]
+ }
+
+ resp =
+ Couch.Session.post(
+ ctx.session,
+ "/#{ctx.db_name}/_design/#{ctx.ddoc_id}/_view/#{ctx.view_name}/queries",
+ query: %{page_size: ctx.page_size},
+ body: :jiffy.encode(queries)
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+
+ [q1, q2] = resp.body["results"]
+ q1 = Enum.map(q1["rows"], fn row -> row["key"] end)
+ q2 = Enum.map(q2["rows"], fn row -> row["key"] end)
+ assert ctx.page_size == length(q1)
+ assert q2 == []
+ end
+ end
+
+ describe "Pagination API (10 docs)" do
+ @describetag n_docs: 10
+ @describetag page_size: 4
+ setup [:with_session, :random_db, :with_docs]
+
+ test ": _all_docs?page_size=4", ctx do
+ %{session: session, db_name: db_name} = ctx
+
+ resp =
+ Couch.Session.get(session, "/#{db_name}/_all_docs",
+ query: %{page_size: ctx.page_size}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ end
+
+ test ": _all_docs?page_size=4 should respect limit", ctx do
+ %{session: session, db_name: db_name} = ctx
+
+ resp =
+ Couch.Session.get(session, "/#{db_name}/_all_docs",
+ query: %{page_size: ctx.page_size, limit: ctx.page_size - 2}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ assert length(resp.body["rows"]) == ctx.page_size - 2
+ assert not Map.has_key?(resp.body, "next")
+
+ resp =
+ Couch.Session.get(session, "/#{db_name}/_all_docs",
+ query: %{page_size: ctx.page_size, limit: ctx.page_size - 1}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ assert length(resp.body["rows"]) == ctx.page_size - 1
+ assert not Map.has_key?(resp.body, "next")
+
+ resp =
+ Couch.Session.get(session, "/#{db_name}/_all_docs",
+ query: %{page_size: ctx.page_size, limit: ctx.page_size}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ assert length(resp.body["rows"]) == ctx.page_size
+ assert not Map.has_key?(resp.body, "next")
+
+ resp =
+ Couch.Session.get(session, "/#{db_name}/_all_docs",
+ query: %{page_size: ctx.page_size, limit: ctx.page_size + 1}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ assert length(resp.body["rows"]) == ctx.page_size
+ assert Map.has_key?(resp.body, "next")
+
+ resp =
+ Couch.Session.get(session, "/#{db_name}/_all_docs",
+ query: %{page_size: ctx.page_size, limit: ctx.page_size + 2}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ assert length(resp.body["rows"]) == ctx.page_size
+ assert Map.has_key?(resp.body, "next")
+ end
+
+ test ": _all_docs/queries should limit number of queries", ctx do
+ queries = %{
+ queries: [%{}, %{}, %{}, %{}, %{}]
+ }
+
+ resp =
+ Couch.Session.post(ctx.session, "/#{ctx.db_name}/_all_docs/queries",
+ query: %{page_size: ctx.page_size},
+ body: :jiffy.encode(queries)
+ )
+
+ assert resp.status_code == 400
+
+ assert resp.body["reason"] ==
+ "Provided number of queries is more than given page_size"
+ end
+
+ test ": _all_docs/queries should forbid `page_size` in queries", ctx do
+ queries = %{
+ queries: [%{page_size: 3}]
+ }
+
+ resp =
+ Couch.Session.post(ctx.session, "/#{ctx.db_name}/_all_docs/queries",
+ query: %{page_size: ctx.page_size},
+ body: :jiffy.encode(queries)
+ )
+
+ assert resp.status_code == 400
+
+ assert resp.body["reason"] ==
+ "You cannot specify `page_size` inside the query"
+ end
+
+ test ": _all_docs should forbid `page_size` and `keys`", ctx do
+ body = %{
+ page_size: 3,
+ keys: [
+ "002",
+ "004"
+ ]
+ }
+
+ resp =
+ Couch.Session.post(ctx.session, "/#{ctx.db_name}/_all_docs",
+ body: :jiffy.encode(body)
+ )
+
+ assert resp.status_code == 400
+
+ assert resp.body["reason"] ==
+ "`page_size` is incompatible with `keys`"
+ end
+
+ test ": _all_docs should limit 'skip' parameter", ctx do
+ resp =
+ Couch.Session.get(ctx.session, "/#{ctx.db_name}/_all_docs",
+ query: %{page_size: ctx.page_size, skip: 3000}
+ )
+
+ assert resp.status_code == 400
+
+ assert resp.body["reason"] ==
+ "`skip` should be an integer in range [0 .. 2000]"
+ end
+
+ test ": _all_docs should forbid extra parameters when 'bookmark' is present", ctx do
+ resp =
+ Couch.Session.get(ctx.session, "/#{ctx.db_name}/_all_docs",
+ query: %{page_size: ctx.page_size, skip: 3000, bookmark: ""}
+ )
+
+ assert resp.status_code == 400
+
+ assert resp.body["reason"] ==
+ "Cannot use `bookmark` with other options"
+ end
+ end
+
+ for descending <- [false, true] do
+ for n <- [4, 9] do
+ describe "Pagination API (10 docs) : _all_docs?page_size=#{n}&descending=#{
+ descending
+ }" do
+ @describetag n_docs: 10
+ @describetag descending: descending
+ @describetag page_size: n
+ setup [:with_session, :random_db, :with_docs, :all_docs]
+
+ test "should return 'next' bookmark", ctx do
+ body = ctx.response
+ assert Map.has_key?(body, "next")
+ end
+
+ test "total_rows matches the length of rows array", ctx do
+ body = ctx.response
+ assert body["total_rows"] == length(body["rows"])
+ end
+
+ test "total_rows matches the requested page_size", ctx do
+ body = ctx.response
+ assert body["total_rows"] == ctx.page_size
+ end
+
+ test "can use 'next' bookmark to get remaining results", ctx do
+ bookmark = ctx.response["next"]
+
+ resp =
+ Couch.Session.get(ctx.session, "/#{ctx.db_name}/_all_docs",
+ query: %{bookmark: bookmark}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ body = resp.body
+ assert body["total_rows"] == length(body["rows"])
+ assert body["total_rows"] <= ctx.page_size
+ end
+ end
+
+ describe "Pagination API (10 docs) : _all_docs?page_size=#{n}&descending=#{
+ descending
+ } : range" do
+ @describetag n_docs: 10
+ @describetag descending: descending
+ @describetag page_size: n
+ setup [:with_session, :random_db, :with_docs]
+
+ test "start_key is respected", ctx do
+ head_pos = 2
+ tail_pos = ctx.n_docs - head_pos
+ doc_ids = Enum.map(ctx.docs, fn doc -> doc["id"] end)
+
+ {start_pos, doc_ids} =
+ if ctx.descending do
+ {head_pos, Enum.reverse(Enum.drop(Enum.sort(doc_ids), -tail_pos))}
+ else
+ {tail_pos, Enum.drop(Enum.sort(doc_ids), tail_pos - 1)}
+ end
+
+ start_key = ~s("#{docid(start_pos)}")
+
+ resp =
+ Couch.Session.get(ctx.session, "/#{ctx.db_name}/_all_docs",
+ query: %{descending: ctx.descending, start_key: start_key}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ ids = Enum.map(resp.body["rows"], fn row -> row["id"] end)
+ assert doc_ids == ids
+ end
+
+ test "end_key is respected", ctx do
+ head_pos = 2
+ tail_pos = ctx.n_docs - head_pos
+ doc_ids = Enum.map(ctx.docs, fn doc -> doc["id"] end)
+
+ {end_pos, doc_ids} =
+ if ctx.descending do
+ {tail_pos, Enum.reverse(Enum.drop(Enum.sort(doc_ids), tail_pos - 1))}
+ else
+ {head_pos, Enum.drop(Enum.sort(doc_ids), -tail_pos)}
+ end
+
+ end_key = ~s("#{docid(end_pos)}")
+
+ resp =
+ Couch.Session.get(ctx.session, "/#{ctx.db_name}/_all_docs",
+ query: %{descending: ctx.descending, end_key: end_key}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ ids = Enum.map(resp.body["rows"], fn row -> row["id"] end)
+ assert doc_ids == ids
+ end
+
+ test "range between start_key and end_key works", ctx do
+ head_pos = 2
+ slice_size = 3
+ doc_ids = Enum.sort(Enum.map(ctx.docs, fn doc -> doc["id"] end))
+ # -1 due to 0 based indexing
+ # -2 is due to 0 based indexing and inclusive end
+ slice = Enum.slice(doc_ids, (head_pos - 1)..(head_pos + slice_size - 2))
+
+ {start_key, end_key, doc_ids} =
+ if ctx.descending do
+ reversed = Enum.reverse(slice)
+ [first | _] = reversed
+ [last | _] = slice
+ {~s("#{first}"), ~s("#{last}"), reversed}
+ else
+ [first | _] = slice
+ [last | _] = Enum.reverse(slice)
+ {~s("#{first}"), ~s("#{last}"), slice}
+ end
+
+ assert length(doc_ids) == slice_size
+
+ resp =
+ Couch.Session.get(ctx.session, "/#{ctx.db_name}/_all_docs",
+ query: %{descending: ctx.descending, start_key: start_key, end_key: end_key}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ ids = Enum.map(resp.body["rows"], fn row -> row["id"] end)
+ assert doc_ids == ids
+ end
+ end
+ end
+ end
+
+ for descending <- [false, true] do
+ for n <- [4, 9] do
+ describe "Pagination API (10 docs) : _all_docs?page_size=#{n}&descending=#{
+ descending
+ } : pages" do
+ @describetag n_docs: 10
+ @describetag descending: descending
+ @describetag page_size: n
+ setup [:with_session, :random_db, :with_docs, :all_docs, :paginate]
+
+ test "final page doesn't include 'next' bookmark", ctx do
+ assert not Map.has_key?(ctx.response, "next")
+ assert ctx.response["total_rows"] == rem(ctx.n_docs, ctx.page_size)
+ end
+
+ test "each but last page has page_size rows", ctx do
+ pages = Enum.drop(ctx.pages, -1)
+
+ assert Enum.all?(pages, fn resp ->
+ length(resp["rows"]) == ctx.page_size
+ end)
+ end
+
+ test "sum of rows on all pages is equal to number of documents", ctx do
+ pages = ctx.pages
+ n = Enum.reduce(pages, 0, fn resp, acc -> acc + length(resp["rows"]) end)
+ assert n == ctx.n_docs
+ end
+
+ test "the rows are correctly sorted", ctx do
+ pages = ctx.pages
+
+ ids =
+ Enum.reduce(pages, [], fn resp, acc ->
+ acc ++ Enum.map(resp["rows"], fn row -> row["id"] end)
+ end)
+
+ if ctx.descending do
+ assert Enum.reverse(Enum.sort(ids)) == ids
+ else
+ assert Enum.sort(ids) == ids
+ end
+ end
+ end
+ end
+ end
+
+ for n <- 10..11 do
+ describe "Pagination API (10 docs) : _all_docs?page_size=#{n}" do
+ @describetag n_docs: 10
+ @describetag descending: false
+ @describetag page_size: n
+ setup [:with_session, :random_db, :with_docs, :all_docs]
+
+ test "should not return 'next' bookmark", ctx do
+ body = ctx.response
+ assert not Map.has_key?(body, "next")
+ end
+
+ test "total_rows matches the length of rows array", ctx do
+ body = ctx.response
+ assert body["total_rows"] == length(body["rows"])
+ end
+
+ test "total_rows less than the requested page_size", ctx do
+ body = ctx.response
+ assert body["total_rows"] <= ctx.page_size
+ end
+ end
+ end
+
+ for descending <- [false, true] do
+ for n <- [4, 9] do
+ describe "Pagination API (10 docs) : _all_docs/queries?page_size=#{n}&descending=#{
+ descending
+ } : pages" do
+ @describetag n_docs: 10
+ @describetag descending: descending
+ @describetag page_size: n
+
+ @describetag queries: %{
+ queries: [
+ %{
+ descending: true
+ },
+ %{
+ limit: n + 1,
+ skip: 2
+ }
+ ]
+ }
+
+ setup [:with_session, :random_db, :with_docs]
+
+ test "one of the results contains 'next' bookmark", ctx do
+ resp =
+ Couch.Session.post(ctx.session, "/#{ctx.db_name}/_all_docs/queries",
+ query: %{page_size: ctx.page_size, descending: ctx.descending},
+ body: :jiffy.encode(ctx.queries)
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ results = resp.body["results"]
+ assert Enum.any?(results, fn result -> Map.has_key?(result, "next") end)
+ end
+
+ test "each 'next' bookmark is working", ctx do
+ resp =
+ Couch.Session.post(ctx.session, "/#{ctx.db_name}/_all_docs/queries",
+ query: %{page_size: ctx.page_size, descending: ctx.descending},
+ body: :jiffy.encode(ctx.queries)
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ results = resp.body["results"]
+
+ bookmarks =
+ results
+ |> Enum.filter(fn result -> Map.has_key?(result, "next") end)
+ |> Enum.map(fn result -> Map.get(result, "next") end)
+
+ assert [] != bookmarks
+
+ Enum.each(bookmarks, fn bookmark ->
+ resp =
+ Couch.Session.get(ctx.session, "/#{ctx.db_name}/_all_docs",
+ query: %{bookmark: bookmark}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ assert [] != resp.body["rows"]
+ end)
+
+ assert Enum.any?(results, fn result -> Map.has_key?(result, "next") end)
+ end
+
+ test "can post bookmarks to queries", ctx do
+ resp =
+ Couch.Session.post(ctx.session, "/#{ctx.db_name}/_all_docs/queries",
+ query: %{page_size: ctx.page_size, descending: ctx.descending},
+ body: :jiffy.encode(ctx.queries)
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ results = resp.body["results"]
+
+ queries =
+ results
+ |> Enum.filter(fn result -> Map.has_key?(result, "next") end)
+ |> Enum.map(fn result -> %{bookmark: Map.get(result, "next")} end)
+
+ resp =
+ Couch.Session.post(ctx.session, "/#{ctx.db_name}/_all_docs/queries",
+ body: :jiffy.encode(%{queries: queries})
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+
+ Enum.each(resp.body["results"], fn result ->
+ assert [] != result["rows"]
+ end)
+ end
+
+ test "respect request page_size", ctx do
+ resp =
+ Couch.Session.post(ctx.session, "/#{ctx.db_name}/_all_docs/queries",
+ query: %{page_size: ctx.page_size, descending: ctx.descending},
+ body: :jiffy.encode(ctx.queries)
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ results = resp.body["results"]
+
+ Enum.each(results ++ resp.body["results"], fn result ->
+ assert length(result["rows"]) <= ctx.page_size
+ end)
+ end
+
+ test "independent page_size in the bookmark", ctx do
+ resp =
+ Couch.Session.post(ctx.session, "/#{ctx.db_name}/_all_docs/queries",
+ query: %{page_size: ctx.page_size, descending: ctx.descending},
+ body: :jiffy.encode(ctx.queries)
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+
+ queries =
+ resp.body["results"]
+ |> Enum.filter(fn result -> Map.has_key?(result, "next") end)
+ |> Enum.map(fn result -> %{bookmark: Map.get(result, "next")} end)
+
+ resp =
+ Couch.Session.post(ctx.session, "/#{ctx.db_name}/_all_docs/queries",
+ body: :jiffy.encode(%{queries: queries})
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+
+ Enum.each(resp.body["results"], fn result ->
+ assert length(result["rows"]) > ctx.page_size
+ end)
+ end
+ end
+ end
+ end
+
+ for descending <- [false, true] do
+ for n <- [4, 9] do
+ describe "Pagination API (10 docs) : /{db}/_design/{ddoc}/_view?page_size=#{n}&descending=#{
+ descending
+ }" do
+ @describetag n_docs: 10
+ @describetag descending: descending
+ @describetag page_size: n
+ setup [:with_session, :random_db, :with_view, :with_docs]
+
+ test "should return 'next' bookmark", ctx do
+ resp =
+ Couch.Session.get(
+ ctx.session,
+ "/#{ctx.db_name}/_design/#{ctx.ddoc_id}/_view/#{ctx.view_name}",
+ query: %{page_size: ctx.page_size, descending: ctx.descending}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ assert Map.has_key?(resp.body, "next")
+ end
+
+ test "first page should not return 'previous' bookmark", ctx do
+ resp =
+ Couch.Session.get(
+ ctx.session,
+ "/#{ctx.db_name}/_design/#{ctx.ddoc_id}/_view/#{ctx.view_name}",
+ query: %{page_size: ctx.page_size, descending: ctx.descending}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ assert not Map.has_key?(resp.body, "previous")
+ end
+
+ test "total_rows matches the length of rows array", ctx do
+ resp =
+ Couch.Session.get(
+ ctx.session,
+ "/#{ctx.db_name}/_design/#{ctx.ddoc_id}/_view/#{ctx.view_name}",
+ query: %{page_size: ctx.page_size, descending: ctx.descending}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ body = resp.body
+ assert body["total_rows"] == length(body["rows"])
+ end
+
+ test "total_rows matches the requested page_size", ctx do
+ resp =
+ Couch.Session.get(
+ ctx.session,
+ "/#{ctx.db_name}/_design/#{ctx.ddoc_id}/_view/#{ctx.view_name}",
+ query: %{page_size: ctx.page_size, descending: ctx.descending}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ assert resp.body["total_rows"] == ctx.page_size
+ end
+
+ test "can use 'next' bookmark to get remaining results", ctx do
+ resp =
+ Couch.Session.get(
+ ctx.session,
+ "/#{ctx.db_name}/_design/#{ctx.ddoc_id}/_view/#{ctx.view_name}",
+ query: %{page_size: ctx.page_size, descending: ctx.descending}
+ )
+
+ bookmark = resp.body["next"]
+
+ resp =
+ Couch.Session.get(
+ ctx.session,
+ "/#{ctx.db_name}/_design/#{ctx.ddoc_id}/_view/#{ctx.view_name}",
+ query: %{bookmark: bookmark}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ body = resp.body
+ assert body["total_rows"] == length(body["rows"])
+ assert body["total_rows"] <= ctx.page_size
+ end
+
+ test "can use 'previous' bookmark", ctx do
+ resp =
+ Couch.Session.get(
+ ctx.session,
+ "/#{ctx.db_name}/_design/#{ctx.ddoc_id}/_view/#{ctx.view_name}",
+ query: %{page_size: ctx.page_size, descending: ctx.descending}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ next_bookmark = resp.body["next"]
+
+ first_page_keys = Enum.map(resp.body["rows"], fn row -> row["key"] end)
+
+ resp =
+ Couch.Session.get(
+ ctx.session,
+ "/#{ctx.db_name}/_design/#{ctx.ddoc_id}/_view/#{ctx.view_name}",
+ query: %{bookmark: next_bookmark}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ assert Map.has_key?(resp.body, "previous")
+
+ resp =
+ Couch.Session.get(
+ ctx.session,
+ "/#{ctx.db_name}/_design/#{ctx.ddoc_id}/_view/#{ctx.view_name}",
+ query: %{bookmark: resp.body["previous"]}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ keys = Enum.map(resp.body["rows"], fn row -> row["key"] end)
+ assert first_page_keys == keys
+ end
+ end
+ end
+ end
+
+ for n <- 10..11 do
+ describe "Pagination API (10 docs) : /{db}/_design/{ddoc}/_view?page_size=#{n}" do
+ @describetag n_docs: 10
+ @describetag descending: false
+ @describetag page_size: n
+ setup [:with_session, :random_db, :with_view, :with_docs]
+
+ test "should not return 'next' bookmark", ctx do
+ resp =
+ Couch.Session.get(
+ ctx.session,
+ "/#{ctx.db_name}/_design/#{ctx.ddoc_id}/_view/#{ctx.view_name}",
+ query: %{page_size: ctx.page_size, descending: ctx.descending}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ assert not Map.has_key?(resp.body, "next")
+ end
+
+ test "total_rows matches the length of rows array", ctx do
+ resp =
+ Couch.Session.get(
+ ctx.session,
+ "/#{ctx.db_name}/_design/#{ctx.ddoc_id}/_view/#{ctx.view_name}",
+ query: %{page_size: ctx.page_size, descending: ctx.descending}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ body = resp.body
+ assert body["total_rows"] == length(body["rows"])
+ end
+
+ test "total_rows less than the requested page_size", ctx do
+ resp =
+ Couch.Session.get(
+ ctx.session,
+ "/#{ctx.db_name}/_design/#{ctx.ddoc_id}/_view/#{ctx.view_name}",
+ query: %{page_size: ctx.page_size, descending: ctx.descending}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ assert resp.body["total_rows"] <= ctx.page_size
+ end
+ end
+ end
+
+ for descending <- [false, true] do
+ for n <- [4, 9] do
+ describe "Pagination API (10 docs) : /{db}/_design/{ddoc}/_view/queries?page_size=#{
+ n
+ }&descending=#{descending} : pages" do
+ @describetag n_docs: 10
+ @describetag descending: descending
+ @describetag page_size: n
+
+ @describetag queries: %{
+ queries: [
+ %{
+ descending: true
+ },
+ %{
+ limit: n + 1,
+ skip: 2
+ }
+ ]
+ }
+ setup [:with_session, :random_db, :with_view, :with_docs]
+
+ test "one of the results contains 'next' bookmark", ctx do
+ resp =
+ Couch.Session.post(
+ ctx.session,
+ "/#{ctx.db_name}/_design/#{ctx.ddoc_id}/_view/#{ctx.view_name}/queries",
+ query: %{page_size: ctx.page_size, descending: ctx.descending},
+ body: :jiffy.encode(ctx.queries)
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ results = resp.body["results"]
+ assert Enum.any?(results, fn result -> Map.has_key?(result, "next") end)
+ end
+
+ test "each 'next' bookmark is working", ctx do
+ resp =
+ Couch.Session.post(
+ ctx.session,
+ "/#{ctx.db_name}/_design/#{ctx.ddoc_id}/_view/#{ctx.view_name}/queries",
+ query: %{page_size: ctx.page_size, descending: ctx.descending},
+ body: :jiffy.encode(ctx.queries)
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ results = resp.body["results"]
+
+ bookmarks =
+ results
+ |> Enum.filter(fn result -> Map.has_key?(result, "next") end)
+ |> Enum.map(fn result -> Map.get(result, "next") end)
+
+ assert [] != bookmarks
+
+ Enum.each(bookmarks, fn bookmark ->
+ resp =
+ Couch.Session.get(
+ ctx.session,
+ "/#{ctx.db_name}/_design/#{ctx.ddoc_id}/_view/#{ctx.view_name}",
+ query: %{bookmark: bookmark}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ assert [] != resp.body["rows"]
+ end)
+
+ assert Enum.any?(results, fn result -> Map.has_key?(result, "next") end)
+ end
+
+ test "can post bookmarks to queries", ctx do
+ resp =
+ Couch.Session.post(
+ ctx.session,
+ "/#{ctx.db_name}/_design/#{ctx.ddoc_id}/_view/#{ctx.view_name}/queries",
+ query: %{page_size: ctx.page_size, descending: ctx.descending},
+ body: :jiffy.encode(ctx.queries)
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ results = resp.body["results"]
+
+ queries =
+ results
+ |> Enum.filter(fn result -> Map.has_key?(result, "next") end)
+ |> Enum.map(fn result -> %{bookmark: Map.get(result, "next")} end)
+
+ resp =
+ Couch.Session.post(
+ ctx.session,
+ "/#{ctx.db_name}/_design/#{ctx.ddoc_id}/_view/#{ctx.view_name}/queries",
+ body: :jiffy.encode(%{queries: queries})
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+
+ Enum.each(resp.body["results"], fn result ->
+ assert [] != result["rows"]
+ end)
+ end
+
+ test "respect request page_size", ctx do
+ resp =
+ Couch.Session.post(
+ ctx.session,
+ "/#{ctx.db_name}/_design/#{ctx.ddoc_id}/_view/#{ctx.view_name}/queries",
+ query: %{page_size: ctx.page_size, descending: ctx.descending},
+ body: :jiffy.encode(ctx.queries)
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ results = resp.body["results"]
+
+ Enum.each(results, fn result ->
+ assert length(result["rows"]) <= ctx.page_size
+ end)
+ end
+
+ test "independent page_size in the bookmark", ctx do
+ resp =
+ Couch.Session.post(
+ ctx.session,
+ "/#{ctx.db_name}/_design/#{ctx.ddoc_id}/_view/#{ctx.view_name}/queries",
+ query: %{page_size: ctx.page_size, descending: ctx.descending},
+ body: :jiffy.encode(ctx.queries)
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+
+ queries =
+ resp.body["results"]
+ |> Enum.filter(fn result -> Map.has_key?(result, "next") end)
+ |> Enum.map(fn result -> %{bookmark: Map.get(result, "next")} end)
+
+ resp =
+ Couch.Session.post(
+ ctx.session,
+ "/#{ctx.db_name}/_design/#{ctx.ddoc_id}/_view/#{ctx.view_name}/queries",
+ body: :jiffy.encode(%{queries: queries})
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+
+ Enum.each(resp.body["results"], fn result ->
+ assert length(result["rows"]) > ctx.page_size
+ end)
+ end
+
+ test "can retrieve all pages", ctx do
+ [descending_query, limit_query] =
+ paginate_queries(
+ ctx,
+ url:
+ "/#{ctx.db_name}/_design/#{ctx.ddoc_id}/_view/#{ctx.view_name}/queries",
+ query: %{page_size: ctx.page_size, descending: ctx.descending},
+ body: :jiffy.encode(ctx.queries)
+ )
+
+ results = List.flatten(descending_query)
+ assert ctx.n_docs == length(results)
+ expected_key_order = :descending
+ expected_ids_order = :ascending
+
+ assert expected_key_order == ordering?(results, "key"),
+ "expecting keys in #{expected_key_order} order, got: #{
+ inspect(field(results, "key"))
+ }"
+
+ assert expected_ids_order == ordering?(results, "id"),
+ "expecting ids in #{expected_ids_order} order, got: #{
+ inspect(field(results, "id"))
+ }"
+
+ results = List.flatten(limit_query)
+ [_descending_query, query] = ctx.queries[:queries]
+
+ expected_length =
+ if ctx.n_docs - query.skip > query.limit do
+ query.limit
+ else
+ query.limit - query.skip
+ end
+
+ assert expected_length == length(results)
+
+ {expected_key_order, expected_ids_order} =
+ if ctx.descending do
+ {:descending, :ascending}
+ else
+ {:ascending, :descending}
+ end
+
+ assert expected_key_order == ordering?(results, "key"),
+ ~s(expecting keys in #{expected_key_order} order, got: #{
+ inspect(field(results, "key"))
+ })
+
+ assert expected_ids_order == ordering?(results, "id"),
+ ~s(expecting ids in #{expected_ids_order} order, got: #{
+ inspect(field(results, "id"))
+ })
+ end
+ end
+ end
+ end
+
+ for descending <- [false, true] do
+ for n <- [4, 9] do
+ describe "Pagination API (10 docs) : /{db}/_design/{ddoc}/_view/queries?page_size=#{
+ n
+ }&descending=#{descending} : pages with same key" do
+ @describetag descending: descending
+ @describetag n_docs: 10
+ @describetag page_size: n
+
+ @describetag queries: %{
+ queries: [
+ %{
+ descending: true
+ },
+ %{
+ limit: n + 1,
+ skip: 2
+ }
+ ]
+ }
+ setup [:with_session, :random_db, :with_view, :with_same_key_docs]
+
+ test "handle same key", ctx do
+ # make sure the results are first sorted by key and then by id
+
+ [descending_query, limit_query] =
+ paginate_queries(
+ ctx,
+ url:
+ "/#{ctx.db_name}/_design/#{ctx.ddoc_id}/_view/#{ctx.view_name}/queries",
+ query: %{page_size: ctx.page_size, descending: ctx.descending},
+ body: :jiffy.encode(ctx.queries)
+ )
+
+ aggregate = fn pages ->
+ Enum.reduce(pages, {[], %{}}, fn page, acc ->
+ Enum.reduce(page, acc, fn row, {keys, in_acc} ->
+ id = Map.get(row, "id")
+ key = Map.get(row, "key")
+ {keys ++ [key], Map.update(in_acc, key, [id], &(&1 ++ [id]))}
+ end)
+ end)
+ end
+
+ {keys, aggregated} = aggregate.(descending_query)
+
+ # keys are sorted in reverse order
+ assert :descending == ordering?(keys),
+ ~s(expecting keys in descending order, got: #{inspect(keys)})
+
+ Enum.each(Map.values(aggregated), fn ids ->
+ # keys are sorted in reverse order by id
+ assert :descending == ordering?(ids),
+ ~s(expecting ids in descending order, got: #{inspect(ids)})
+ end)
+
+ {keys, aggregated} = aggregate.(limit_query)
+
+ {expected_key_order, expected_ids_order} =
+ if ctx.descending do
+ {:descending, :descending}
+ else
+ {:ascending, :ascending}
+ end
+
+ # keys are sorted
+ assert expected_key_order == ordering?(keys) or :equal == ordering?(keys),
+ ~s(expecting keys in #{expected_key_order} order, got: #{inspect(keys)})
+
+ Enum.each(Map.values(aggregated), fn ids ->
+ # Keys are sorted by id
+ assert expected_ids_order == ordering?(ids) or :equal == ordering?(ids),
+ ~s(expecting ids in #{expected_ids_order} order, got: #{inspect(ids)})
+ end)
+ end
+ end
+ end
+ end
+
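+ # Illustrative examples (not part of the original suite) of how the helpers
+ # below classify orderings:
+ #   ordering?([1, 2, 3]) #=> :ascending
+ #   ordering?([3, 2, 1]) #=> :descending
+ #   ordering?([1, 1, 1]) #=> :equal
+ #   ordering?([2, 1, 3]) #=> :unordered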
+ defp ordering?(maps, key) do
+ ordering?(field(maps, key))
+ end
+
+ defp ordering?(elements) do
+ ascending = Enum.sort(elements)
+ descending = Enum.reverse(Enum.sort(elements))
+
+ case {ascending, descending} do
+ {^elements, ^elements} -> :equal
+ {^elements, _} -> :ascending
+ {_, ^elements} -> :descending
+ _ -> :unordered
+ end
+ end
+
+ defp field(maps, key) do
+ Enum.map(maps, &Map.get(&1, key))
+ end
+end
diff --git a/src/chttpd/test/exunit/test_helper.exs b/src/chttpd/test/exunit/test_helper.exs
new file mode 100644
index 000000000..314050085
--- /dev/null
+++ b/src/chttpd/test/exunit/test_helper.exs
@@ -0,0 +1,2 @@
+ExUnit.configure(formatters: [JUnitFormatter, ExUnit.CLIFormatter])
+ExUnit.start()
diff --git a/src/chttpd/test/exunit/tracing_test.exs b/src/chttpd/test/exunit/tracing_test.exs
new file mode 100644
index 000000000..f66fb87a2
--- /dev/null
+++ b/src/chttpd/test/exunit/tracing_test.exs
@@ -0,0 +1,101 @@
+defmodule Couch.Test.OpenTracing do
+ use Couch.Test.ExUnit.Case
+ alias Couch.Test.Setup
+ alias Couch.Test.Setup.Step
+ alias Couch.Test.Utils
+ import Couch.DBTest, only: [retry_until: 1]
+
+ defp create_admin(user_name, password) do
+ hashed = String.to_charlist(:couch_passwords.hash_admin_password(password))
+ :config.set('admins', String.to_charlist(user_name), hashed, false)
+ end
+
+ defp base_url() do
+ addr = :config.get('chttpd', 'bind_address', '127.0.0.1')
+ port = :mochiweb_socket_server.get(:chttpd, :port)
+ "http://#{addr}:#{port}"
+ end
+
+ setup_all context do
+ test_ctx = :test_util.start_couch([:chttpd])
+ :ok = create_admin("adm", "pass")
+
+ Map.merge(context, %{
+ base_url: base_url(),
+ user: "adm",
+ pass: "pass"
+ })
+ end
+
+ setup context do
+ db_name = Utils.random_name("db")
+ session = Couch.login(context.user, context.pass, base_url: context.base_url)
+
+ on_exit(fn ->
+ delete_db(session, db_name)
+ end)
+
+ create_db(session, db_name)
+
+ Map.merge(context, %{
+ db_name: db_name,
+ session: session
+ })
+ end
+
+ def create_db(session, db_name, opts \\ []) do
+ retry_until(fn ->
+ resp = Couch.Session.put(session, "/#{db_name}", opts)
+ assert resp.status_code in [201, 202]
+ assert resp.body == %{"ok" => true}
+ {:ok, resp}
+ end)
+ end
+
+ def delete_db(session, db_name) do
+ retry_until(fn ->
+ resp = Couch.Session.delete(session, "/#{db_name}")
+ assert resp.status_code in [200, 202, 404]
+ {:ok, resp}
+ end)
+ end
+
+ def create_doc(session, db_name, body) do
+ retry_until(fn ->
+ resp = Couch.Session.post(session, "/#{db_name}", body: body)
+ assert resp.status_code in [201, 202]
+ assert resp.body["ok"]
+ {:ok, resp}
+ end)
+ end
+
+ defp trace_id() do
+ :couch_util.to_hex(:crypto.strong_rand_bytes(16))
+ end
+
+ defp span_id() do
+ :couch_util.to_hex(:crypto.strong_rand_bytes(8))
+ end
+
+ describe "Open Tracing" do
+ test "should return success with combined b3 header", ctx do
+ %{session: session, db_name: db_name} = ctx
+ doc = '{"mr": "rockoartischocko"}'
+ {:ok, _} = create_doc(session, db_name, doc)
+
+ resp =
+ retry_until(fn ->
+ b3 = "#{trace_id()}-#{span_id()}-#{span_id()}"
+
+ response =
+ Couch.Session.get(session, "/#{db_name}/_all_docs", headers: [b3: b3])
+
+ assert %HTTPotion.Response{} = response
+ response
+ end)
+
+ assert resp.status_code == 200, "Expected 200, got: #{resp.status_code}"
+ assert length(resp.body["rows"]) == 1
+ end
+ end
+end
diff --git a/src/couch/.gitignore b/src/couch/.gitignore
index e1fa65333..861974adb 100644
--- a/src/couch/.gitignore
+++ b/src/couch/.gitignore
@@ -19,3 +19,5 @@ test/engines/log/
.rebar/
.eunit
+
+rebar.config
diff --git a/src/couch/include/couch_db.hrl b/src/couch/include/couch_db.hrl
index 830b9bcf4..cc1fb5def 100644
--- a/src/couch/include/couch_db.hrl
+++ b/src/couch/include/couch_db.hrl
@@ -219,3 +219,6 @@
-type sec_props() :: [tuple()].
-type sec_obj() :: {sec_props()}.
+
+-define(record_to_keyval(Name, Record),
+ lists:zip(record_info(fields, Name), tl(tuple_to_list(Record)))).
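+%% Illustrative example (not part of this change): given -record(foo, {a, b}),
+%% ?record_to_keyval(foo, #foo{a = 1, b = 2}) evaluates to [{a, 1}, {b, 2}],
+%% which the format_status/2 callbacks added elsewhere in this patch rely on.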
diff --git a/src/couch/priv/icu_driver/couch_icu_driver.c b/src/couch/priv/icu_driver/couch_icu_driver.c
index 4d9bb982d..ffccf2e9d 100644
--- a/src/couch/priv/icu_driver/couch_icu_driver.c
+++ b/src/couch/priv/icu_driver/couch_icu_driver.c
@@ -30,6 +30,8 @@ specific language governing permissions and limitations under the License.
#include <string.h> /* for memcpy */
#endif
+#define BUFFER_SIZE 1024
+
typedef struct {
ErlDrvPort port;
@@ -54,6 +56,8 @@ static ErlDrvData couch_drv_start(ErlDrvPort port, char *buff)
UErrorCode status = U_ZERO_ERROR;
couch_drv_data* pData = (couch_drv_data*)driver_alloc(sizeof(couch_drv_data));
+ set_port_control_flags(port, PORT_CONTROL_FLAG_BINARY);
+
if (pData == NULL)
return ERL_DRV_ERROR_GENERAL;
@@ -84,14 +88,17 @@ ErlDrvSSizeT
return_control_result(void* pLocalResult, int localLen,
char **ppRetBuf, ErlDrvSizeT returnLen)
{
+ ErlDrvBinary* buf = NULL;
+
if (*ppRetBuf == NULL || localLen > returnLen) {
- *ppRetBuf = (char*)driver_alloc_binary(localLen);
- if(*ppRetBuf == NULL) {
- return -1;
- }
+ buf = driver_alloc_binary(localLen);
+ if(buf == NULL) {
+ return -1;
+ }
+ memcpy(buf->orig_bytes, pLocalResult, localLen);
+ *ppRetBuf = (char*) buf;
+ return localLen;
+ } else {
+ memcpy(*ppRetBuf, pLocalResult, localLen);
+ return localLen;
}
- memcpy(*ppRetBuf, pLocalResult, localLen);
- return localLen;
}
static ErlDrvSSizeT
@@ -147,6 +154,61 @@ couch_drv_control(ErlDrvData drv_data, unsigned int command,
return return_control_result(&response, sizeof(response), rbuf, rlen);
}
+ case 2: /* GET_SORT_KEY: */
+ {
+
+ UChar source[BUFFER_SIZE];
+ UChar* sourcePtr = source;
+ int32_t sourceLen = BUFFER_SIZE;
+
+ uint8_t sortKey[BUFFER_SIZE];
+ uint8_t* sortKeyPtr = sortKey;
+ int32_t sortKeyLen = BUFFER_SIZE;
+
+ int32_t inputLen;
+
+ UErrorCode status = U_ZERO_ERROR;
+ ErlDrvSSizeT res;
+
+ /* first 32bits are the length */
+ memcpy(&inputLen, pBuf, sizeof(inputLen));
+ pBuf += sizeof(inputLen);
+
+ u_strFromUTF8(sourcePtr, BUFFER_SIZE, &sourceLen, pBuf, inputLen, &status);
+
+ if (sourceLen >= BUFFER_SIZE) {
+ /* reset status or next u_strFromUTF8 call will auto-fail */
+ status = U_ZERO_ERROR;
+ sourcePtr = (UChar*) malloc(sourceLen * sizeof(UChar));
+ u_strFromUTF8(sourcePtr, sourceLen, NULL, pBuf, inputLen, &status);
+ if (U_FAILURE(status)) {
+ free(sourcePtr);
+ rbuf = NULL;
+ return 0;
+ }
+ } else if (U_FAILURE(status)) {
+ rbuf = NULL;
+ return 0;
+ }
+
+ sortKeyLen = ucol_getSortKey(pData->coll, sourcePtr, sourceLen, sortKeyPtr, BUFFER_SIZE);
+
+ if (sortKeyLen > BUFFER_SIZE) {
+ sortKeyPtr = (uint8_t*) malloc(sortKeyLen);
+ ucol_getSortKey(pData->coll, sourcePtr, sourceLen, sortKeyPtr, sortKeyLen);
+ }
+
+ res = return_control_result(sortKeyPtr, sortKeyLen, rbuf, rlen);
+
+ if (sourcePtr != source) {
+ free(sourcePtr);
+ }
+
+ if (sortKeyPtr != sortKey) {
+ free(sortKeyPtr);
+ }
+
+ return res;
+ }
default:
return -1;
diff --git a/src/couch/priv/stats_descriptions.cfg b/src/couch/priv/stats_descriptions.cfg
index 7c8fd94cb..fd6468ffa 100644
--- a/src/couch/priv/stats_descriptions.cfg
+++ b/src/couch/priv/stats_descriptions.cfg
@@ -230,6 +230,10 @@
{type, counter},
{desc, <<"number of HTTP 409 Conflict responses">>}
]}.
+{[couchdb, httpd_status_codes, 410], [
+ {type, counter},
+ {desc, <<"number of HTTP 410 Gone responses">>}
+]}.
{[couchdb, httpd_status_codes, 412], [
{type, counter},
{desc, <<"number of HTTP 412 Precondition Failed responses">>}
diff --git a/src/couch/rebar.config.script b/src/couch/rebar.config.script
index 4a5ef36e7..d4b244ac9 100644
--- a/src/couch/rebar.config.script
+++ b/src/couch/rebar.config.script
@@ -123,7 +123,7 @@ end.
{unix, _} when SMVsn == "60" ->
{
"-DXP_UNIX -I/usr/include/mozjs-60 -I/usr/local/include/mozjs-60 -std=c++14 -Wno-invalid-offsetof",
- "-L/usr/local/lib -std=c++14 -lmozjs-60 -lm"
+ "-L/usr/local/lib -std=c++14 -lmozjs-60 -lm -lstdc++"
};
{unix, _} when SMVsn == "68" ->
{
@@ -237,5 +237,10 @@ AddConfig = [
].
lists:foldl(fun({K, V}, CfgAcc) ->
- lists:keystore(K, 1, CfgAcc, {K, V})
+ case lists:keyfind(K, 1, CfgAcc) of
+ {K, Existent} when is_list(Existent) andalso is_list(V) ->
+ lists:keystore(K, 1, CfgAcc, {K, Existent ++ V});
+ false ->
+ lists:keystore(K, 1, CfgAcc, {K, V})
+ end
end, CONFIG, AddConfig).
diff --git a/src/couch/src/couch_att.erl b/src/couch/src/couch_att.erl
index a24de21d6..d41ab5bf2 100644
--- a/src/couch/src/couch_att.erl
+++ b/src/couch/src/couch_att.erl
@@ -27,9 +27,10 @@
]).
-export([
+ external_size/1,
size_info/1,
to_disk_term/1,
- from_disk_term/2
+ from_disk_term/3
]).
-export([
@@ -38,7 +39,7 @@
]).
-export([
- flush/2,
+ flush/3,
foldl/3,
range_foldl/5,
foldl_decode/3,
@@ -46,11 +47,6 @@
]).
-export([
- upgrade/1,
- downgrade/1
-]).
-
--export([
max_attachment_size/0,
validate_attachment_size/3
]).
@@ -58,137 +54,59 @@
-compile(nowarn_deprecated_type).
-export_type([att/0]).
--include_lib("couch/include/couch_db.hrl").
-
-
-%% Legacy attachment record. This is going to be phased out by the new proplist
-%% based structure. It's needed for now to allow code to perform lazy upgrades
-%% while the patch is rolled out to the cluster. Attachments passed as records
-%% will remain so until they are required to be represented as property lists.
-%% Once this has been widely deployed, this record will be removed entirely and
-%% property lists will be the main format.
--record(att, {
- name :: binary(),
- type :: binary(),
- att_len :: non_neg_integer(),
-
- %% length of the attachment in its identity form
- %% (that is, without a content encoding applied to it)
- %% differs from att_len when encoding /= identity
- disk_len :: non_neg_integer(),
-
- md5 = <<>> :: binary(),
- revpos = 0 :: non_neg_integer(),
- data :: stub | follows | binary() | {any(), any()} |
- {follows, pid(), reference()} | fun(() -> binary()),
-
- %% Encoding of the attachment
- %% currently supported values are:
- %% identity, gzip
- %% additional values to support in the future:
- %% deflate, compress
- encoding = identity :: identity | gzip
-}).
-
-
-%% Extensible Attachment Type
-%%
-%% The following types describe the known properties for attachment fields
-%% encoded as property lists to allow easier upgrades. Values not in this list
-%% should be accepted at runtime but should be treated as opaque data as might
-%% be used by upgraded code. If you plan on operating on new data, please add
-%% an entry here as documentation.
-
-
-%% The name of the attachment is also used as the mime-part name for file
-%% downloads. These must be unique per document.
--type name_prop() :: {name, binary()}.
-
-
-%% The mime type of the attachment. This does affect compression of certain
-%% attachments if the type is found to be configured as a compressable type.
-%% This is commonly reserved for text/* types but could include other custom
-%% cases as well. See definition and use of couch_util:compressable_att_type/1.
--type type_prop() :: {type, binary()}.
-
-
-%% The attachment length is similar to disk-length but ignores additional
-%% encoding that may have occurred.
--type att_len_prop() :: {att_len, non_neg_integer()}.
-
-
-%% The size of the attachment as stored in a disk stream.
--type disk_len_prop() :: {disk_len, non_neg_integer()}.
-
-%% This is a digest of the original attachment data as uploaded by the client.
-%% it's useful for checking validity of contents against other attachment data
-%% as well as quick digest computation of the enclosing document.
--type md5_prop() :: {md5, binary()}.
-
-
--type revpos_prop() :: {revpos, 0}.
+-include_lib("couch/include/couch_db.hrl").
-%% This field is currently overloaded with just about everything. The
-%% {any(), any()} type is just there until I have time to check the actual
-%% values expected. Over time this should be split into more than one property
-%% to allow simpler handling.
--type data_prop() :: {
- data, stub | follows | binary() | {any(), any()} |
- {follows, pid(), reference()} | fun(() -> binary())
-}.
+-define(CURRENT_ATT_FORMAT, 0).
-%% We will occasionally compress our data. See type_prop() for more information
-%% on when this happens.
--type encoding_prop() :: {encoding, identity | gzip}.
+-type prop_name() ::
+ name |
+ type |
+ att_len |
+ disk_len |
+ md5 |
+ revpos |
+ data |
+ encoding.
--type attachment() :: [
- name_prop() | type_prop() |
- att_len_prop() | disk_len_prop() |
- md5_prop() | revpos_prop() |
- data_prop() | encoding_prop()
-].
+-type data_prop_type() ::
+ {loc, #{}, binary(), binary()} |
+ stub |
+ follows |
+ binary() |
+ {follows, pid(), reference()} |
+ fun(() -> binary()).
--type disk_att_v1() :: {
- Name :: binary(),
- Type :: binary(),
- Sp :: any(),
- AttLen :: non_neg_integer(),
- RevPos :: non_neg_integer(),
- Md5 :: binary()
-}.
--type disk_att_v2() :: {
- Name :: binary(),
- Type :: binary(),
- Sp :: any(),
- AttLen :: non_neg_integer(),
- DiskLen :: non_neg_integer(),
- RevPos :: non_neg_integer(),
- Md5 :: binary(),
- Enc :: identity | gzip
+-type att() :: #{
+ name := binary(),
+ type := binary(),
+ att_len := non_neg_integer() | undefined,
+ disk_len := non_neg_integer() | undefined,
+ md5 := binary() | undefined,
+ revpos := non_neg_integer(),
+ data := data_prop_type(),
+ encoding := identity | gzip | undefined
}.
--type disk_att_v3() :: {Base :: tuple(), Extended :: list()}.
-
--type disk_att() :: disk_att_v1() | disk_att_v2() | disk_att_v3().
-
--type att() :: #att{} | attachment() | disk_att().
new() ->
- %% We construct a record by default for compatability. This will be
- %% upgraded on demand. A subtle effect this has on all attachments
- %% constructed via new is that it will pick up the proper defaults
- %% from the #att record definition given above. Newer properties do
- %% not support special default values and will all be treated as
- %% undefined.
- #att{}.
+ #{
+ name => <<>>,
+ type => <<>>,
+ att_len => undefined,
+ disk_len => undefined,
+ md5 => undefined,
+ revpos => 0,
+ data => undefined,
+ encoding => undefined
+ }.
--spec new([{atom(), any()}]) -> att().
+-spec new([{prop_name(), any()}]) -> att().
new(Props) ->
store(Props, new()).
@@ -197,71 +115,28 @@ new(Props) ->
(atom(), att()) -> any().
fetch(Fields, Att) when is_list(Fields) ->
[fetch(Field, Att) || Field <- Fields];
-fetch(Field, Att) when is_list(Att) ->
- case lists:keyfind(Field, 1, Att) of
- {Field, Value} -> Value;
- false -> undefined
- end;
-fetch(name, #att{name = Name}) ->
- Name;
-fetch(type, #att{type = Type}) ->
- Type;
-fetch(att_len, #att{att_len = AttLen}) ->
- AttLen;
-fetch(disk_len, #att{disk_len = DiskLen}) ->
- DiskLen;
-fetch(md5, #att{md5 = Digest}) ->
- Digest;
-fetch(revpos, #att{revpos = RevPos}) ->
- RevPos;
-fetch(data, #att{data = Data}) ->
- Data;
-fetch(encoding, #att{encoding = Encoding}) ->
- Encoding;
-fetch(_, _) ->
- undefined.
+fetch(Field, Att) ->
+ maps:get(Field, Att).
-spec store([{atom(), any()}], att()) -> att().
store(Props, Att0) ->
lists:foldl(fun({Field, Value}, Att) ->
- store(Field, Value, Att)
+ maps:update(Field, Value, Att)
end, Att0, Props).
--spec store(atom(), any(), att()) -> att().
-store(Field, undefined, Att) when is_list(Att) ->
- lists:keydelete(Field, 1, Att);
-store(Field, Value, Att) when is_list(Att) ->
- lists:keystore(Field, 1, Att, {Field, Value});
-store(name, Name, Att) ->
- Att#att{name = Name};
-store(type, Type, Att) ->
- Att#att{type = Type};
-store(att_len, AttLen, Att) ->
- Att#att{att_len = AttLen};
-store(disk_len, DiskLen, Att) ->
- Att#att{disk_len = DiskLen};
-store(md5, Digest, Att) ->
- Att#att{md5 = Digest};
-store(revpos, RevPos, Att) ->
- Att#att{revpos = RevPos};
-store(data, Data, Att) ->
- Att#att{data = Data};
-store(encoding, Encoding, Att) ->
- Att#att{encoding = Encoding};
store(Field, Value, Att) ->
- store(Field, Value, upgrade(Att)).
+ maps:update(Field, Value, Att).
-spec transform(atom(), fun(), att()) -> att().
transform(Field, Fun, Att) ->
- NewValue = Fun(fetch(Field, Att)),
- store(Field, NewValue, Att).
+ maps:update_with(Field, Fun, Att).
-is_stub(Att) ->
- stub == fetch(data, Att).
+is_stub(#{data := stub}) -> true;
+is_stub(#{}) -> false.
%% merge_stubs takes all stub attachments and replaces them with on disk
@@ -275,8 +150,7 @@ merge_stubs(MemAtts, DiskAtts) ->
merge_stubs(MemAtts, OnDisk, []).
-%% restore spec when R14 support is dropped
-%% -spec merge_stubs([att()], dict:dict(), [att()]) -> [att()].
+-spec merge_stubs([att()], dict:dict(), [att()]) -> [att()].
merge_stubs([Att | Rest], OnDisk, Merged) ->
case fetch(data, Att) of
stub ->
@@ -304,18 +178,26 @@ merge_stubs([], _, Merged) ->
{ok, lists:reverse(Merged)}.
+external_size(Att) ->
+ NameSize = size(fetch(name, Att)),
+ TypeSize = case fetch(type, Att) of
+ undefined -> 0;
+ Type -> size(Type)
+ end,
+ AttSize = fetch(att_len, Att),
+ Md5Size = case fetch(md5, Att) of
+ undefined -> 0;
+ Md5 -> size(Md5)
+ end,
+ NameSize + TypeSize + AttSize + Md5Size.
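+
+%% Illustrative: for an attachment with name <<"a.txt">>, type <<"text/plain">>,
+%% att_len 12 and an undefined md5, external_size/1 returns 5 + 10 + 12 + 0 = 27.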
+
+
size_info([]) ->
{ok, []};
size_info(Atts) ->
Info = lists:map(fun(Att) ->
- AttLen = fetch(att_len, Att),
- case fetch(data, Att) of
- {stream, StreamEngine} ->
- {ok, SPos} = couch_stream:to_disk_term(StreamEngine),
- {SPos, AttLen};
- {_, SPos} ->
- {SPos, AttLen}
- end
+ [{loc, _Db, _DocId, AttId}, AttLen] = fetch([data, att_len], Att),
+ {AttId, AttLen}
end, Atts),
{ok, lists:usort(Info)}.
@@ -324,89 +206,41 @@ size_info(Atts) ->
%% old format when possible. This should help make the attachment lazy upgrade
%% as safe as possible, avoiding the need for complicated disk versioning
%% schemes.
-to_disk_term(#att{} = Att) ->
- {stream, StreamEngine} = fetch(data, Att),
- {ok, Sp} = couch_stream:to_disk_term(StreamEngine),
- {
+to_disk_term(Att) ->
+ {loc, #{}, _DocId, AttId} = fetch(data, Att),
+ {?CURRENT_ATT_FORMAT, {
fetch(name, Att),
fetch(type, Att),
- Sp,
+ AttId,
fetch(att_len, Att),
fetch(disk_len, Att),
fetch(revpos, Att),
fetch(md5, Att),
fetch(encoding, Att)
- };
-to_disk_term(Att) ->
- BaseProps = [name, type, data, att_len, disk_len, revpos, md5, encoding],
- {Extended, Base} = lists:foldl(
- fun
- (data, {Props, Values}) ->
- case lists:keytake(data, 1, Props) of
- {value, {_, {stream, StreamEngine}}, Other} ->
- {ok, Sp} = couch_stream:to_disk_term(StreamEngine),
- {Other, [Sp | Values]};
- {value, {_, Value}, Other} ->
- {Other, [Value | Values]};
- false ->
- {Props, [undefined | Values]}
- end;
- (Key, {Props, Values}) ->
- case lists:keytake(Key, 1, Props) of
- {value, {_, Value}, Other} -> {Other, [Value | Values]};
- false -> {Props, [undefined | Values]}
- end
- end,
- {Att, []},
- BaseProps
- ),
- {list_to_tuple(lists:reverse(Base)), Extended}.
-
-
-%% The new disk term format is a simple wrapper around the legacy format. Base
-%% properties will remain in a tuple while the new fields and possibly data from
-%% future extensions will be stored in a list of atom/value pairs. While this is
-%% slightly less efficient, future work should be able to make use of
-%% compression to remove these sorts of common bits (block level compression
-%% with something like a shared dictionary that is checkpointed every now and
-%% then).
-from_disk_term(StreamSrc, {Base, Extended})
- when is_tuple(Base), is_list(Extended) ->
- store(Extended, from_disk_term(StreamSrc, Base));
-from_disk_term(StreamSrc, {Name,Type,Sp,AttLen,DiskLen,RevPos,Md5,Enc}) ->
- {ok, Stream} = open_stream(StreamSrc, Sp),
- #att{
- name=Name,
- type=Type,
- att_len=AttLen,
- disk_len=DiskLen,
- md5=Md5,
- revpos=RevPos,
- data={stream, Stream},
- encoding=upgrade_encoding(Enc)
- };
-from_disk_term(StreamSrc, {Name,Type,Sp,AttLen,RevPos,Md5}) ->
- {ok, Stream} = open_stream(StreamSrc, Sp),
- #att{
- name=Name,
- type=Type,
- att_len=AttLen,
- disk_len=AttLen,
- md5=Md5,
- revpos=RevPos,
- data={stream, Stream}
- };
-from_disk_term(StreamSrc, {Name,{Type,Sp,AttLen}}) ->
- {ok, Stream} = open_stream(StreamSrc, Sp),
- #att{
- name=Name,
- type=Type,
- att_len=AttLen,
- disk_len=AttLen,
- md5= <<>>,
- revpos=0,
- data={stream, Stream}
- }.
+ }}.
+
+
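+%% Illustrative: to_disk_term/1 above emits {?CURRENT_ATT_FORMAT, {Name, Type,
+%% AttId, AttLen, DiskLen, RevPos, Md5, Encoding}}; from_disk_term/3 below
+%% pattern matches that same shape.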
+from_disk_term(#{} = Db, DocId, {?CURRENT_ATT_FORMAT, Props}) ->
+ {
+ Name,
+ Type,
+ AttId,
+ AttLen,
+ DiskLen,
+ RevPos,
+ Md5,
+ Encoding
+ } = Props,
+ new([
+ {name, Name},
+ {type, Type},
+ {data, {loc, Db#{tx := undefined}, DocId, AttId}},
+ {att_len, AttLen},
+ {disk_len, DiskLen},
+ {revpos, RevPos},
+ {md5, Md5},
+ {encoding, Encoding}
+ ]).
%% from_json reads in embedded JSON attachments and creates usable attachment
@@ -433,8 +267,12 @@ stub_from_json(Att, Props) ->
%% json object. See merge_stubs/3 for the stub check.
RevPos = couch_util:get_value(<<"revpos">>, Props),
store([
- {md5, Digest}, {revpos, RevPos}, {data, stub}, {disk_len, DiskLen},
- {att_len, EncodedLen}, {encoding, Encoding}
+ {data, stub},
+ {disk_len, DiskLen},
+ {att_len, EncodedLen},
+ {revpos, RevPos},
+ {md5, Digest},
+ {encoding, Encoding}
], Att).
@@ -443,8 +281,12 @@ follow_from_json(Att, Props) ->
Digest = digest_from_json(Props),
RevPos = couch_util:get_value(<<"revpos">>, Props, 0),
store([
- {md5, Digest}, {revpos, RevPos}, {data, follows}, {disk_len, DiskLen},
- {att_len, EncodedLen}, {encoding, Encoding}
+ {data, follows},
+ {disk_len, DiskLen},
+ {att_len, EncodedLen},
+ {revpos, RevPos},
+ {md5, Digest},
+ {encoding, Encoding}
], Att).
@@ -455,8 +297,10 @@ inline_from_json(Att, Props) ->
Length = size(Data),
RevPos = couch_util:get_value(<<"revpos">>, Props, 0),
store([
- {data, Data}, {revpos, RevPos}, {disk_len, Length},
- {att_len, Length}
+ {data, Data},
+ {disk_len, Length},
+ {att_len, Length},
+ {revpos, RevPos}
], Att)
catch
_:_ ->
@@ -466,7 +310,6 @@ inline_from_json(Att, Props) ->
end.
-
encoded_lengths_from_json(Props) ->
Len = couch_util:get_value(<<"length">>, Props),
case couch_util:get_value(<<"encoding">>, Props) of
@@ -488,9 +331,16 @@ digest_from_json(Props) ->
to_json(Att, OutputData, DataToFollow, ShowEncoding) ->
- [Name, Data, DiskLen, AttLen, Enc, Type, RevPos, Md5] = fetch(
- [name, data, disk_len, att_len, encoding, type, revpos, md5], Att
- ),
+ #{
+ name := Name,
+ type := Type,
+ data := Data,
+ disk_len := DiskLen,
+ att_len := AttLen,
+ revpos := RevPos,
+ md5 := Md5,
+ encoding := Encoding
+ } = Att,
Props = [
{<<"content_type">>, Type},
{<<"revpos">>, RevPos}
@@ -505,71 +355,75 @@ to_json(Att, OutputData, DataToFollow, ShowEncoding) ->
DataToFollow ->
[{<<"length">>, DiskLen}, {<<"follows">>, true}];
true ->
- AttData = case Enc of
+ AttData = case Encoding of
gzip -> zlib:gunzip(to_binary(Att));
identity -> to_binary(Att)
end,
[{<<"data">>, base64:encode(AttData)}]
end,
EncodingProps = if
- ShowEncoding andalso Enc /= identity ->
+ ShowEncoding andalso Encoding /= identity ->
[
- {<<"encoding">>, couch_util:to_binary(Enc)},
+ {<<"encoding">>, couch_util:to_binary(Encoding)},
{<<"encoded_length">>, AttLen}
];
true ->
[]
end,
- HeadersProp = case fetch(headers, Att) of
- undefined -> [];
- Headers -> [{<<"headers">>, Headers}]
+ {Name, {Props ++ DigestProp ++ DataProps ++ EncodingProps}}.
+
+
+flush(Db, DocId, Att1) ->
+ Att2 = read_data(fetch(data, Att1), Att1),
+ [
+ Data,
+ AttLen,
+ DiskLen,
+ ReqMd5,
+ Encoding
+ ] = fetch([data, att_len, disk_len, md5, encoding], Att2),
+
+ % Eventually, we'll check if we can compress this
+ % attachment here and do so if possible.
+
+ % If we were sent a gzip'ed attachment with no
+ % length data, we have to set it here.
+ Att3 = case DiskLen of
+ undefined when AttLen /= undefined ->
+ store(disk_len, AttLen, Att2);
+ undefined when is_binary(Data) ->
+ store(disk_len, size(Data), Att2);
+ _ ->
+ Att2
+ end,
+
+ % If no encoding has been set, default to
+ % identity
+ Att4 = case Encoding of
+ undefined -> store(encoding, identity, Att3);
+ _ -> Att3
end,
- {Name, {Props ++ DigestProp ++ DataProps ++ EncodingProps ++ HeadersProp}}.
+ case Data of
+ {loc, _, _, _} ->
+ % Already flushed
+ Att1;
+ _ when is_binary(Data) ->
+ DataMd5 = couch_hash:md5_hash(Data),
+ if ReqMd5 == undefined -> ok; true ->
+ couch_util:check_md5(DataMd5, ReqMd5)
+ end,
+ Att5 = store(md5, DataMd5, Att4),
+ Att6 = maybe_compress(Att5),
+ fabric2_db:write_attachment(Db, DocId, Att6)
+ end.
-flush(Db, Att) ->
- flush_data(Db, fetch(data, Att), Att).
+read_data({loc, #{}, _DocId, _AttId}, Att) ->
+ % Attachment already written to fdb
+ Att;
-flush_data(Db, Data, Att) when is_binary(Data) ->
- couch_db:with_stream(Db, Att, fun(OutputStream) ->
- couch_stream:write(OutputStream, Data)
- end);
-flush_data(Db, Fun, Att) when is_function(Fun) ->
- AttName = fetch(name, Att),
- MaxAttSize = max_attachment_size(),
- case fetch(att_len, Att) of
- undefined ->
- couch_db:with_stream(Db, Att, fun(OutputStream) ->
- % Fun(MaxChunkSize, WriterFun) must call WriterFun
- % once for each chunk of the attachment,
- Fun(4096,
- % WriterFun({Length, Binary}, State)
- % WriterFun({0, _Footers}, State)
- % Called with Length == 0 on the last time.
- % WriterFun returns NewState.
- fun({0, Footers}, _Total) ->
- F = mochiweb_headers:from_binary(Footers),
- case mochiweb_headers:get_value("Content-MD5", F) of
- undefined ->
- ok;
- Md5 ->
- {md5, base64:decode(Md5)}
- end;
- ({Length, Chunk}, Total0) ->
- Total = Total0 + Length,
- validate_attachment_size(AttName, Total, MaxAttSize),
- couch_stream:write(OutputStream, Chunk),
- Total
- end, 0)
- end);
- AttLen ->
- validate_attachment_size(AttName, AttLen, MaxAttSize),
- couch_db:with_stream(Db, Att, fun(OutputStream) ->
- write_streamed_attachment(OutputStream, Fun, AttLen)
- end)
- end;
-flush_data(Db, {follows, Parser, Ref}, Att) ->
+read_data({follows, Parser, Ref}, Att) ->
ParserRef = erlang:monitor(process, Parser),
Fun = fun() ->
Parser ! {get_bytes, Ref, self()},
@@ -583,41 +437,72 @@ flush_data(Db, {follows, Parser, Ref}, Att) ->
end
end,
try
- flush_data(Db, Fun, store(data, Fun, Att))
+ read_data(Fun, store(data, Fun, Att))
after
erlang:demonitor(ParserRef, [flush])
end;
-flush_data(Db, {stream, StreamEngine}, Att) ->
- case couch_db:is_active_stream(Db, StreamEngine) of
- true ->
- % Already written
- Att;
- false ->
- NewAtt = couch_db:with_stream(Db, Att, fun(OutputStream) ->
- couch_stream:copy(StreamEngine, OutputStream)
- end),
- InMd5 = fetch(md5, Att),
- OutMd5 = fetch(md5, NewAtt),
- couch_util:check_md5(OutMd5, InMd5),
- NewAtt
+
+read_data(Data, Att) when is_binary(Data) ->
+ Att;
+
+read_data(Fun, Att) when is_function(Fun) ->
+ [AttName, AttLen, InMd5] = fetch([name, att_len, md5], Att),
+ MaxAttSize = max_attachment_size(),
+ case AttLen of
+ undefined ->
+ % Fun(MaxChunkSize, WriterFun, InitAcc) must call WriterFun
+ % once for each chunk of the attachment, threading the accumulator,
+ WriterFun = fun
+ ({0, Footers}, {Len, Acc}) ->
+ F = mochiweb_headers:from_binary(Footers),
+ Md5 = case mochiweb_headers:get_value("Content-MD5", F) of
+ undefined -> undefined;
+ Value -> base64:decode(Value)
+ end,
+ Props0 = [
+ {data, iolist_to_binary(lists:reverse(Acc))},
+ {att_len, Len}
+ ],
+ Props1 = if InMd5 /= md5_in_footer -> Props0; true ->
+ [{md5, Md5} | Props0]
+ end,
+ store(Props1, Att);
+ ({ChunkLen, Chunk}, {Len, Acc}) ->
+ NewLen = Len + ChunkLen,
+ validate_attachment_size(AttName, NewLen, MaxAttSize),
+ {NewLen, [Chunk | Acc]}
+ end,
+ Fun(8192, WriterFun, {0, []});
+ AttLen ->
+ validate_attachment_size(AttName, AttLen, MaxAttSize),
+ read_streamed_attachment(Att, Fun, AttLen, [])
end.
-write_streamed_attachment(_Stream, _F, 0) ->
- ok;
-write_streamed_attachment(_Stream, _F, LenLeft) when LenLeft < 0 ->
+read_streamed_attachment(Att, _F, 0, Acc) ->
+ Bin = iolist_to_binary(lists:reverse(Acc)),
+ store([
+ {data, Bin},
+ {att_len, size(Bin)}
+ ], Att);
+
+read_streamed_attachment(_Att, _F, LenLeft, _Acc) when LenLeft < 0 ->
throw({bad_request, <<"attachment longer than expected">>});
-write_streamed_attachment(Stream, F, LenLeft) when LenLeft > 0 ->
- Bin = try read_next_chunk(F, LenLeft)
+
+read_streamed_attachment(Att, F, LenLeft, Acc) when LenLeft > 0 ->
+ Bin = try
+ read_next_chunk(F, LenLeft)
catch
{mp_parser_died, normal} ->
throw({bad_request, <<"attachment shorter than expected">>})
end,
- ok = couch_stream:write(Stream, Bin),
- write_streamed_attachment(Stream, F, LenLeft - iolist_size(Bin)).
+ Size = iolist_size(Bin),
+ read_streamed_attachment(Att, F, LenLeft - Size, [Bin | Acc]).
+
read_next_chunk(F, _) when is_function(F, 0) ->
F();
+
read_next_chunk(F, LenLeft) when is_function(F, 1) ->
F(lists:min([LenLeft, 16#2000])).
@@ -626,14 +511,17 @@ foldl(Att, Fun, Acc) ->
foldl(fetch(data, Att), Att, Fun, Acc).
+foldl({loc, Db, DocId, AttId}, _Att, Fun, Acc) ->
+ Bin = fabric2_db:read_attachment(Db#{tx := undefined}, DocId, AttId),
+ Fun(Bin, Acc);
+
foldl(Bin, _Att, Fun, Acc) when is_binary(Bin) ->
Fun(Bin, Acc);
-foldl({stream, StreamEngine}, Att, Fun, Acc) ->
- Md5 = fetch(md5, Att),
- couch_stream:foldl(StreamEngine, Md5, Fun, Acc);
+
foldl(DataFun, Att, Fun, Acc) when is_function(DataFun) ->
Len = fetch(att_len, Att),
fold_streamed_data(DataFun, Len, Fun, Acc);
+
foldl({follows, Parser, Ref}, Att, Fun, Acc) ->
ParserRef = erlang:monitor(process, Parser),
DataFun = fun() ->
@@ -654,18 +542,40 @@ foldl({follows, Parser, Ref}, Att, Fun, Acc) ->
end.
+range_foldl(Bin1, From, To, Fun, Acc) when is_binary(Bin1) ->
+ ReadLen = To - From,
+ Bin2 = case Bin1 of
+ _ when size(Bin1) < From -> <<>>;
+ <<_:From/binary, B2/binary>> -> B2
+ end,
+ Bin3 = case Bin2 of
+ _ when size(Bin2) < ReadLen -> Bin2;
+ <<B3:ReadLen/binary, _/binary>> -> B3
+ end,
+ Fun(Bin3, Acc);
+
range_foldl(Att, From, To, Fun, Acc) ->
- {stream, StreamEngine} = fetch(data, Att),
- couch_stream:range_foldl(StreamEngine, From, To, Fun, Acc).
+ {loc, Db, DocId, AttId} = fetch(data, Att),
+ Bin = fabric2_db:read_attachment(Db, DocId, AttId),
+ range_foldl(Bin, From, To, Fun, Acc).
foldl_decode(Att, Fun, Acc) ->
- case fetch([data, encoding], Att) of
- [{stream, StreamEngine}, Enc] ->
- couch_stream:foldl_decode(
- StreamEngine, fetch(md5, Att), Enc, Fun, Acc);
- [Fun2, identity] ->
- fold_streamed_data(Fun2, fetch(att_len, Att), Fun, Acc)
+ [Encoding, Data] = fetch([encoding, data], Att),
+ case {Encoding, Data} of
+ {gzip, {loc, Db, DocId, AttId}} ->
+ NoTxDb = Db#{tx := undefined},
+ Bin = fabric2_db:read_attachment(NoTxDb, DocId, AttId),
+ foldl_decode(store(data, Bin, Att), Fun, Acc);
+ {gzip, _} when is_binary(Data) ->
+ Z = zlib:open(),
+ ok = zlib:inflateInit(Z, 16 + 15),
+ Inflated = iolist_to_binary(zlib:inflate(Z, Data)),
+ ok = zlib:inflateEnd(Z),
+ ok = zlib:close(Z),
+ foldl(Inflated, Att, Fun, Acc);
+ _ ->
+ foldl(Att, Fun, Acc)
end.
@@ -677,10 +587,9 @@ to_binary(Bin, _Att) when is_binary(Bin) ->
Bin;
to_binary(Iolist, _Att) when is_list(Iolist) ->
iolist_to_binary(Iolist);
-to_binary({stream, _StreamEngine}, Att) ->
- iolist_to_binary(
- lists:reverse(foldl(Att, fun(Bin,Acc) -> [Bin|Acc] end, []))
- );
+to_binary({loc, Db, DocId, AttId}, _Att) ->
+ NoTxDb = Db#{tx := undefined},
+ fabric2_db:read_attachment(NoTxDb, DocId, AttId);
to_binary(DataFun, Att) when is_function(DataFun)->
Len = fetch(att_len, Att),
iolist_to_binary(
@@ -695,46 +604,60 @@ to_binary(DataFun, Att) when is_function(DataFun)->
fold_streamed_data(_RcvFun, 0, _Fun, Acc) ->
Acc;
+
fold_streamed_data(RcvFun, LenLeft, Fun, Acc) when LenLeft > 0->
Bin = RcvFun(),
ResultAcc = Fun(Bin, Acc),
fold_streamed_data(RcvFun, LenLeft - size(Bin), Fun, ResultAcc).
-%% Upgrade an attachment record to a property list on demand. This is a one-way
-%% operation as downgrading potentially truncates fields with important data.
--spec upgrade(#att{}) -> attachment().
-upgrade(#att{} = Att) ->
- Map = lists:zip(
- record_info(fields, att),
- lists:seq(2, record_info(size, att))
- ),
- %% Don't store undefined elements since that is default
- [{F, element(I, Att)} || {F, I} <- Map, element(I, Att) /= undefined];
-upgrade(Att) ->
- Att.
+maybe_compress(Att) ->
+ [Encoding, Type] = fetch([encoding, type], Att),
+ IsCompressible = is_compressible(Type),
+ CompLevel = config:get_integer("attachments", "compression_level", 0),
+ case Encoding of
+ identity when IsCompressible, CompLevel >= 1, CompLevel =< 9 ->
+ compress(Att, CompLevel);
+ _ ->
+ Att
+ end.
-%% Downgrade is exposed for interactive convenience. In practice, unless done
-%% manually, upgrades are always one-way.
-downgrade(#att{} = Att) ->
- Att;
-downgrade(Att) ->
- #att{
- name = fetch(name, Att),
- type = fetch(type, Att),
- att_len = fetch(att_len, Att),
- disk_len = fetch(disk_len, Att),
- md5 = fetch(md5, Att),
- revpos = fetch(revpos, Att),
- data = fetch(data, Att),
- encoding = fetch(encoding, Att)
- }.
+compress(Att, Level) ->
+ Data = fetch(data, Att),
+
+ Z = zlib:open(),
+ % 15 = ?MAX_WBITS (defined in the zlib module)
+ % the 16 + ?MAX_WBITS formula was obtained by inspecting zlib:gzip/1
+ ok = zlib:deflateInit(Z, Level, deflated, 16 + 15, 8, default),
+ CompData = iolist_to_binary(zlib:deflate(Z, Data, finish)),
+ ok = zlib:deflateEnd(Z),
+ ok = zlib:close(Z),
+
+ store([
+ {att_len, size(CompData)},
+ {md5, couch_hash:md5_hash(CompData)},
+ {data, CompData},
+ {encoding, gzip}
+ ], Att).
-upgrade_encoding(true) -> gzip;
-upgrade_encoding(false) -> identity;
-upgrade_encoding(Encoding) -> Encoding.
+is_compressible(Type) when is_binary(Type) ->
+ is_compressible(binary_to_list(Type));
+is_compressible(Type) ->
+ TypeExpList = re:split(
+ config:get("attachments", "compressible_types", ""),
+ "\\s*,\\s*",
+ [{return, list}]
+ ),
+ lists:any(
+ fun(TypeExp) ->
+ Regexp = ["^\\s*", re:replace(TypeExp, "\\*", ".*"),
+ "(?:\\s*;.*?)?\\s*", $$],
+ re:run(Type, Regexp, [caseless]) =/= nomatch
+ end,
+ [T || T <- TypeExpList, T /= []]
+ ).
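+
+%% Illustrative (assumes a config value, not part of this change): with
+%% compressible_types = "text/*, application/json", both
+%% is_compressible(<<"text/plain; charset=utf-8">>) and
+%% is_compressible(<<"application/json">>) return true.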
max_attachment_size() ->
@@ -753,204 +676,190 @@ validate_attachment_size(_AttName, _AttSize, _MAxAttSize) ->
ok.
-open_stream(StreamSrc, Data) ->
- case couch_db:is_db(StreamSrc) of
- true ->
- couch_db:open_read_stream(StreamSrc, Data);
- false ->
- case is_function(StreamSrc, 1) of
- true ->
- StreamSrc(Data);
- false ->
- erlang:error({invalid_stream_source, StreamSrc})
- end
- end.
-
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-% Eww...
--include("couch_bt_engine.hrl").
-
-%% Test utilities
-
-
-empty_att() -> new().
-
-
-upgraded_empty_att() ->
- new([{headers, undefined}]).
-
-
-%% Test groups
-
-
-attachment_upgrade_test_() ->
- {"Lazy record upgrade tests", [
- {"Existing record fields don't upgrade",
- {with, empty_att(), [fun test_non_upgrading_fields/1]}
- },
- {"New fields upgrade",
- {with, empty_att(), [fun test_upgrading_fields/1]}
- }
- ]}.
-
-
-attachment_defaults_test_() ->
- {"Attachment defaults tests", [
- {"Records retain old default values", [
- {with, empty_att(), [fun test_legacy_defaults/1]}
- ]},
- {"Upgraded records inherit defaults", [
- {with, upgraded_empty_att(), [fun test_legacy_defaults/1]}
- ]},
- {"Undefined entries are elided on upgrade", [
- {with, upgraded_empty_att(), [fun test_elided_entries/1]}
- ]}
- ]}.
-
-attachment_field_api_test_() ->
- {"Basic attachment field api", [
- fun test_construction/0,
- fun test_store_and_fetch/0,
- fun test_transform/0
- ]}.
-
-
-attachment_disk_term_test_() ->
- BaseAttachment = new([
- {name, <<"empty">>},
- {type, <<"application/octet-stream">>},
- {att_len, 0},
- {disk_len, 0},
- {md5, <<212,29,140,217,143,0,178,4,233,128,9,152,236,248,66,126>>},
- {revpos, 4},
- {data, {stream, {couch_bt_engine_stream, {fake_fd, fake_sp}}}},
- {encoding, identity}
- ]),
- BaseDiskTerm = {
- <<"empty">>,
- <<"application/octet-stream">>,
- fake_sp,
- 0, 0, 4,
- <<212,29,140,217,143,0,178,4,233,128,9,152,236,248,66,126>>,
- identity
- },
- Headers = [{<<"X-Foo">>, <<"bar">>}],
- ExtendedAttachment = store(headers, Headers, BaseAttachment),
- ExtendedDiskTerm = {BaseDiskTerm, [{headers, Headers}]},
- FakeDb = test_util:fake_db([{engine, {couch_bt_engine, #st{fd=fake_fd}}}]),
- {"Disk term tests", [
- ?_assertEqual(BaseDiskTerm, to_disk_term(BaseAttachment)),
- ?_assertEqual(BaseAttachment, from_disk_term(FakeDb, BaseDiskTerm)),
- ?_assertEqual(ExtendedDiskTerm, to_disk_term(ExtendedAttachment)),
- ?_assertEqual(ExtendedAttachment, from_disk_term(FakeDb, ExtendedDiskTerm))
- ]}.
-
-
-attachment_json_term_test_() ->
- Props = [
- {<<"content_type">>, <<"application/json">>},
- {<<"digest">>, <<"md5-QCNtWUNXV0UzJnEjMk92YUk1JA==">>},
- {<<"length">>, 14},
- {<<"revpos">>, 1}
- ],
- PropsInline = [{<<"data">>, <<"eyJhbnN3ZXIiOiA0Mn0=">>}] ++ Props,
- InvalidProps = [{<<"data">>, <<"!Base64Encoded$">>}] ++ Props,
- Att = couch_att:new([
- {name, <<"attachment.json">>},
- {type, <<"application/json">>}
- ]),
- ResultStub = couch_att:new([
- {name, <<"attachment.json">>},
- {type, <<"application/json">>},
- {att_len, 14},
- {disk_len, 14},
- {md5, <<"@#mYCWWE3&q#2OvaI5$">>},
- {revpos, 1},
- {data, stub},
- {encoding, identity}
- ]),
- ResultFollows = ResultStub#att{data = follows},
- ResultInline = ResultStub#att{md5 = <<>>, data = <<"{\"answer\": 42}">>},
- {"JSON term tests", [
- ?_assertEqual(ResultStub, stub_from_json(Att, Props)),
- ?_assertEqual(ResultFollows, follow_from_json(Att, Props)),
- ?_assertEqual(ResultInline, inline_from_json(Att, PropsInline)),
- ?_assertThrow({bad_request, _}, inline_from_json(Att, Props)),
- ?_assertThrow({bad_request, _}, inline_from_json(Att, InvalidProps))
- ]}.
-
-
-attachment_stub_merge_test_() ->
- %% Stub merging needs to demonstrate revpos matching, skipping, and missing
- %% attachment errors.
- {"Attachment stub merging tests", []}.
-
-
-%% Test generators
-
-
-test_non_upgrading_fields(Attachment) ->
- Pairs = [
- {name, "cat.gif"},
- {type, "text/very-very-plain"},
- {att_len, 1024},
- {disk_len, 42},
- {md5, <<"md5-hashhashhash">>},
- {revpos, 4},
- {data, stub},
- {encoding, gzip}
- ],
- lists:foreach(
- fun({Field, Value}) ->
- ?assertMatch(#att{}, Attachment),
- Updated = store(Field, Value, Attachment),
- ?assertMatch(#att{}, Updated)
- end,
- Pairs).
-
-
-test_upgrading_fields(Attachment) ->
- ?assertMatch(#att{}, Attachment),
- UpdatedHeaders = store(headers, [{<<"Ans">>, <<"42">>}], Attachment),
- ?assertMatch(X when is_list(X), UpdatedHeaders),
- UpdatedHeadersUndefined = store(headers, undefined, Attachment),
- ?assertMatch(X when is_list(X), UpdatedHeadersUndefined).
-
-
-test_legacy_defaults(Attachment) ->
- ?assertEqual(<<>>, fetch(md5, Attachment)),
- ?assertEqual(0, fetch(revpos, Attachment)),
- ?assertEqual(identity, fetch(encoding, Attachment)).
-
-
-test_elided_entries(Attachment) ->
- ?assertNot(lists:keymember(name, 1, Attachment)),
- ?assertNot(lists:keymember(type, 1, Attachment)),
- ?assertNot(lists:keymember(att_len, 1, Attachment)),
- ?assertNot(lists:keymember(disk_len, 1, Attachment)),
- ?assertNot(lists:keymember(data, 1, Attachment)).
-
-
-test_construction() ->
- ?assert(new() == new()),
- Initialized = new([{name, <<"foo.bar">>}, {type, <<"application/qux">>}]),
- ?assertEqual(<<"foo.bar">>, fetch(name, Initialized)),
- ?assertEqual(<<"application/qux">>, fetch(type, Initialized)).
-
-
-test_store_and_fetch() ->
- Attachment = empty_att(),
- ?assertEqual(<<"abc">>, fetch(name, store(name, <<"abc">>, Attachment))),
- ?assertEqual(42, fetch(ans, store(ans, 42, Attachment))).
-
-
-test_transform() ->
- Attachment = new([{counter, 0}]),
- Transformed = transform(counter, fun(Count) -> Count + 1 end, Attachment),
- ?assertEqual(1, fetch(counter, Transformed)).
-
-
--endif.
+%% -ifdef(TEST).
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%% % Eww...
+%% -include("couch_bt_engine.hrl").
+%%
+%% %% Test utilities
+%%
+%%
+%% empty_att() -> new().
+%%
+%%
+%% upgraded_empty_att() ->
+%% new([{headers, undefined}]).
+%%
+%%
+%% %% Test groups
+%%
+%%
+%% attachment_upgrade_test_() ->
+%% {"Lazy record upgrade tests", [
+%% {"Existing record fields don't upgrade",
+%% {with, empty_att(), [fun test_non_upgrading_fields/1]}
+%% },
+%% {"New fields upgrade",
+%% {with, empty_att(), [fun test_upgrading_fields/1]}
+%% }
+%% ]}.
+%%
+%%
+%% attachment_defaults_test_() ->
+%% {"Attachment defaults tests", [
+%% {"Records retain old default values", [
+%% {with, empty_att(), [fun test_legacy_defaults/1]}
+%% ]},
+%% {"Upgraded records inherit defaults", [
+%% {with, upgraded_empty_att(), [fun test_legacy_defaults/1]}
+%% ]},
+%% {"Undefined entries are elided on upgrade", [
+%% {with, upgraded_empty_att(), [fun test_elided_entries/1]}
+%% ]}
+%% ]}.
+%%
+%% attachment_field_api_test_() ->
+%% {"Basic attachment field api", [
+%% fun test_construction/0,
+%% fun test_store_and_fetch/0,
+%% fun test_transform/0
+%% ]}.
+%%
+%%
+%% attachment_disk_term_test_() ->
+%% BaseAttachment = new([
+%% {name, <<"empty">>},
+%% {type, <<"application/octet-stream">>},
+%% {att_len, 0},
+%% {disk_len, 0},
+%% {md5, <<212,29,140,217,143,0,178,4,233,128,9,152,236,248,66,126>>},
+%% {revpos, 4},
+%% {data, {stream, {couch_bt_engine_stream, {fake_fd, fake_sp}}}},
+%% {encoding, identity}
+%% ]),
+%% BaseDiskTerm = {
+%% <<"empty">>,
+%% <<"application/octet-stream">>,
+%% fake_sp,
+%% 0, 0, 4,
+%% <<212,29,140,217,143,0,178,4,233,128,9,152,236,248,66,126>>,
+%% identity
+%% },
+%% Headers = [{<<"X-Foo">>, <<"bar">>}],
+%% ExtendedAttachment = store(headers, Headers, BaseAttachment),
+%% ExtendedDiskTerm = {BaseDiskTerm, [{headers, Headers}]},
+%% FakeDb = test_util:fake_db([{engine, {couch_bt_engine, #st{fd=fake_fd}}}]),
+%% {"Disk term tests", [
+%% ?_assertEqual(BaseDiskTerm, to_disk_term(BaseAttachment)),
+%% ?_assertEqual(BaseAttachment, from_disk_term(FakeDb, BaseDiskTerm)),
+%% ?_assertEqual(ExtendedDiskTerm, to_disk_term(ExtendedAttachment)),
+%% ?_assertEqual(ExtendedAttachment, from_disk_term(FakeDb, ExtendedDiskTerm))
+%% ]}.
+%%
+%%
+%% attachment_json_term_test_() ->
+%% Props = [
+%% {<<"content_type">>, <<"application/json">>},
+%% {<<"digest">>, <<"md5-QCNtWUNXV0UzJnEjMk92YUk1JA==">>},
+%% {<<"length">>, 14},
+%% {<<"revpos">>, 1}
+%% ],
+%% PropsInline = [{<<"data">>, <<"eyJhbnN3ZXIiOiA0Mn0=">>}] ++ Props,
+%% InvalidProps = [{<<"data">>, <<"!Base64Encoded$">>}] ++ Props,
+%% Att = couch_att:new([
+%% {name, <<"attachment.json">>},
+%% {type, <<"application/json">>}
+%% ]),
+%% ResultStub = couch_att:new([
+%% {name, <<"attachment.json">>},
+%% {type, <<"application/json">>},
+%% {att_len, 14},
+%% {disk_len, 14},
+%% {md5, <<"@#mYCWWE3&q#2OvaI5$">>},
+%% {revpos, 1},
+%% {data, stub},
+%% {encoding, identity}
+%% ]),
+%% ResultFollows = ResultStub#att{data = follows},
+%% ResultInline = ResultStub#att{md5 = <<>>, data = <<"{\"answer\": 42}">>},
+%% {"JSON term tests", [
+%% ?_assertEqual(ResultStub, stub_from_json(Att, Props)),
+%% ?_assertEqual(ResultFollows, follow_from_json(Att, Props)),
+%% ?_assertEqual(ResultInline, inline_from_json(Att, PropsInline)),
+%% ?_assertThrow({bad_request, _}, inline_from_json(Att, Props)),
+%% ?_assertThrow({bad_request, _}, inline_from_json(Att, InvalidProps))
+%% ]}.
+%%
+%%
+%% attachment_stub_merge_test_() ->
+%% %% Stub merging needs to demonstrate revpos matching, skipping, and missing
+%% %% attachment errors.
+%% {"Attachment stub merging tests", []}.
+%%
+%%
+%% %% Test generators
+%%
+%%
+%% test_non_upgrading_fields(Attachment) ->
+%% Pairs = [
+%% {name, "cat.gif"},
+%% {type, "text/very-very-plain"},
+%% {att_len, 1024},
+%% {disk_len, 42},
+%% {md5, <<"md5-hashhashhash">>},
+%% {revpos, 4},
+%% {data, stub},
+%% {encoding, gzip}
+%% ],
+%% lists:foreach(
+%% fun({Field, Value}) ->
+%% ?assertMatch(#att{}, Attachment),
+%% Updated = store(Field, Value, Attachment),
+%% ?assertMatch(#att{}, Updated)
+%% end,
+%% Pairs).
+%%
+%%
+%% test_upgrading_fields(Attachment) ->
+%% ?assertMatch(#att{}, Attachment),
+%% UpdatedHeaders = store(headers, [{<<"Ans">>, <<"42">>}], Attachment),
+%% ?assertMatch(X when is_list(X), UpdatedHeaders),
+%% UpdatedHeadersUndefined = store(headers, undefined, Attachment),
+%% ?assertMatch(X when is_list(X), UpdatedHeadersUndefined).
+%%
+%%
+%% test_legacy_defaults(Attachment) ->
+%% ?assertEqual(<<>>, fetch(md5, Attachment)),
+%% ?assertEqual(0, fetch(revpos, Attachment)),
+%% ?assertEqual(identity, fetch(encoding, Attachment)).
+%%
+%%
+%% test_elided_entries(Attachment) ->
+%% ?assertNot(lists:keymember(name, 1, Attachment)),
+%% ?assertNot(lists:keymember(type, 1, Attachment)),
+%% ?assertNot(lists:keymember(att_len, 1, Attachment)),
+%% ?assertNot(lists:keymember(disk_len, 1, Attachment)),
+%% ?assertNot(lists:keymember(data, 1, Attachment)).
+%%
+%%
+%% test_construction() ->
+%% ?assert(new() == new()),
+%% Initialized = new([{name, <<"foo.bar">>}, {type, <<"application/qux">>}]),
+%% ?assertEqual(<<"foo.bar">>, fetch(name, Initialized)),
+%% ?assertEqual(<<"application/qux">>, fetch(type, Initialized)).
+%%
+%%
+%% test_store_and_fetch() ->
+%% Attachment = empty_att(),
+%% ?assertEqual(<<"abc">>, fetch(name, store(name, <<"abc">>, Attachment))),
+%% ?assertEqual(42, fetch(ans, store(ans, 42, Attachment))).
+%%
+%%
+%% test_transform() ->
+%% Attachment = new([{counter, 0}]),
+%% Transformed = transform(counter, fun(Count) -> Count + 1 end, Attachment),
+%% ?assertEqual(1, fetch(counter, Transformed)).
+%%
+%%
+%% -endif.
diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl
index e1d726dc9..8a7b2181e 100644
--- a/src/couch/src/couch_db.erl
+++ b/src/couch/src/couch_db.erl
@@ -212,6 +212,8 @@ is_db(_) ->
is_system_db(#db{options = Options}) ->
lists:member(sys_db, Options).
+is_clustered(#{}) ->
+ true;
is_clustered(#db{main_pid = nil}) ->
true;
is_clustered(#db{}) ->
diff --git a/src/couch/src/couch_doc.erl b/src/couch/src/couch_doc.erl
index 33ad14f0b..7224921d4 100644
--- a/src/couch/src/couch_doc.erl
+++ b/src/couch/src/couch_doc.erl
@@ -379,6 +379,17 @@ rev_info({#doc{} = Doc, {Pos, [RevId | _]}}) ->
body_sp = undefined,
seq = undefined,
rev = {Pos, RevId}
+ };
+rev_info({#{} = RevInfo, {Pos, [RevId | _]}}) ->
+ #{
+ deleted := Deleted,
+ sequence := Sequence
+ } = RevInfo,
+ #rev_info{
+ deleted = Deleted,
+ body_sp = undefined,
+ seq = fabric2_fdb:vs_to_seq(Sequence),
+ rev = {Pos, RevId}
}.
is_deleted(#full_doc_info{rev_tree=Tree}) ->
diff --git a/src/couch/src/couch_httpd_auth.erl b/src/couch/src/couch_httpd_auth.erl
index 45a82bd0f..e81cf040e 100644
--- a/src/couch/src/couch_httpd_auth.erl
+++ b/src/couch/src/couch_httpd_auth.erl
@@ -393,11 +393,12 @@ handle_session_req(#httpd{method='GET', user_ctx=UserCtx}=Req, _AuthModule) ->
{roles, UserCtx#user_ctx.roles}
]}},
{info, {[
- {authentication_db, ?l2b(config:get("couch_httpd_auth", "authentication_db"))},
{authentication_handlers, [
N || {N, _Fun} <- Req#httpd.authentication_handlers]}
] ++ maybe_value(authenticated, UserCtx#user_ctx.handler, fun(Handler) ->
Handler
+ end) ++ maybe_value(authentication_db, config:get("chttpd_auth", "authentication_db"), fun(Val) ->
+ ?l2b(Val)
end)}}
]})
end;
diff --git a/src/couch/src/couch_lru.erl b/src/couch/src/couch_lru.erl
index 6ad7c65cd..a3057136f 100644
--- a/src/couch/src/couch_lru.erl
+++ b/src/couch/src/couch_lru.erl
@@ -11,13 +11,16 @@
% the License.
-module(couch_lru).
--export([new/0, insert/2, update/2, close/1]).
+-export([new/0, sizes/1, insert/2, update/2, close/1]).
-include("couch_server_int.hrl").
new() ->
{gb_trees:empty(), dict:new()}.
+sizes({Tree, Dict}) ->
+ {gb_trees:size(Tree), dict:size(Dict)}.
+
insert(DbName, {Tree0, Dict0}) ->
Lru = couch_util:unique_monotonic_integer(),
{gb_trees:insert(Lru, DbName, Tree0), dict:store(DbName, Lru, Dict0)}.
diff --git a/src/couch/src/couch_multidb_changes.erl b/src/couch/src/couch_multidb_changes.erl
index e2bbda3e3..09278656e 100644
--- a/src/couch/src/couch_multidb_changes.erl
+++ b/src/couch/src/couch_multidb_changes.erl
@@ -24,7 +24,8 @@
handle_call/3,
handle_info/2,
handle_cast/2,
- code_change/3
+ code_change/3,
+ format_status/2
]).
-export([
@@ -174,6 +175,17 @@ code_change(_OldVsn, State, _Extra) ->
{ok, State}.
+format_status(_Opt, [_PDict, State]) ->
+ #state{
+ pids=Pids
+ } = State,
+ Scrubbed = State#state{
+ pids={length, length(Pids)}
+ },
+ [{data, [{"State",
+ ?record_to_keyval(state, Scrubbed)
+ }]}].
+
% Private functions
-spec register_with_event_server(pid()) -> reference().
diff --git a/src/couch/src/couch_native_process.erl b/src/couch/src/couch_native_process.erl
index eee8b2860..0a228d4c5 100644
--- a/src/couch/src/couch_native_process.erl
+++ b/src/couch/src/couch_native_process.erl
@@ -42,7 +42,7 @@
-vsn(1).
-export([start_link/0,init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,
- handle_info/2]).
+ handle_info/2,format_status/2]).
-export([set_timeout/2, prompt/2]).
-define(STATE, native_proc_state).
@@ -125,6 +125,21 @@ handle_info({'EXIT',_,Reason}, State) ->
terminate(_Reason, _State) -> ok.
code_change(_OldVersion, State, _Extra) -> {ok, State}.
+format_status(_Opt, [_PDict, State]) ->
+ #evstate{
+ ddocs = DDocs,
+ funs = Functions,
+ query_config = QueryConfig
+ } = State,
+ Scrubbed = State#evstate{
+ ddocs = {dict_size, dict:size(DDocs)},
+ funs = {length, length(Functions)},
+ query_config = {length, length(QueryConfig)}
+ },
+ [{data, [{"State",
+ ?record_to_keyval(evstate, Scrubbed)
+ }]}].
+
run(#evstate{list_pid=Pid}=State, [<<"list_row">>, Row]) when is_pid(Pid) ->
Pid ! {self(), list_row, Row},
receive
diff --git a/src/couch/src/couch_proc_manager.erl b/src/couch/src/couch_proc_manager.erl
index 0daef3ee9..b83d78882 100644
--- a/src/couch/src/couch_proc_manager.erl
+++ b/src/couch/src/couch_proc_manager.erl
@@ -31,7 +31,8 @@
handle_call/3,
handle_cast/2,
handle_info/2,
- code_change/3
+ code_change/3,
+ format_status/2
]).
-export([
@@ -109,7 +110,6 @@ init([]) ->
ets:new(?SERVERS, [public, named_table, set]),
ets:insert(?SERVERS, get_servers_from_env("COUCHDB_QUERY_SERVER_")),
ets:insert(?SERVERS, get_servers_from_env("COUCHDB_NATIVE_QUERY_SERVER_")),
- ets:insert(?SERVERS, [{"QUERY", {mango_native_proc, start_link, []}}]),
maybe_configure_erlang_native_servers(),
{ok, #state{
@@ -269,6 +269,19 @@ handle_info(_Msg, State) ->
code_change(_OldVsn, #state{}=State, _Extra) ->
{ok, State}.
+
+format_status(_Opt, [_PDict, State]) ->
+ #state{
+ counts=Counts
+ } = State,
+ Scrubbed = State#state{
+ counts={dict_size, dict:size(Counts)}
+ },
+ [{data, [{"State",
+ ?record_to_keyval(state, Scrubbed)
+ }]}].
+
+
handle_config_terminate(_, stop, _) ->
ok;
handle_config_terminate(_Server, _Reason, _State) ->
diff --git a/src/couch/src/couch_server.erl b/src/couch/src/couch_server.erl
index 6db3f7448..42eab738c 100644
--- a/src/couch/src/couch_server.erl
+++ b/src/couch/src/couch_server.erl
@@ -18,7 +18,7 @@
-export([open/2,create/2,delete/2,get_version/0,get_version/1,get_git_sha/0,get_uuid/0]).
-export([all_databases/0, all_databases/2]).
-export([init/1, handle_call/3,sup_start_link/0]).
--export([handle_cast/2,code_change/3,handle_info/2,terminate/2]).
+-export([handle_cast/2,code_change/3,handle_info/2,terminate/2,format_status/2]).
-export([dev_start/0,is_admin/2,has_admins/0,get_stats/0]).
-export([close_lru/0]).
-export([close_db_if_idle/1]).
@@ -237,12 +237,6 @@ init([]) ->
couch_util:set_mqd_off_heap(?MODULE),
couch_util:set_process_priority(?MODULE, high),
- % Mark pluggable storage engines as a supported feature
- config:enable_feature('pluggable-storage-engines'),
-
- % Mark partitioned databases as a supported feature
- config:enable_feature(partitioned),
-
% Mark being able to receive documents with an _access property as a supported feature
config:enable_feature('access-ready'),
@@ -304,6 +298,10 @@ terminate(Reason, Srv) ->
end, nil, couch_dbs),
ok.
+format_status(_Opt, [_PDict, Srv]) ->
+ Scrubbed = Srv#server{lru=couch_lru:sizes(Srv#server.lru)},
+ [{data, [{"State", ?record_to_keyval(server, Scrubbed)}]}].
+
handle_config_change("couchdb", "database_dir", _, _, _) ->
exit(whereis(couch_server), config_change),
remove_handler;
diff --git a/src/couch/src/couch_stream.erl b/src/couch/src/couch_stream.erl
index 2ab46d7e7..d8b7e0ffe 100644
--- a/src/couch/src/couch_stream.erl
+++ b/src/couch/src/couch_stream.erl
@@ -36,7 +36,8 @@
handle_call/3,
handle_cast/2,
handle_info/2,
- code_change/3
+ code_change/3,
+ format_status/2
]).
@@ -294,6 +295,19 @@ handle_info(_Info, State) ->
{noreply, State}.
+format_status(_Opt, [_PDict, Stream]) ->
+ #stream{
+ written_pointers=Pointers,
+ buffer_list = Buffer
+ } = Stream,
+ Scrubbed = Stream#stream{
+ written_pointers={length, length(Pointers)},
+ buffer_list = {length, length(Buffer)}
+ },
+ [{data, [{"State",
+ ?record_to_keyval(stream, Scrubbed)
+ }]}].
+
do_seek({Engine, EngineState}, Offset) ->
{ok, NewState} = Engine:seek(EngineState, Offset),
{Engine, NewState}.
diff --git a/src/couch/src/couch_util.erl b/src/couch/src/couch_util.erl
index dffb68152..fc66f36f4 100644
--- a/src/couch/src/couch_util.erl
+++ b/src/couch/src/couch_util.erl
@@ -14,7 +14,7 @@
-export([priv_dir/0, normpath/1, fold_files/5]).
-export([should_flush/0, should_flush/1, to_existing_atom/1]).
--export([rand32/0, implode/2, collate/2, collate/3]).
+-export([rand32/0, implode/2, collate/2, collate/3, get_sort_key/1]).
-export([abs_pathname/1,abs_pathname/2, trim/1, drop_dot_couch_ext/1]).
-export([encodeBase64Url/1, decodeBase64Url/1]).
-export([validate_utf8/1, to_hex/1, parse_term/1, dict_find/3]).
@@ -47,15 +47,17 @@
-define(FLUSH_MAX_MEM, 10000000).
-define(BLACKLIST_CONFIG_SECTIONS, [
- <<"daemons">>,
- <<"external">>,
- <<"httpd_design_handlers">>,
- <<"httpd_db_handlers">>,
- <<"httpd_global_handlers">>,
- <<"native_query_servers">>,
- <<"os_daemons">>,
- <<"query_servers">>,
- <<"feature_flags">>
+ <<"^daemons$">>,
+ <<"^external$">>,
+ <<"^httpd_design_handlers$">>,
+ <<"^httpd_db_handlers$">>,
+ <<"^httpd_global_handlers$">>,
+ <<"^native_query_servers$">>,
+ <<"^os_daemons$">>,
+ <<"^query_servers$">>,
+ <<"^feature_flags$">>,
+ <<"^tracing\..*$">>,
+ <<"^tracing$">>
]).
@@ -407,11 +409,20 @@ collate(A, B, Options) when is_binary(A), is_binary(B) ->
SizeA = byte_size(A),
SizeB = byte_size(B),
Bin = <<SizeA:32/native, A/binary, SizeB:32/native, B/binary>>,
- [Result] = erlang:port_control(drv_port(), Operation, Bin),
+ <<Result>> = erlang:port_control(drv_port(), Operation, Bin),
% Result is 0 for lt, 1 for eq and 2 for gt. Subtract 1 to return the
% expected typical -1, 0, 1
Result - 1.
+get_sort_key(Str) when is_binary(Str) ->
+ Operation = 2, % get_sort_key
+ Size = byte_size(Str),
+ Bin = <<Size:32/native, Str/binary>>,
+ case erlang:port_control(drv_port(), Operation, Bin) of
+ <<>> -> error;
+ Res -> Res
+ end.
+
should_flush() ->
should_flush(?FLUSH_MAX_MEM).
@@ -756,10 +767,13 @@ unique_monotonic_integer() ->
check_config_blacklist(Section) ->
- case lists:member(Section, ?BLACKLIST_CONFIG_SECTIONS) of
- true ->
- Msg = <<"Config section blacklisted for modification over HTTP API.">>,
- throw({forbidden, Msg});
- _ ->
- ok
- end.
+ lists:foreach(fun(RegExp) ->
+ case re:run(Section, RegExp) of
+ nomatch ->
+ ok;
+ _ ->
+ Msg = <<"Config section blacklisted for modification over HTTP API.">>,
+ throw({forbidden, Msg})
+ end
+ end, ?BLACKLIST_CONFIG_SECTIONS),
+ ok.
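Editorial illustration (not part of the patch) of the behavioural change: the blacklist entries are now regular expressions, so dotted subsections such as `tracing.filters` are rejected as well. The section names below are assumptions:

```erlang
%% Sketch: any section matching a blacklist regex is rejected with a throw.
ok = couch_util:check_config_blacklist(<<"log">>),
try couch_util:check_config_blacklist(<<"tracing.filters">>) of
    ok -> error(expected_forbidden)
catch
    throw:{forbidden, _Msg} -> ok
end.
```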
diff --git a/src/couch/src/couch_work_queue.erl b/src/couch/src/couch_work_queue.erl
index 5d747de82..01271bb35 100644
--- a/src/couch/src/couch_work_queue.erl
+++ b/src/couch/src/couch_work_queue.erl
@@ -21,7 +21,7 @@
% gen_server callbacks
-export([init/1, terminate/2]).
--export([handle_call/3, handle_cast/2, code_change/3, handle_info/2]).
+-export([handle_call/3, handle_cast/2, code_change/3, handle_info/2, format_status/2]).
-record(q, {
queue = queue:new(),
@@ -49,7 +49,7 @@ queue(Wq, Item) ->
dequeue(Wq) ->
dequeue(Wq, all).
-
+
dequeue(Wq, MaxItems) ->
try
gen_server:call(Wq, {dequeue, MaxItems}, infinity)
@@ -76,7 +76,7 @@ size(Wq) ->
close(Wq) ->
gen_server:cast(Wq, close).
-
+
init(Options) ->
Q = #q{
@@ -90,7 +90,7 @@ init(Options) ->
terminate(_Reason, #q{work_waiters=Workers}) ->
lists:foreach(fun({W, _}) -> gen_server:reply(W, closed) end, Workers).
-
+
handle_call({queue, Item, Size}, From, #q{work_waiters = []} = Q0) ->
Q = Q0#q{size = Q0#q.size + Size,
items = Q0#q.items + 1,
@@ -172,7 +172,7 @@ dequeue_items(NumItems, Size, Queue, Blocked, DequeuedAcc) ->
end,
dequeue_items(
NumItems - 1, Size - ItemSize, Queue2, Blocked2, [Item | DequeuedAcc]).
-
+
handle_cast(close, #q{items = 0} = Q) ->
{stop, normal, Q};
@@ -186,3 +186,18 @@ code_change(_OldVsn, State, _Extra) ->
handle_info(X, Q) ->
{stop, X, Q}.
+
+format_status(_Opt, [_PDict, Queue]) ->
+ #q{
+ queue = Q,
+ blocked = Blocked,
+ work_waiters = Waiters
+ } = Queue,
+ Scrubbed = Queue#q{
+ queue = {queue_length, queue:len(Q)},
+ blocked = {length, length(Blocked)},
+ work_waiters = {length, length(Waiters)}
+ },
+ [{data, [{"State",
+ ?record_to_keyval(q, Scrubbed)
+ }]}].
diff --git a/src/couch/test/eunit/couch_util_tests.erl b/src/couch/test/eunit/couch_util_tests.erl
index 012c961a4..e37691110 100644
--- a/src/couch/test/eunit/couch_util_tests.erl
+++ b/src/couch/test/eunit/couch_util_tests.erl
@@ -14,6 +14,12 @@
-include_lib("couch/include/couch_eunit.hrl").
+% For generating Poisson-distributed string lengths
+% in the random unicode generation. This shoots
+% for lengths centered around 24 characters. To
+% change, replace this value with math:exp(-Length).
+-define(POISSON_LIMIT, 3.775134544279098e-11).
+-define(RANDOM_TEST_SIZE, 10000).
setup() ->
%% We cannot start driver from here since it becomes bounded to eunit
@@ -175,3 +181,137 @@ json_decode_test_() ->
?_assertEqual({[]}, couch_util:json_decode(<<"{}">>, [])),
?_assertEqual(#{}, couch_util:json_decode(<<"{}">>, [return_maps]))
].
+
+sort_key_test_() ->
+ {
+ "Sort Key tests",
+ [
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun test_get_sort_key/1,
+ fun test_get_sort_key_jiffy_string/1,
+ fun test_get_sort_key_fails_on_bad_input/1,
+ fun test_get_sort_key_longer_than_buffer/1,
+ fun test_sort_key_collation/1,
+ fun test_sort_key_list_sort/1
+ ]
+ }
+ ]
+ }.
+
+test_get_sort_key(_) ->
+ Strs = [
+ <<"">>,
+ <<"foo">>,
+ <<"bar">>,
+ <<"Bar">>,
+ <<"baz">>,
+ <<"BAZ">>,
+ <<"quaz">>,
+ <<"1234fdsa">>,
+ <<"1234">>,
+ <<"pizza">>
+ ],
+ Pairs = [{S1, S2} || S1 <- Strs, S2 <- Strs],
+ lists:map(fun({S1, S2}) ->
+ S1K = couch_util:get_sort_key(S1),
+ S2K = couch_util:get_sort_key(S2),
+ SortRes = sort_keys(S1K, S2K),
+ Comment = list_to_binary(io_lib:format("strcmp(~p, ~p)", [S1, S2])),
+ CollRes = couch_util:collate(S1, S2),
+ {Comment, ?_assertEqual(SortRes, CollRes)}
+ end, Pairs).
+
+test_get_sort_key_jiffy_string(_) ->
+ %% jiffy:decode does not null terminate strings
+ %% so we use it here to test unterminated strings
+ {[{S1,S2}]} = jiffy:decode(<<"{\"foo\": \"bar\"}">>),
+ S1K = couch_util:get_sort_key(S1),
+ S2K = couch_util:get_sort_key(S2),
+ SortRes = sort_keys(S1K, S2K),
+ CollRes = couch_util:collate(S1, S2),
+ ?_assertEqual(SortRes, CollRes).
+
+test_get_sort_key_fails_on_bad_input(_) ->
+ %% generated with crypto:strong_rand_bytes
+ %% contains invalid character, should error
+ S = <<209,98,222,144,60,163,72,134,206,157>>,
+ Res = couch_util:get_sort_key(S),
+ ?_assertEqual(error, Res).
+
+test_get_sort_key_longer_than_buffer(_) ->
+ %% stack allocated buffer is 1024 units
+ %% test resize logic with strings > 1024 char
+ Extra = list_to_binary(["a" || _ <- lists:seq(1, 1200)]),
+ ?_assert(is_binary(Extra)).
+
+test_sort_key_collation(_) ->
+ ?_test(begin
+ lists:foreach(fun(_) ->
+ K1 = random_unicode_binary(),
+ SK1 = couch_util:get_sort_key(K1),
+
+ K2 = random_unicode_binary(),
+ SK2 = couch_util:get_sort_key(K2),
+
+ % Probably kinda silly but whatevs
+ ?assertEqual(couch_util:collate(K1, K1), sort_keys(SK1, SK1)),
+ ?assertEqual(couch_util:collate(K2, K2), sort_keys(SK2, SK2)),
+
+ ?assertEqual(couch_util:collate(K1, K2), sort_keys(SK1, SK2)),
+ ?assertEqual(couch_util:collate(K2, K1), sort_keys(SK2, SK1))
+ end, lists:seq(1, ?RANDOM_TEST_SIZE))
+ end).
+
+test_sort_key_list_sort(_) ->
+ ?_test(begin
+ RandomKeys = lists:map(fun(_) ->
+ random_unicode_binary()
+ end, lists:seq(1, ?RANDOM_TEST_SIZE)),
+
+ CollationSorted = lists:sort(fun(A, B) ->
+ couch_util:collate(A, B) =< 0
+ end, RandomKeys),
+
+ SortKeys = lists:map(fun(K) ->
+ {couch_util:get_sort_key(K), K}
+ end, RandomKeys),
+ {_, SortKeySorted} = lists:unzip(lists:sort(SortKeys)),
+
+ ?assertEqual(CollationSorted, SortKeySorted)
+ end).
+
+sort_keys(S1, S2) ->
+ case S1 < S2 of
+ true ->
+ -1;
+ false -> case S1 =:= S2 of
+ true ->
+ 0;
+ false ->
+ 1
+ end
+ end.
+
+random_unicode_binary() ->
+ Size = poisson_length(0, rand:uniform()),
+ Chars = [random_unicode_char() || _ <- lists:seq(1, Size)],
+ <<_/binary>> = unicode:characters_to_binary(Chars).
+
+poisson_length(N, Acc) when Acc > ?POISSON_LIMIT ->
+ poisson_length(N + 1, Acc * rand:uniform());
+poisson_length(N, _) ->
+ N.
+
+random_unicode_char() ->
+ BaseChar = rand:uniform(16#FFFD + 1) - 1,
+ case BaseChar of
+ BC when BC >= 16#D800, BC =< 16#DFFF ->
+ % This range is reserved for surrogate pair
+ % encodings.
+ random_unicode_char();
+ BC ->
+ BC
+ end.
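Editorial note (not part of the patch): the property these tests exercise is that `get_sort_key/1` returns plain binaries whose byte order agrees with `couch_util:collate/2`, so they can be compared or sorted directly. A minimal sketch:

```erlang
%% Sketch: comparing ICU sort keys with the regular term order gives the
%% same answer as collating the original strings.
KA = couch_util:get_sort_key(<<"apple">>),
KB = couch_util:get_sort_key(<<"Banana">>),
true = (KA < KB) =:= (couch_util:collate(<<"apple">>, <<"Banana">>) < 0).
```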
diff --git a/src/couch_epi/test/eunit/couch_epi_tests.erl b/src/couch_epi/test/eunit/couch_epi_tests.erl
index 12d8610c1..23b9e6103 100644
--- a/src/couch_epi/test/eunit/couch_epi_tests.erl
+++ b/src/couch_epi/test/eunit/couch_epi_tests.erl
@@ -162,7 +162,8 @@ start_epi(Plugins) ->
Module
end, Plugins),
application:set_env(couch_epi, plugins, PluginsModules),
- application:start(couch_epi).
+ {ok, _} = application:ensure_all_started(couch_epi),
+ ok.
setup(data_file) ->
error_logger:tty(false),
diff --git a/src/couch_eval/README.md b/src/couch_eval/README.md
new file mode 100644
index 000000000..048a165fb
--- /dev/null
+++ b/src/couch_eval/README.md
@@ -0,0 +1,5 @@
+couch_eval
+=====
+
+An initial abstraction layer for evaluating user-provided code. So far
+this is only used by `couch_views` to provide map function support. Currently this is implemented in `couch_js` by reusing the existing `couchjs` mechanics.
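Editorial sketch (not part of the patch) of how a caller such as `couch_views` might drive the API defined in `src/couch_eval/src/couch_eval.erl` below; it assumes a `javascript` backend has been registered under the `couch_eval.languages` config section, and the map function is made up:

```erlang
%% Sketch of the acquire/map/release cycle; values are illustrative.
Docs = [],  % normally a list of #doc{} records
{ok, Ctx} = couch_eval:acquire_map_context(
    <<"mydb">>, <<"_design/example">>, <<"javascript">>, <<"sig">>, [],
    [<<"function(doc) { emit(doc._id, 1); }">>]),
try
    {ok, _Results} = couch_eval:map_docs(Ctx, Docs)
after
    ok = couch_eval:release_map_context(Ctx)
end.
```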
diff --git a/src/mango/src/mango_idx_view.hrl b/src/couch_eval/rebar.config
index 0d213e56e..362c8785e 100644
--- a/src/mango/src/mango_idx_view.hrl
+++ b/src/couch_eval/rebar.config
@@ -2,7 +2,7 @@
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
-% http://www.apache.org/licenses/LICENSE-2.0
+% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
@@ -10,4 +10,5 @@
% License for the specific language governing permissions and limitations under
% the License.
--define(MAX_JSON_OBJ, {<<255, 255, 255, 255>>}). \ No newline at end of file
+{cover_enabled, true}.
+{cover_print_enabled, true}.
diff --git a/src/couch_eval/src/couch_eval.app.src b/src/couch_eval/src/couch_eval.app.src
new file mode 100644
index 000000000..87193d806
--- /dev/null
+++ b/src/couch_eval/src/couch_eval.app.src
@@ -0,0 +1,23 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+{application, couch_eval, [
+ {description, "An OTP application"},
+ {vsn, git},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib,
+ couch_log,
+ config
+ ]}
+ ]}.
diff --git a/src/couch_eval/src/couch_eval.erl b/src/couch_eval/src/couch_eval.erl
new file mode 100644
index 000000000..3541a5b94
--- /dev/null
+++ b/src/couch_eval/src/couch_eval.erl
@@ -0,0 +1,100 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+-module(couch_eval).
+
+
+-export([
+ acquire_map_context/6,
+ release_map_context/1,
+ map_docs/2
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+
+
+-type db_name() :: binary().
+-type doc_id() :: binary().
+-type ddoc_id() :: binary().
+-type language() :: binary().
+-type sig() :: binary().
+-type lib() :: any().
+-type map_fun() :: binary().
+-type map_funs() :: [map_fun()].
+-type result() :: {doc_id(), [[{any(), any()}]]}.
+-type api_mod() :: atom().
+-type context() :: {api_mod(), any()}.
+
+-type context_opts() :: #{
+ db_name := db_name(),
+ ddoc_id => ddoc_id(),
+ language => language(),
+ sig => sig(),
+ lib => lib(),
+ map_funs => map_funs(),
+ api_mod => api_mod()
+}.
+
+
+-callback acquire_map_context(context_opts()) -> {ok, any()} | {error, any()}.
+-callback release_map_context(context()) -> ok | {error, any()}.
+-callback map_docs(context(), [doc()]) -> {ok, [result()]} | {error, any()}.
+
+
+-spec acquire_map_context(
+ db_name(),
+ ddoc_id(),
+ language(),
+ sig(),
+ lib(),
+ map_funs()
+ ) -> {ok, context()} | {error, any()}.
+acquire_map_context(DbName, DDocId, Language, Sig, Lib, MapFuns) ->
+ ApiMod = get_api_mod(Language),
+ CtxOpts = #{
+ db_name => DbName,
+ ddoc_id => DDocId,
+ language => Language,
+ sig => Sig,
+ lib => Lib,
+ map_funs => MapFuns
+ },
+ {ok, Ctx} = ApiMod:acquire_map_context(CtxOpts),
+ {ok, {ApiMod, Ctx}}.
+
+
+-spec release_map_context(context()) -> ok | {error, any()}.
+release_map_context(nil) ->
+ ok;
+
+release_map_context({ApiMod, Ctx}) ->
+ ApiMod:release_map_context(Ctx).
+
+
+-spec map_docs(context(), [doc()]) -> {ok, result()} | {error, any()}.
+map_docs({ApiMod, Ctx}, Docs) ->
+ ApiMod:map_docs(Ctx, Docs).
+
+
+get_api_mod(Language) when is_binary(Language) ->
+ try
+ LangStr = binary_to_list(Language),
+ ModStr = config:get("couch_eval.languages", LangStr),
+ if ModStr /= undefined -> ok; true ->
+ erlang:error({unknown_eval_api_language, Language})
+ end,
+ list_to_existing_atom(ModStr)
+ catch error:badarg ->
+ erlang:error({invalid_eval_api_mod, Language})
+ end.
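For illustration (not part of the patch), `get_api_mod/1` resolves the backend module from the `couch_eval.languages` config section; using `couch_js` as the handler module is an assumption here:

```erlang
%% Sketch: register a language backend so get_api_mod/1 can resolve it.
ok = config:set("couch_eval.languages", "javascript", "couch_js", false).
```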
diff --git a/src/couch_expiring_cache/README.md b/src/couch_expiring_cache/README.md
new file mode 100644
index 000000000..2ab1699db
--- /dev/null
+++ b/src/couch_expiring_cache/README.md
@@ -0,0 +1,71 @@
+# Couch Expiring Cache
+
+This is a library for creating an FDB backed key value cache, where
+each entry has a `stale` and `expires` time associated with it. Once
+the current time exceeds the `expires` time, the entry is
+automatically removed. The `stale` time can be used to indicate that a
+refresh is necessary, while still returning a non-expired value. It is
+potentially useful for implementing, for example, caches of data from
+external systems of record, such as an OAuth 2 provider.
+
+The data model is based on this [FDB forum discussion](
+https://forums.foundationdb.org/t/designing-key-value-expiration-in-fdb/156).
+
+```
+(?EXPIRING_CACHE, Name, ?PK, Key) := (Val, StaleTS, ExpireTS)
+(?EXPIRING_CACHE, Name, ?EXP, ExpireTS, Key) := ()
+```
+where `Name` is a unique namespace for a particular use case. N.B.
+that it's possible for cache data to remain indefinitely in FDB when a
+`Name` is changed or retired with unexpired entries. For such cases,
+we provide `couch_expiring_cache_fdb:clear_all/1` to manually clean
+up those entries.
+
+## Example
+
+Typical usage for this library is to create a separate behaviour
+module for each `Name`, which internally starts a uniquely named
+`couch_expiring_cache_server` to handle expiration and removal of
+entries for that `Name`. For example, to cache authorization decisions
+from an external source, one could implement a module like the
+following:
+
+```erlang
+-module(auth_fdb_decision_cache).
+
+-behaviour(couch_expiring_cache_server).
+
+-export([
+ start_link/0
+]).
+
+
+-define(CACHE_NAME, <<"auth-decision">>).
+
+
+start_link() ->
+ Opts = #{
+ cache_name => ?CACHE_NAME,
+ period => 1000, % clear expired entries every second
+ batch_size => 500, % clear at most 500 entries each period
+ max_jitter => 10
+ },
+ couch_expiring_cache_server:start_link(?MODULE, Opts).
+```
+
+## Modules
+
+* `couch_expiring_cache`: The API module. It contains functions for
+  inserting and looking up cache entries, which are simply
+  pass-throughs to `couch_expiring_cache_fdb`.
+
+* `couch_expiring_cache_fdb`: The module which interacts with FDB. In
+  addition to insertion and lookup functions, it also contains a
+  function to clear an expired range, which is called periodically
+  from instances of `couch_expiring_cache_server`.
+
+* `couch_expiring_cache_server`: An "abstract" gen_server; a specific
+  behaviour of this module should be created for each `Name`, which
+  can override the default expiration parameters. It periodically
+  removes expired cache entries using configurable parameters for
+  period, jitter, and batch size.
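Editorial sketch (not part of the patch) of the basic insert/lookup flow through the API module, assuming `fabric` and `couch_jobs` are running; the name, key, and timestamps are illustrative:

```erlang
%% Sketch: cache a value that is fresh for 5 seconds and expires after 30.
Now = erlang:system_time(millisecond),
ok = couch_expiring_cache:insert(<<"auth-decision">>, <<"user:bob">>,
    <<"allow">>, Now + 5000, Now + 30000),
case couch_expiring_cache:lookup(<<"auth-decision">>, <<"user:bob">>) of
    {fresh, Val} -> Val;           % before StaleTS
    {stale, Val} -> Val;           % still usable, refresh recommended
    expired      -> needs_refresh; % past ExpiresTS, not yet swept
    not_found    -> needs_refresh
end.
```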
diff --git a/src/couch_expiring_cache/include/couch_expiring_cache.hrl b/src/couch_expiring_cache/include/couch_expiring_cache.hrl
new file mode 100644
index 000000000..78e6a8552
--- /dev/null
+++ b/src/couch_expiring_cache/include/couch_expiring_cache.hrl
@@ -0,0 +1,17 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-define(TIME_UNIT, millisecond).
+
+-type millisecond() :: non_neg_integer().
+
+-type jtx() :: map() | undefined | tuple(). % copied from couch_jobs.hrl
diff --git a/src/couch_expiring_cache/rebar.config b/src/couch_expiring_cache/rebar.config
new file mode 100644
index 000000000..362c8785e
--- /dev/null
+++ b/src/couch_expiring_cache/rebar.config
@@ -0,0 +1,14 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+{cover_enabled, true}.
+{cover_print_enabled, true}.
diff --git a/src/couch_expiring_cache/src/couch_expiring_cache.app.src b/src/couch_expiring_cache/src/couch_expiring_cache.app.src
new file mode 100644
index 000000000..27d58ee0e
--- /dev/null
+++ b/src/couch_expiring_cache/src/couch_expiring_cache.app.src
@@ -0,0 +1,27 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+{application, couch_expiring_cache, [
+ {description, "CouchDB Expiring Cache"},
+ {vsn, git},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib,
+ erlfdb,
+ config,
+ couch_log,
+ couch_stats,
+ couch_jobs,
+ fabric
+ ]}
+]}.
diff --git a/src/couch_expiring_cache/src/couch_expiring_cache.erl b/src/couch_expiring_cache/src/couch_expiring_cache.erl
new file mode 100644
index 000000000..b26556e98
--- /dev/null
+++ b/src/couch_expiring_cache/src/couch_expiring_cache.erl
@@ -0,0 +1,56 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_expiring_cache).
+
+-export([
+ insert/5,
+ insert/6,
+ lookup/2,
+ lookup/3
+]).
+
+
+-include_lib("couch_expiring_cache/include/couch_expiring_cache.hrl").
+
+
+-spec insert(Name :: binary(), Key :: binary(), Value :: binary(),
+ StaleTS :: ?TIME_UNIT(), ExpiresTS :: ?TIME_UNIT()) -> ok.
+insert(Name, Key, Value, StaleTS, ExpiresTS)
+ when is_binary(Name), is_binary(Key), is_binary(Value),
+ is_integer(StaleTS), is_integer(ExpiresTS) ->
+ insert(undefined, Name, Key, Value, StaleTS, ExpiresTS).
+
+
+-spec insert(Tx :: jtx(), Name :: binary(), Key :: binary(), Value :: binary(),
+ StaleTS :: ?TIME_UNIT(), ExpiresTS :: ?TIME_UNIT()) -> ok.
+insert(Tx, Name, Key, Value, StaleTS, ExpiresTS)
+ when is_binary(Name), is_binary(Key), is_binary(Value),
+ is_integer(StaleTS), is_integer(ExpiresTS) ->
+ couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
+ couch_expiring_cache_fdb:insert(
+ JTx, Name, Key, Value, StaleTS, ExpiresTS)
+ end).
+
+
+-spec lookup(Name :: binary(), Key :: binary()) ->
+ not_found | {fresh, Val :: binary()} | {stale, Val :: binary()} | expired.
+lookup(Name, Key) when is_binary(Name), is_binary(Key) ->
+ lookup(undefined, Name, Key).
+
+
+-spec lookup(Tx :: jtx(), Name :: binary(), Key :: binary()) ->
+ not_found | {fresh, Val :: binary()} | {stale, Val :: binary()} | expired.
+lookup(Tx, Name, Key) when is_binary(Name), is_binary(Key) ->
+ couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
+ couch_expiring_cache_fdb:lookup(JTx, Name, Key)
+ end).
diff --git a/src/couch_expiring_cache/src/couch_expiring_cache_fdb.erl b/src/couch_expiring_cache/src/couch_expiring_cache_fdb.erl
new file mode 100644
index 000000000..7c4ad8f6f
--- /dev/null
+++ b/src/couch_expiring_cache/src/couch_expiring_cache_fdb.erl
@@ -0,0 +1,155 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_expiring_cache_fdb).
+
+-export([
+ get_range_to/3,
+ insert/6,
+ lookup/3,
+ clear_all/1,
+ clear_range_to/3
+]).
+
+
+-define(PK, 1).
+-define(EXP, 2).
+
+
+-include_lib("fabric/include/fabric2.hrl").
+-include_lib("couch_expiring_cache/include/couch_expiring_cache.hrl").
+
+
+% Data model
+% see: https://forums.foundationdb.org/t/designing-key-value-expiration-in-fdb/156
+%
+% (?EXPIRING_CACHE, Name, ?PK, Key) := (Val, StaleTS, ExpiresTS)
+% (?EXPIRING_CACHE, Name, ?EXP, ExpiresTS, Key) := ()
+
+
+-spec insert(JTx :: jtx(), Name :: binary(), Key :: binary(), Value :: binary(),
+ StaleTS :: ?TIME_UNIT, ExpiresTS :: ?TIME_UNIT) -> ok.
+insert(#{jtx := true} = JTx, Name, Key, Val, StaleTS, ExpiresTS) ->
+ #{tx := Tx, layer_prefix := LayerPrefix} = couch_jobs_fdb:get_jtx(JTx),
+ PK = primary_key(Name, Key, LayerPrefix),
+ case get_val(Tx, PK) of
+ not_found ->
+ ok;
+ {_OldVal, _OldStaleTS, OldExpiresTS} ->
+ % Clean up current expiry key for this primary key. No
+ % need to clean up the existing primary key since it will
+ % be overwritten below.
+ OldXK = expiry_key(OldExpiresTS, Name, Key, LayerPrefix),
+ ok = erlfdb:clear(Tx, OldXK)
+ end,
+ PV = erlfdb_tuple:pack({Val, StaleTS, ExpiresTS}),
+ ok = erlfdb:set(Tx, PK, PV),
+ XK = expiry_key(ExpiresTS, Name, Key, LayerPrefix),
+ XV = erlfdb_tuple:pack({}),
+ ok = erlfdb:set(Tx, XK, XV).
+
+
+-spec lookup(JTx :: jtx(), Name :: binary(), Key :: binary()) ->
+ not_found | {fresh, Val :: binary()} | {stale, Val :: binary()} | expired.
+lookup(#{jtx := true} = JTx, Name, Key) ->
+ #{tx := Tx, layer_prefix := LayerPrefix} = couch_jobs_fdb:get_jtx(JTx),
+ PK = primary_key(Name, Key, LayerPrefix),
+ case get_val(Tx, PK) of
+ not_found ->
+ not_found;
+ {Val, StaleTS, ExpiresTS} ->
+ Now = erlang:system_time(?TIME_UNIT),
+ if
+ Now < StaleTS -> {fresh, Val};
+ Now < ExpiresTS -> {stale, Val};
+ true -> expired
+ end
+ end.
+
+
+-spec clear_all(Name :: binary()) ->
+ ok.
+clear_all(Name) ->
+ fabric2_fdb:transactional(fun(Tx) ->
+ LayerPrefix = fabric2_fdb:get_dir(Tx),
+ NamePrefix = erlfdb_tuple:pack({?EXPIRING_CACHE, Name}, LayerPrefix),
+ erlfdb:clear_range_startswith(Tx, NamePrefix)
+ end).
+
+
+-spec clear_range_to(Name :: binary(), EndTS :: ?TIME_UNIT,
+ Limit :: non_neg_integer()) ->
+ OldestTS :: ?TIME_UNIT.
+clear_range_to(Name, EndTS, Limit) when Limit > 0 ->
+ fold_range(Name, EndTS, Limit,
+ fun(Tx, PK, XK, _Key, ExpiresTS, Acc) ->
+ ok = erlfdb:clear(Tx, PK),
+ ok = erlfdb:clear(Tx, XK),
+ oldest_ts(ExpiresTS, Acc)
+ end, 0).
+
+
+-spec get_range_to(Name :: binary(), EndTS :: ?TIME_UNIT,
+ Limit :: non_neg_integer()) ->
+ [{Key :: binary(), Val :: binary()}].
+get_range_to(Name, EndTS, Limit) when Limit > 0 ->
+ fold_range(Name, EndTS, Limit,
+ fun(Tx, PK, _XK, Key, _ExpiresTS, Acc) ->
+ case get_val(Tx, PK) of
+ not_found ->
+ couch_log:error("~p:entry missing Key: ~p", [?MODULE, Key]),
+ Acc;
+ Val ->
+ [{Key, Val} | Acc]
+ end
+ end, []).
+
+
+%% Private
+
+
+fold_range(Name, EndTS, Limit, Fun, Acc0) when Limit > 0 ->
+ fabric2_fdb:transactional(fun(Tx) ->
+ {LayerPrefix, ExpiresPrefix} = prefixes(Tx, Name),
+ fabric2_fdb:fold_range({tx, Tx}, ExpiresPrefix, fun({XK, _XV}, Acc) ->
+ {ExpiresTS, Key} = erlfdb_tuple:unpack(XK, ExpiresPrefix),
+ PK = primary_key(Name, Key, LayerPrefix),
+ Fun(Tx, PK, XK, Key, ExpiresTS, Acc)
+ end, Acc0, [{end_key, EndTS}, {limit, Limit}])
+ end).
+
+
+oldest_ts(TS, 0) -> TS; % handle initial Acc = 0 case
+oldest_ts(TS, OldestTS) -> min(TS, OldestTS).
+
+
+primary_key(Name, Key, Prefix) ->
+ erlfdb_tuple:pack({?EXPIRING_CACHE, Name, ?PK, Key}, Prefix).
+
+
+expiry_key(ExpiresTS, Name, Key, Prefix) ->
+ erlfdb_tuple:pack({?EXPIRING_CACHE, Name, ?EXP, ExpiresTS, Key}, Prefix).
+
+
+prefixes(Tx, Name) ->
+ Layer = fabric2_fdb:get_dir(Tx),
+ Expires = erlfdb_tuple:pack({?EXPIRING_CACHE, Name, ?EXP}, Layer),
+ {Layer, Expires}.
+
+
+get_val(Tx, PK) ->
+ case erlfdb:wait(erlfdb:get(Tx, PK)) of
+ not_found ->
+ not_found;
+ Bin when is_binary(Bin) ->
+ erlfdb_tuple:unpack(Bin)
+ end.
diff --git a/src/couch_expiring_cache/src/couch_expiring_cache_server.erl b/src/couch_expiring_cache/src/couch_expiring_cache_server.erl
new file mode 100644
index 000000000..74c432e25
--- /dev/null
+++ b/src/couch_expiring_cache/src/couch_expiring_cache_server.erl
@@ -0,0 +1,123 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_expiring_cache_server).
+
+-behaviour(gen_server).
+
+-callback start_link() -> {ok, pid()} | ignore | {error, term()}.
+
+-export([
+ now_ts/0,
+ start_link/2
+]).
+
+-export([
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ code_change/3
+]).
+
+
+-define(DEFAULT_BATCH_SIZE, 1000).
+-define(DEFAULT_PERIOD_MSEC, 5000).
+-define(DEFAULT_MAX_JITTER_MSEC, 1000).
+
+
+-include_lib("couch_expiring_cache/include/couch_expiring_cache.hrl").
+
+
+start_link(Name, Opts) when is_atom(Name) ->
+ gen_server:start_link({local, Name}, ?MODULE, Opts#{name => Name}, []).
+
+
+init(Opts) ->
+ DefaultCacheName = atom_to_binary(maps:get(name, Opts), utf8),
+ Period = maps:get(period, Opts, ?DEFAULT_PERIOD_MSEC),
+ MaxJitter = maps:get(max_jitter, Opts, ?DEFAULT_MAX_JITTER_MSEC),
+ {ok, #{
+ cache_name => maps:get(cache_name, Opts, DefaultCacheName),
+ batch_size => maps:get(batch_size, Opts, ?DEFAULT_BATCH_SIZE),
+ period => Period,
+ max_jitter => MaxJitter,
+ timer_ref => schedule_remove_expired(Period, MaxJitter),
+ oldest_ts => 0,
+ elapsed => 0,
+ largest_elapsed => 0,
+ lag => 0}}.
+
+
+terminate(_, _) ->
+ ok.
+
+
+handle_call(Msg, _From, St) ->
+ {stop, {bad_call, Msg}, {bad_call, Msg}, St}.
+
+
+handle_cast(Msg, St) ->
+ {stop, {bad_cast, Msg}, St}.
+
+
+handle_info(remove_expired, St) ->
+ #{
+ cache_name := Name,
+ batch_size := BatchSize,
+ period := Period,
+ max_jitter := MaxJitter,
+ oldest_ts := OldestTS0,
+ largest_elapsed := LargestElapsed
+ } = St,
+
+ NowTS = now_ts(),
+ OldestTS = max(OldestTS0,
+ couch_expiring_cache_fdb:clear_range_to(Name, NowTS, BatchSize)),
+ Elapsed = now_ts() - NowTS,
+
+ {noreply, St#{
+ timer_ref := schedule_remove_expired(Period, MaxJitter),
+ oldest_ts := OldestTS,
+ elapsed := Elapsed,
+ largest_elapsed := max(Elapsed, LargestElapsed),
+ lag := NowTS - OldestTS}};
+
+
+handle_info({Ref, ready}, St) when is_reference(Ref) ->
+ % Prevent crashing server and application
+ LogMsg = "~p : spurious erlfdb future ready message ~p",
+ couch_log:error(LogMsg, [?MODULE, Ref]),
+ {noreply, St};
+
+
+handle_info(Msg, St) ->
+ {stop, {bad_info, Msg}, St}.
+
+
+code_change(_OldVsn, St, _Extra) ->
+ {ok, St}.
+
+
+now_ts() ->
+ {Mega, Sec, Micro} = os:timestamp(),
+ ((Mega * 1000000) + Sec) * 1000 + Micro div 1000.
+
+
+%% Private
+
+
+schedule_remove_expired(Timeout, MaxJitter) ->
+ Jitter = max(Timeout div 2, MaxJitter),
+ Wait = Timeout + rand:uniform(max(1, Jitter)),
+ erlang:send_after(Wait, self(), remove_expired).
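A brief worked example (editorial, not part of the patch) of the delay computed by `schedule_remove_expired/2` with the defaults above:

```erlang
%% With the default period (5000 ms) and max jitter (1000 ms), the jitter
%% floor is half the period, so the next sweep fires 5001..7500 ms later.
Timeout = 5000, MaxJitter = 1000,
Jitter = max(Timeout div 2, MaxJitter),         % 2500
Wait = Timeout + rand:uniform(max(1, Jitter)).  % 5001..7500
```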
diff --git a/src/couch_expiring_cache/test/couch_expiring_cache_tests.erl b/src/couch_expiring_cache/test/couch_expiring_cache_tests.erl
new file mode 100644
index 000000000..0780b8847
--- /dev/null
+++ b/src/couch_expiring_cache/test/couch_expiring_cache_tests.erl
@@ -0,0 +1,147 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_expiring_cache_tests).
+
+
+-include_lib("couch/include/couch_eunit.hrl").
+
+-include_lib("couch_expiring_cache/include/couch_expiring_cache.hrl").
+
+
+-define(CACHE_NAME, atom_to_binary(?MODULE, utf8)).
+
+-define(FOREVER, 576460752303423488). % max int 64 bit
+
+
+couch_expiring_cache_basic_test_() ->
+ {
+ "Test expiring cache basics",
+ {
+ setup,
+ fun setup_couch/0, fun teardown_couch/1,
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun simple_lifecycle/1
+ ]
+ }
+ }
+ }.
+
+
+setup_couch() ->
+ test_util:start_couch([fabric, couch_jobs]).
+
+
+teardown_couch(Ctx) ->
+ test_util:stop_couch(Ctx).
+
+
+setup() ->
+ Opts = #{
+ cache_name => ?CACHE_NAME,
+ period => 10,
+ max_jitter => 0},
+ {ok, Pid} = couch_expiring_cache_server:start_link(?MODULE, Opts),
+ true = unlink(Pid),
+ #{pid => Pid}.
+
+
+teardown(#{pid := Pid}) ->
+ exit(Pid, kill).
+
+
+simple_lifecycle(_) ->
+ % The entire test is racing against FDB being faster than timeout seconds
+ {timeout, 20, ?_test(begin
+ Start = couch_expiring_cache_server:now_ts(),
+ % Race Alert!
+ % We're betting on FDB returning a lookup faster than these:
+ Stale = 500,
+ Expires = 1000,
+ Timeout = 5000,
+ Interval = 5,
+
+ StaleTS = Start + Stale,
+ ExpiresTS = Start + Expires,
+ Name = ?CACHE_NAME,
+ Key = <<"key">>,
+ Val = <<"val">>,
+
+ ?assertEqual(ok, couch_expiring_cache_fdb:clear_all(Name)),
+ ?assertEqual(not_found, couch_expiring_cache:lookup(Name, Key)),
+ ?assertEqual([], entries(Name)),
+ ?assertEqual(ok, couch_expiring_cache:insert(Name, Key, Val,
+ StaleTS, ExpiresTS)),
+ ok = attempt_fresh_and_stale_lookups(Name, Key, Timeout, Interval),
+
+ % Refresh the existing key with updated timestamps
+ Refresh = couch_expiring_cache_server:now_ts(),
+ ?assertEqual(ok, couch_expiring_cache:insert(Name, Key, Val,
+ Refresh + Stale, Refresh + Expires)),
+ ok = attempt_fresh_and_stale_lookups(Name, Key, Timeout, Interval),
+ ?assertEqual(1, length(entries(Name))),
+ % These last 2 are also races, betting on FDB to be reasonably
+ % fast on the home stretch
+ ok = wait_lookup(Name, Key, expired, Timeout, Interval),
+ ok = wait_lookup(Name, Key, not_found, Timeout, Interval),
+ ?assertEqual([], entries(Name))
+ end)}.
+
+
+% In this race we're betting on FDB to take less than `Stale` and then
+% `Expires` milliseconds to respond
+attempt_fresh_and_stale_lookups(Name, Key, Timeout, Interval) ->
+ case couch_expiring_cache:lookup(Name, Key) of
+ {fresh, Val} ->
+ % We won that race, let's bet on another!
+ ok = wait_lookup(Name, Key, {stale, Val}, Timeout, Interval);
+ _ ->
+ % Unlucky! But don't fail the test just yet...
+ ok
+ end.
+
+
+entries(Name) ->
+ couch_expiring_cache_fdb:get_range_to(Name, ?FOREVER, _Limit=100).
+
+
+% This lookup races against Timeout
+wait_lookup(Name, Key, Expect, Timeout, Interval) ->
+ wait(fun() ->
+ case couch_expiring_cache:lookup(Name, Key) of
+ Expect -> ok;
+ _ -> wait
+ end
+ end, Timeout, Interval).
+
+
+wait(Fun, Timeout, Delay) ->
+ Now = couch_expiring_cache_server:now_ts(),
+ wait(Fun, Timeout, Delay, Now, Now).
+
+
+wait(_Fun, Timeout, _Delay, Started, Prev) when Prev - Started > Timeout ->
+ timeout;
+
+wait(Fun, Timeout, Delay, Started, _Prev) ->
+ case Fun() of
+ wait ->
+ % http://erlang.org/doc/man/timer.html#sleep-1
+ ok = timer:sleep(Delay), % always millisecond
+ wait(Fun, Timeout, Delay, Started,
+ couch_expiring_cache_server:now_ts());
+ Else ->
+ Else
+ end.
diff --git a/src/couch_index/src/couch_index.erl b/src/couch_index/src/couch_index.erl
index cfe0d9e4f..09bd48c61 100644
--- a/src/couch_index/src/couch_index.erl
+++ b/src/couch_index/src/couch_index.erl
@@ -23,7 +23,7 @@
-export([compact/1, compact/2, get_compactor_pid/1]).
%% gen_server callbacks
--export([init/1, terminate/2, code_change/3]).
+-export([init/1, terminate/2, code_change/3, format_status/2]).
-export([handle_call/3, handle_cast/2, handle_info/2]).
@@ -375,6 +375,23 @@ handle_info({'DOWN', _, _, _Pid, _}, #st{mod=Mod, idx_state=IdxState}=State) ->
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
+format_status(Opt, [PDict, State]) ->
+ #st{
+ mod = Mod,
+ waiters = Waiters,
+ idx_state = IdxState
+ } = State,
+ Scrubbed = State#st{waiters = {length, length(Waiters)}},
+ IdxSafeState = case erlang:function_exported(Mod, format_status, 2) of
+ true ->
+ Mod:format_status(Opt, [PDict, IdxState]);
+ false ->
+ []
+ end,
+ [{data, [{"State",
+ ?record_to_keyval(st, Scrubbed) ++ IdxSafeState
+ }]}].
+
maybe_restart_updater(#st{waiters=[]}) ->
ok;
maybe_restart_updater(#st{idx_state=IdxState}=State) ->
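Editorial sketch (not part of the patch) of the optional per-backend callback this change delegates to; the module name and the summary it returns are assumptions:

```erlang
%% Hypothetical index backend: extra key/value pairs returned here are
%% appended to the scrubbed #st{} report produced by couch_index.
-module(example_index_backend).
-export([format_status/2]).

format_status(_Opt, [_PDict, IdxState]) ->
    [{idx_state_size_bytes, erlang:external_size(IdxState)}].
```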
diff --git a/src/couch_jobs/.gitignore b/src/couch_jobs/.gitignore
new file mode 100644
index 000000000..6ef4c5212
--- /dev/null
+++ b/src/couch_jobs/.gitignore
@@ -0,0 +1,4 @@
+*.beam
+.eunit
+ebin/couch_jobs.app
+.DS_Store \ No newline at end of file
diff --git a/src/couch_jobs/README.md b/src/couch_jobs/README.md
new file mode 100644
index 000000000..bc45d323c
--- /dev/null
+++ b/src/couch_jobs/README.md
@@ -0,0 +1,62 @@
+CouchDB Jobs Application
+========================
+
+Run background jobs in CouchDB
+
+Design (RFC) discussion: https://github.com/apache/couchdb-documentation/pull/409/files
+
+This is a description of some of the modules:
+
+ * `couch_jobs`: The main API module. It contains functions for creating,
+ accepting, executing, and monitoring jobs. A common pattern in this module
+ is to get a jobs transaction object (named `JTx` throughout the code), then
+ start a transaction and call a bunch of functions from `couch_jobs_fdb` in
+ that transaction.
+
+ * `couch_jobs_fdb`: This is a layer that talks to FDB. There is a lot of tuple
+ packing and unpacking, reading ranges and also managing transaction objects.
+
+ * `couch_jobs_pending`: This module implements the pending jobs queue. These
+   functions could all go in `couch_jobs_fdb`, but the implementation was
+   fairly self-contained, with its own private helper functions, so it made
+   sense to move it into a separate module.
+
+ * `couch_jobs_activity_monitor`: Here is where the "activity monitor"
+ functionality is implemented. That's done with a `gen_server` instance
+   running for each type. This `gen_server` periodically checks if there are
+   inactive jobs for its type and, if there are, re-enqueues them. If the
+   timeout value changes, it skips the pending check until the new timeout
+   expires.
+
+ * `couch_jobs_activity_monitor_sup`: A simple one-for-one supervisor that
+   spawns `couch_jobs_activity_monitor` instances for each type.
+
+ * `couch_jobs_type_monitor`: A helper process meant to be `spawn_link`-ed
+   from a parent `gen_server`. It monitors activity for a particular job
+   type. If any jobs of that type are updated, it notifies the parent
+   process.
+
+ * `couch_jobs_notifier`: This module is responsible for subscriptions. Just
+   like with the activity monitor, there is a `gen_server` instance running
+   per type. It uses a linked `couch_jobs_type_monitor` process to wait for
+   any job updates. When an update notification arrives, it can efficiently
+   find out if any active jobs have been updated by reading the `(?JOBS,
+   ?ACTIVITY, Type, Sequence)` range. That should account for the bulk of
+   changes. The jobs that are not active anymore are queried individually.
+   Subscriptions are managed in an ordered set ETS table.
+
+ * `couch_jobs_notifier_sup`: A simple one-for-one supervisor to spawn
+ `couch_jobs_notifier` processes for each type.
+
+ * `couch_jobs_server`: This is a `gen_server` which keeps track of job
+ types. It then starts or stops activity monitors and notifiers for each
+   type. To do that it queries `(?JOBS, ?ACTIVITY_TIMEOUT)` periodically.
+
+ * `couch_jobs_sup`: This is the main application supervisor. The restart
+   strategy is `rest_for_one`, meaning that when a child restarts, the
+   siblings started after it are restarted as well. One interesting entry
+   there is the first child, which is used just to create an ETS table used
+   by `couch_jobs_fdb` to cache transaction objects (`JTx` mentioned above).
+   That child calls `init_cache/0`, which creates the ETS table and then
+   returns `ignore`, so it doesn't actually spawn a process. The ETS table is
+   owned by the supervisor process.
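Editorial sketch (not part of the patch) of a typical job lifecycle using the API module added below; the type, job id, and data are illustrative:

```erlang
%% Sketch: register a type timeout, enqueue a job, accept it, and finish it.
Type = <<"example_type">>,
ok = couch_jobs:set_type_timeout(Type, 30),  % seconds
ok = couch_jobs:add(undefined, Type, <<"job-1">>, #{<<"db">> => <<"mydb">>}),
{ok, Job, JobData} = couch_jobs:accept(Type),
%% ... perform the work ...
ok = couch_jobs:finish(undefined, Job, JobData#{<<"ok">> => true}).
```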
diff --git a/src/couch_jobs/rebar.config b/src/couch_jobs/rebar.config
new file mode 100644
index 000000000..362c8785e
--- /dev/null
+++ b/src/couch_jobs/rebar.config
@@ -0,0 +1,14 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+{cover_enabled, true}.
+{cover_print_enabled, true}.
diff --git a/src/couch_jobs/src/couch_jobs.app.src b/src/couch_jobs/src/couch_jobs.app.src
new file mode 100644
index 000000000..8ded14c6f
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs.app.src
@@ -0,0 +1,31 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+{application, couch_jobs, [
+ {description, "CouchDB Jobs"},
+ {vsn, git},
+ {mod, {couch_jobs_app, []}},
+ {registered, [
+ couch_jobs_sup,
+ couch_jobs_activity_monitor_sup,
+ couch_jobs_notifier_sup,
+ couch_jobs_server
+ ]},
+ {applications, [
+ kernel,
+ stdlib,
+ erlfdb,
+ couch_log,
+ config,
+ fabric
+ ]}
+]}.
diff --git a/src/couch_jobs/src/couch_jobs.erl b/src/couch_jobs/src/couch_jobs.erl
new file mode 100644
index 000000000..f6fb62664
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs.erl
@@ -0,0 +1,423 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_jobs).
+
+-export([
+ % Job creation
+ add/4,
+ add/5,
+ remove/3,
+ get_job_data/3,
+ get_job_state/3,
+ get_active_jobs_ids/2,
+ get_types/1,
+
+ % Job processing
+ accept/1,
+ accept/2,
+ finish/2,
+ finish/3,
+ resubmit/2,
+ resubmit/3,
+ resubmit/4,
+ is_resubmitted/1,
+ update/2,
+ update/3,
+
+ % Subscriptions
+ subscribe/2,
+ subscribe/3,
+ unsubscribe/1,
+ wait/2,
+ wait/3,
+
+ % Type timeouts
+ set_type_timeout/2,
+ clear_type_timeout/1,
+ get_type_timeout/1
+]).
+
+
+-include("couch_jobs.hrl").
+
+
+-define(MIN_ACCEPT_WAIT_MSEC, 100).
+
+
+%% Job Creation API
+
+-spec add(jtx(), job_type(), job_id(), job_data()) -> ok | {error, any()}.
+add(Tx, Type, JobId, JobData) ->
+ add(Tx, Type, JobId, JobData, 0).
+
+
+-spec add(jtx(), job_type(), job_id(), job_data(), scheduled_time()) ->
+ ok | {error, any()}.
+add(Tx, Type, JobId, JobData, ScheduledTime) when is_binary(JobId),
+ is_map(JobData), is_integer(ScheduledTime) ->
+ couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
+ case couch_jobs_fdb:add(JTx, Type, JobId, JobData, ScheduledTime) of
+ {ok, _, _, _} -> ok;
+ {error, Error} -> {error, Error}
+ end
+ end).
+
+
+-spec remove(jtx(), job_type(), job_id()) -> ok | {error, any()}.
+remove(Tx, Type, JobId) when is_binary(JobId) ->
+ couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
+ couch_jobs_fdb:remove(JTx, job(Type, JobId))
+ end).
+
+
+-spec get_job_data(jtx(), job_type(), job_id()) -> {ok, job_data()} | {error,
+ any()}.
+get_job_data(Tx, Type, JobId) when is_binary(JobId) ->
+ couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
+ case couch_jobs_fdb:get_job_state_and_data(JTx, job(Type, JobId)) of
+ {ok, _Seq, _State, Data} ->
+ {ok, couch_jobs_fdb:decode_data(Data)};
+ {error, Error} ->
+ {error, Error}
+ end
+ end).
+
+
+-spec get_job_state(jtx(), job_type(), job_id()) -> {ok, job_state()} | {error,
+ any()}.
+get_job_state(Tx, Type, JobId) when is_binary(JobId) ->
+ couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
+ case couch_jobs_fdb:get_job_state_and_data(JTx, job(Type, JobId)) of
+ {ok, _Seq, State, _Data} ->
+ {ok, State};
+ {error, Error} ->
+ {error, Error}
+ end
+ end).
+
+
+-spec get_active_jobs_ids(jtx(), job_type()) -> [job_id()] | {error,
+ any()}.
+get_active_jobs_ids(Tx, Type) ->
+ couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
+ Since = couch_jobs_fdb:get_active_since(JTx, Type,
+ {versionstamp, 0, 0}),
+ maps:keys(Since)
+ end).
+
+
+-spec get_types(jtx()) -> [job_type()] | {error, any()}.
+get_types(Tx) ->
+ couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
+ couch_jobs_fdb:get_types(JTx)
+ end).
+
+
+%% Job processor API
+
+-spec accept(job_type()) -> {ok, job(), job_data()} | {error, any()}.
+accept(Type) ->
+ accept(Type, #{}).
+
+
+-spec accept(job_type(), job_accept_opts()) -> {ok, job()} | {error, any()}.
+accept(Type, #{} = Opts) ->
+ NoSched = maps:get(no_schedule, Opts, false),
+ MaxSchedTimeDefault = case NoSched of
+ true -> 0;
+ false -> ?UNDEFINED_MAX_SCHEDULED_TIME
+ end,
+ MaxSchedTime = maps:get(max_sched_time, Opts, MaxSchedTimeDefault),
+ Timeout = maps:get(timeout, Opts, infinity),
+ case NoSched andalso MaxSchedTime =/= 0 of
+ true ->
+ {error, no_schedule_require_0_max_sched_time};
+ false ->
+ accept_loop(Type, NoSched, MaxSchedTime, Timeout)
+ end.
+
+
+-spec finish(jtx(), job()) -> ok | {error, any()}.
+finish(Tx, Job) ->
+ finish(Tx, Job, undefined).
+
+
+-spec finish(jtx(), job(), job_data()) -> ok | {error, any()}.
+finish(Tx, #{jlock := <<_/binary>>} = Job, JobData) ->
+ couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
+ couch_jobs_fdb:finish(JTx, Job, JobData)
+ end).
+
+
+-spec resubmit(jtx(), job()) -> {ok, job()} | {error, any()}.
+resubmit(Tx, Job) ->
+ resubmit(Tx, Job, ?UNDEFINED_MAX_SCHEDULED_TIME).
+
+
+-spec resubmit(jtx(), job(), scheduled_time()) -> {ok, job()} | {error, any()}.
+resubmit(Tx, #{jlock := <<_/binary>>} = Job, SchedTime) ->
+ couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
+ couch_jobs_fdb:resubmit(JTx, Job, SchedTime)
+ end).
+
+
+-spec resubmit(jtx(), job(), scheduled_time(), job_data()) -> {ok, job()} | {error, any()}.
+resubmit(Tx, #{jlock := <<_/binary>>} = Job, SchedTime, Data) ->
+ couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
+ couch_jobs_fdb:resubmit(JTx, Job, SchedTime, Data)
+ end).
+
+
+-spec is_resubmitted(job()) -> true | false.
+is_resubmitted(#{job := true} = Job) ->
+ maps:get(resubmit, Job, false).
+
+
+-spec update(jtx(), job()) -> {ok, job()} | {error, any()}.
+update(Tx, Job) ->
+ update(Tx, Job, undefined).
+
+
+-spec update(jtx(), job(), job_data()) -> {ok, job()} | {error, any()}.
+update(Tx, #{jlock := <<_/binary>>} = Job, JobData) ->
+ couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
+ couch_jobs_fdb:update(JTx, Job, JobData)
+ end).
+
+
+%% Subscription API
+
+% Receive events as messages. Wait for them using `wait/2,3`
+% functions.
+%
+
+-spec subscribe(job_type(), job_id()) -> {ok, job_subscription(), job_state(),
+ job_data()} | {ok, finished, job_data()} | {error, any()}.
+subscribe(Type, JobId) ->
+ subscribe(undefined, Type, JobId).
+
+
+-spec subscribe(jtx(), job_type(), job_id()) -> {ok, job_subscription(),
+ job_state(), job_data()} | {ok, finished, job_data()} | {error, any()}.
+subscribe(Tx, Type, JobId) ->
+ StateData = couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
+ Job = #{job => true, type => Type, id => JobId},
+ couch_jobs_fdb:get_job_state_and_data(JTx, Job)
+ end),
+ case StateData of
+ {ok, _Seq, finished, Data} ->
+ {ok, finished, couch_jobs_fdb:decode_data(Data)};
+ {ok, Seq, State, Data} ->
+ case couch_jobs_notifier:subscribe(Type, JobId, State, Seq) of
+ {ok, SubRef} ->
+ Data1 = couch_jobs_fdb:decode_data(Data),
+ {ok, SubRef, State, Data1};
+ {error, Error} ->
+ {error, Error}
+ end;
+ {error, Error} ->
+ {error, Error}
+ end.
+
+
+% Unsubscribe from getting notifications based on a particular subscription.
+% Each subscription should be followed by its own unsubscription call. However,
+% subscriber processes are also monitored and auto-unsubscribed if they exit.
+% If the subscribing process is exiting, calling this function is optional.
+%
+-spec unsubscribe(job_subscription()) -> ok.
+unsubscribe({Server, Ref}) when is_pid(Server), is_reference(Ref) ->
+ try
+ couch_jobs_notifier:unsubscribe(Server, Ref)
+ after
+ flush_notifications(Ref)
+ end.
+
+
+% Wait to receive job state updates
+%
+-spec wait(job_subscription() | [job_subscription()], timeout()) ->
+ {job_type(), job_id(), job_state(), job_data()} | timeout.
+wait({_, Ref}, Timeout) ->
+ receive
+ {?COUCH_JOBS_EVENT, Ref, Type, Id, State, Data} ->
+ {Type, Id, State, couch_jobs_fdb:decode_data(Data)}
+ after
+ Timeout -> timeout
+ end;
+
+wait(Subs, Timeout) when is_list(Subs) ->
+ {Result, ResendQ} = wait_any(Subs, Timeout, []),
+ lists:foreach(fun(Msg) -> self() ! Msg end, ResendQ),
+ Result.
+
+
+-spec wait(job_subscription() | [job_subscription()], job_state(), timeout())
+ -> {job_type(), job_id(), job_state(), job_data()} | timeout.
+wait({_, Ref} = Sub, State, Timeout) when is_atom(State) ->
+ receive
+ {?COUCH_JOBS_EVENT, Ref, Type, Id, MsgState, Data0} ->
+ case MsgState =:= State of
+ true ->
+ Data = couch_jobs_fdb:decode_data(Data0),
+ {Type, Id, State, Data};
+ false ->
+ wait(Sub, State, Timeout)
+ end
+ after
+ Timeout -> timeout
+ end;
+
+wait(Subs, State, Timeout) when is_list(Subs),
+ is_atom(State) ->
+ {Result, ResendQ} = wait_any(Subs, State, Timeout, []),
+ lists:foreach(fun(Msg) -> self() ! Msg end, ResendQ),
+ Result.
+
+
+%% Job type timeout API
+
+% These functions manipulate the activity timeout for each job type.
+
+-spec set_type_timeout(job_type(), timeout()) -> ok.
+set_type_timeout(Type, Timeout) ->
+ couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(JTx) ->
+ couch_jobs_fdb:set_type_timeout(JTx, Type, Timeout)
+ end).
+
+
+-spec clear_type_timeout(job_type()) -> ok.
+clear_type_timeout(Type) ->
+ couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(JTx) ->
+ couch_jobs_fdb:clear_type_timeout(JTx, Type)
+ end).
+
+
+-spec get_type_timeout(job_type()) -> timeout().
+get_type_timeout(Type) ->
+ couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(JTx) ->
+ couch_jobs_fdb:get_type_timeout(JTx, Type)
+ end).
+
+
+%% Private utilities
+
+accept_loop(Type, NoSched, MaxSchedTime, Timeout) ->
+ TxFun = fun(JTx) ->
+ couch_jobs_fdb:accept(JTx, Type, MaxSchedTime, NoSched)
+ end,
+ AcceptResult = try
+ couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), TxFun)
+ catch
+ error:{timeout, _} ->
+ retry;
+ error:{erlfdb_error, Err} when Err =:= 1020 orelse Err =:= 1031 ->
+ retry
+ end,
+ case AcceptResult of
+ {ok, Job, Data} ->
+ {ok, Job, Data};
+ retry ->
+ accept_loop(Type, NoSched, MaxSchedTime, Timeout);
+ {not_found, PendingWatch} ->
+ case wait_pending(PendingWatch, MaxSchedTime, Timeout, NoSched) of
+ {error, not_found} ->
+ {error, not_found};
+ retry ->
+ accept_loop(Type, NoSched, MaxSchedTime, Timeout);
+ ok ->
+ accept_loop(Type, NoSched, MaxSchedTime, Timeout)
+ end
+ end.
+
+
+job(Type, JobId) ->
+ #{job => true, type => Type, id => JobId}.
+
+
+wait_pending(PendingWatch, _MaxSTime, _UserTimeout = 0, _NoSched) ->
+ erlfdb:cancel(PendingWatch, [flush]),
+ {error, not_found};
+
+wait_pending(PendingWatch, MaxSTime, UserTimeout, NoSched) ->
+ NowMSec = erlang:system_time(millisecond),
+ Timeout0 = max(?MIN_ACCEPT_WAIT_MSEC, MaxSTime * 1000 - NowMSec),
+ Timeout = min(limit_timeout(Timeout0, NoSched), UserTimeout),
+ try
+ erlfdb:wait(PendingWatch, [{timeout, Timeout}]),
+ ok
+ catch
+ error:{erlfdb_error, ?FUTURE_VERSION} ->
+ erlfdb:cancel(PendingWatch, [flush]),
+ retry;
+ error:{timeout, _} ->
+ erlfdb:cancel(PendingWatch, [flush]),
+ {error, not_found}
+ end.
+
+
+wait_any(Subs, Timeout0, ResendQ) when is_list(Subs) ->
+ Timeout = limit_timeout(Timeout0, false),
+ receive
+ {?COUCH_JOBS_EVENT, Ref, Type, Id, State, Data0} = Msg ->
+ case lists:keyfind(Ref, 2, Subs) of
+ false ->
+ wait_any(Subs, Timeout, [Msg | ResendQ]);
+ {_, Ref} ->
+ Data = couch_jobs_fdb:decode_data(Data0),
+ {{Type, Id, State, Data}, ResendQ}
+ end
+ after
+ Timeout -> {timeout, ResendQ}
+ end.
+
+
+wait_any(Subs, State, Timeout0, ResendQ) when
+ is_list(Subs) ->
+ Timeout = limit_timeout(Timeout0, false),
+ receive
+ {?COUCH_JOBS_EVENT, Ref, Type, Id, MsgState, Data0} = Msg ->
+ case lists:keyfind(Ref, 2, Subs) of
+ false ->
+ wait_any(Subs, Timeout, [Msg | ResendQ]);
+ {_, Ref} ->
+ case MsgState =:= State of
+ true ->
+ Data = couch_jobs_fdb:decode_data(Data0),
+ {{Type, Id, State, Data}, ResendQ};
+ false ->
+ wait_any(Subs, Timeout, ResendQ)
+ end
+ end
+ after
+ Timeout -> {timeout, ResendQ}
+ end.
+
+
+limit_timeout(_Timeout, true) ->
+ infinity;
+
+limit_timeout(Timeout, false) when is_integer(Timeout), Timeout < 16#FFFFFFFF ->
+ Timeout;
+
+limit_timeout(_Timeout, false) ->
+ infinity.
+
+
+flush_notifications(Ref) ->
+ receive
+ {?COUCH_JOBS_EVENT, Ref, _, _, _} ->
+ flush_notifications(Ref)
+ after
+ 0 -> ok
+ end.
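Editorial sketch (not part of the patch) of the subscription API above: subscribe to a job and wait until it reaches the `finished` state; `Type` and `JobId` are assumed to refer to an existing job:

```erlang
%% Sketch: block for up to 30 seconds waiting for the job to finish.
case couch_jobs:subscribe(Type, JobId) of
    {ok, finished, Data} ->
        {finished, Data};
    {ok, SubRef, _State, _Data} ->
        Result = couch_jobs:wait(SubRef, finished, 30000),
        ok = couch_jobs:unsubscribe(SubRef),
        Result;
    {error, Error} ->
        {error, Error}
end.
```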
diff --git a/src/couch_jobs/src/couch_jobs.hrl b/src/couch_jobs/src/couch_jobs.hrl
new file mode 100644
index 000000000..bb561b136
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs.hrl
@@ -0,0 +1,52 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-include_lib("fabric/include/fabric2.hrl").
+
+
+% Job map/json field definitions
+%
+-define(OPT_PRIORITY, <<"priority">>).
+-define(OPT_DATA, <<"data">>).
+-define(OPT_CANCEL, <<"cancel">>).
+-define(OPT_RESUBMIT, <<"resubmit">>).
+
+% These might be in a fabric public hrl eventually
+%
+-define(UNSET_VS, {versionstamp, 16#FFFFFFFFFFFFFFFF, 16#FFFF}).
+
+% Data model definitions
+%
+-define(JOBS, 51). % coordinate with fabric2.hrl
+-define(DATA, 1).
+-define(PENDING, 2).
+-define(WATCHES_PENDING, 3).
+-define(WATCHES_ACTIVITY, 4).
+-define(ACTIVITY_TIMEOUT, 5).
+-define(ACTIVITY, 6).
+
+
+-define(COUCH_JOBS_MD_VERSION, <<"couch_jobs_md_version">>).
+-define(COUCH_JOBS_EVENT, '$couch_jobs_event').
+-define(COUCH_JOBS_CURRENT, '$couch_jobs_current').
+-define(UNDEFINED_MAX_SCHEDULED_TIME, 1 bsl 36).
+
+
+-type jtx() :: map() | undefined | tuple().
+-type job_id() :: binary().
+-type job_type() :: tuple() | binary() | non_neg_integer().
+-type job() :: map().
+-type job_data() :: map() | undefined.
+-type job_accept_opts() :: map().
+-type scheduled_time() :: non_neg_integer() | undefined.
+-type job_state() :: running | pending | finished.
+-type job_subscription() :: {pid(), reference()}.
diff --git a/src/couch_jobs/src/couch_jobs_activity_monitor.erl b/src/couch_jobs/src/couch_jobs_activity_monitor.erl
new file mode 100644
index 000000000..9802f5798
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs_activity_monitor.erl
@@ -0,0 +1,140 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_jobs_activity_monitor).
+
+-behaviour(gen_server).
+
+
+-export([
+ start_link/1
+]).
+
+-export([
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ code_change/3
+]).
+
+-record(st, {
+ jtx,
+ type,
+ tref,
+ timeout = 0,
+ vs = not_found
+}).
+
+
+-define(MAX_JITTER_DEFAULT, 10000).
+-define(MISSING_TIMEOUT_CHECK, 5000).
+
+
+start_link(Type) ->
+ gen_server:start_link(?MODULE, [Type], []).
+
+
+%% gen_server callbacks
+
+init([Type]) ->
+ St = #st{jtx = couch_jobs_fdb:get_jtx(), type = Type},
+ {ok, schedule_check(St)}.
+
+
+terminate(_, _St) ->
+ ok.
+
+
+handle_call(Msg, _From, St) ->
+ {stop, {bad_call, Msg}, {bad_call, Msg}, St}.
+
+
+handle_cast(Msg, St) ->
+ {stop, {bad_cast, Msg}, St}.
+
+
+handle_info(check_activity, St) ->
+ St1 = try
+ check_activity(St)
+ catch
+ error:{erlfdb_error, Err} when Err =:= 1020 orelse Err =:= 1031 ->
+ LogMsg = "~p : type:~p got ~p error, possibly from overload",
+ couch_log:error(LogMsg, [?MODULE, St#st.type, Err]),
+ St
+ end,
+ St2 = schedule_check(St1),
+ {noreply, St2};
+
+handle_info({Ref, ready}, St) when is_reference(Ref) ->
+ % Don't crash here; that would take down couch_jobs_server and, with it, the
+ % whole application. Proper cleanup should eventually be done in the
+ % erlfdb:wait timeout code.
+ LogMsg = "~p : spurious erlfdb future ready message ~p",
+ couch_log:error(LogMsg, [?MODULE, Ref]),
+ {noreply, St};
+
+handle_info(Msg, St) ->
+ {stop, {bad_info, Msg}, St}.
+
+
+code_change(_OldVsn, St, _Extra) ->
+ {ok, St}.
+
+
+% Private helper functions
+
+check_activity(#st{jtx = JTx, type = Type, vs = not_found} = St) ->
+ NewVS = couch_jobs_fdb:tx(JTx, fun(JTx1) ->
+ couch_jobs_fdb:get_activity_vs(JTx1, Type)
+ end),
+ St#st{vs = NewVS};
+
+check_activity(#st{jtx = JTx, type = Type, vs = VS} = St) ->
+ NewVS = couch_jobs_fdb:tx(JTx, fun(JTx1) ->
+ NewVS = couch_jobs_fdb:get_activity_vs(JTx1, Type),
+ JobIds = couch_jobs_fdb:get_inactive_since(JTx1, Type, VS),
+ couch_jobs_fdb:re_enqueue_inactive(JTx1, Type, JobIds),
+ NewVS
+ end),
+ St#st{vs = NewVS}.
+
+
+get_timeout_msec(JTx, Type) ->
+ TimeoutVal = couch_jobs_fdb:tx(JTx, fun(JTx1) ->
+ couch_jobs_fdb:get_type_timeout(JTx1, Type)
+ end),
+ case TimeoutVal of
+ not_found -> not_found;
+ ValSeconds -> timer:seconds(ValSeconds)
+ end.
+
+
+schedule_check(#st{jtx = JTx, type = Type, timeout = OldTimeout} = St) ->
+ % Reset versionstamp if timeout changed.
+ St1 = case get_timeout_msec(JTx, Type) of
+ not_found ->
+ St#st{vs = not_found, timeout = ?MISSING_TIMEOUT_CHECK};
+ OldTimeout ->
+ St;
+ NewTimeout ->
+ St#st{vs = not_found, timeout = NewTimeout}
+ end,
+ #st{timeout = Timeout} = St1,
+ MaxJitter = min(Timeout div 2, get_max_jitter_msec()),
+ Wait = Timeout + rand:uniform(max(1, MaxJitter)),
+ St1#st{tref = erlang:send_after(Wait, self(), check_activity)}.
+
+
+get_max_jitter_msec() ->
+ config:get_integer("couch_jobs", "activity_monitor_max_jitter_msec",
+ ?MAX_JITTER_DEFAULT).
diff --git a/src/couch_jobs/src/couch_jobs_activity_monitor_sup.erl b/src/couch_jobs/src/couch_jobs_activity_monitor_sup.erl
new file mode 100644
index 000000000..b11161a24
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs_activity_monitor_sup.erl
@@ -0,0 +1,64 @@
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_jobs_activity_monitor_sup).
+
+
+-behaviour(supervisor).
+
+
+-export([
+ start_link/0,
+
+ start_monitor/1,
+ stop_monitor/1,
+ get_child_pids/0
+]).
+
+-export([
+ init/1
+]).
+
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+
+start_monitor(Type) ->
+ supervisor:start_child(?MODULE, [Type]).
+
+
+stop_monitor(Pid) ->
+ supervisor:terminate_child(?MODULE, Pid).
+
+
+get_child_pids() ->
+ lists:map(fun({_Id, Pid, _Type, _Mod}) ->
+ Pid
+ end, supervisor:which_children(?MODULE)).
+
+
+init(_) ->
+ Flags = #{
+ strategy => simple_one_for_one,
+ intensity => 10,
+ period => 3
+ },
+ Children = [
+ #{
+ id => couch_jobs_monitor,
+ restart => temporary,
+ start => {couch_jobs_activity_monitor, start_link, []}
+ }
+ ],
+ {ok, {Flags, Children}}.
diff --git a/src/couch_jobs/src/couch_jobs_app.erl b/src/couch_jobs/src/couch_jobs_app.erl
new file mode 100644
index 000000000..720b94891
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs_app.erl
@@ -0,0 +1,30 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_jobs_app).
+
+
+-behaviour(application).
+
+
+-export([
+ start/2,
+ stop/1
+]).
+
+
+start(_Type, []) ->
+ couch_jobs_sup:start_link().
+
+
+stop([]) ->
+ ok.
diff --git a/src/couch_jobs/src/couch_jobs_fdb.erl b/src/couch_jobs/src/couch_jobs_fdb.erl
new file mode 100644
index 000000000..3fcad554a
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs_fdb.erl
@@ -0,0 +1,725 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_jobs_fdb).
+
+
+-export([
+ add/5,
+ remove/2,
+ get_job_state_and_data/2,
+ get_jobs/2,
+ get_jobs/3,
+
+ accept/4,
+ finish/3,
+ resubmit/3,
+ resubmit/4,
+ update/3,
+
+ set_type_timeout/3,
+ clear_type_timeout/2,
+ get_type_timeout/2,
+ get_types/1,
+
+ get_activity_vs/2,
+ get_activity_vs_and_watch/2,
+ get_active_since/3,
+ get_inactive_since/3,
+ re_enqueue_inactive/3,
+
+ init_cache/0,
+
+ encode_data/1,
+ decode_data/1,
+
+ get_jtx/0,
+ get_jtx/1,
+ tx/2,
+
+ get_job/2,
+ get_jobs/0,
+
+ bump_metadata_version/0,
+ bump_metadata_version/1
+]).
+
+
+-include("couch_jobs.hrl").
+
+
+-record(jv, {
+ seq,
+ jlock,
+ stime,
+ resubmit,
+ data
+}).
+
+
+-define(JOBS_ETS_KEY, jobs).
+-define(MD_TIMESTAMP_ETS_KEY, md_timestamp).
+-define(MD_VERSION_MAX_AGE_SEC, 10).
+-define(PENDING_SEQ, 0).
+
+
+% Data model
+%
+% (?JOBS, ?DATA, Type, JobId) = (Sequence, Lock, SchedTime, Resubmit, JobData)
+% (?JOBS, ?PENDING, Type, ScheduledTime, JobId) = ""
+% (?JOBS, ?WATCHES_PENDING, Type) = Counter
+% (?JOBS, ?WATCHES_ACTIVITY, Type) = Sequence
+% (?JOBS, ?ACTIVITY_TIMEOUT, Type) = ActivityTimeout
+% (?JOBS, ?ACTIVITY, Type, Sequence) = JobId
+%
+% In the ?DATA row Sequence can have these values:
+% 0 - when the job is pending
+% null - when the job is finished
+% Versionstamp - when the job is running
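+%
+% As an illustration only (the type and id below are made up), a pending job
+% <<"j1">> of type <<"xyz">> ends up stored roughly as:
+%
+%   Key = erlfdb_tuple:pack({?DATA, <<"xyz">>, <<"j1">>}, JobsPrefix)
+%   Val = erlfdb_tuple:pack({0, null, STime, false, EncodedData})
+%
+% with a matching (?PENDING, <<"xyz">>, STime, <<"j1">>) key in the pending
+% queue. Here 0 is ?PENDING_SEQ and null means no worker holds the job lock;
+% see job_key/3, set_job_val/3 and couch_jobs_pending:enqueue/4 for the actual
+% implementation, this is only a sketch of the layout.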
+
+
+% Job creation API
+
+add(#{jtx := true} = JTx0, Type, JobId, Data, STime) ->
+ #{tx := Tx} = JTx = get_jtx(JTx0),
+ Job = #{job => true, type => Type, id => JobId},
+ case get_type_timeout(JTx, Type) of
+ not_found ->
+ {error, no_type_timeout};
+ Int when is_integer(Int) ->
+ Key = job_key(JTx, Job),
+ case erlfdb:wait(erlfdb:get(Tx, Key)) of
+ <<_/binary>> ->
+ {ok, Job1} = resubmit(JTx, Job, STime, Data),
+ #{seq := Seq, state := State, data := Data1} = Job1,
+ {ok, State, Seq, Data1};
+ not_found ->
+ try
+ maybe_enqueue(JTx, Type, JobId, STime, true, Data),
+ {ok, pending, ?PENDING_SEQ, Data}
+ catch
+ error:{json_encoding_error, Error} ->
+ {error, {json_encoding_error, Error}}
+ end
+ end
+ end.
+
+
+remove(#{jtx := true} = JTx0, #{job := true} = Job) ->
+ #{tx := Tx} = JTx = get_jtx(JTx0),
+ #{type := Type, id := JobId} = Job,
+ Key = job_key(JTx, Job),
+ case get_job_val(Tx, Key) of
+ #jv{stime = STime, seq = Seq} ->
+ couch_jobs_pending:remove(JTx, Type, JobId, STime),
+ clear_activity(JTx, Type, Seq),
+ erlfdb:clear(Tx, Key),
+ update_watch(JTx, Type),
+ ok;
+ not_found ->
+ {error, not_found}
+ end.
+
+
+get_job_state_and_data(#{jtx := true} = JTx, #{job := true} = Job) ->
+ case get_job_val(get_jtx(JTx), Job) of
+ #jv{seq = Seq, jlock = JLock, data = Data} ->
+ {ok, Seq, job_state(JLock, Seq), Data};
+ not_found ->
+ {error, not_found}
+ end.
+
+
+get_jobs(JTx, Type) ->
+ get_jobs(JTx, Type, fun(_) -> true end).
+
+
+get_jobs(#{jtx := true} = JTx, Type, Filter) when is_function(Filter, 1) ->
+ #{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
+ Prefix = erlfdb_tuple:pack({?DATA, Type}, Jobs),
+ Opts = [{streaming_mode, want_all}],
+ Result = erlfdb:wait(erlfdb:get_range_startswith(Tx, Prefix, Opts)),
+ lists:foldl(fun({K, V}, #{} = Acc) ->
+ {JobId} = erlfdb_tuple:unpack(K, Prefix),
+ case Filter(JobId) of
+ true ->
+ {Seq, JLock, _, _, Data} = erlfdb_tuple:unpack(V),
+ Acc#{JobId => {Seq, job_state(JLock, Seq), Data}};
+ false ->
+ Acc
+ end
+ end, #{}, Result).
+
+
+% Job processor API
+
+accept(#{jtx := true} = JTx0, Type, MaxSTime, NoSched)
+ when is_integer(MaxSTime), is_boolean(NoSched) ->
+ #{jtx := true, tx := Tx} = JTx = get_jtx(JTx0),
+ case couch_jobs_pending:dequeue(JTx, Type, MaxSTime, NoSched) of
+ {not_found, PendingWatch} ->
+ {not_found, PendingWatch};
+ {ok, JobId} ->
+ JLock = fabric2_util:uuid(),
+ Key = job_key(JTx, Type, JobId),
+ JV0 = get_job_val(Tx, Key),
+ #jv{jlock = null, data = Data} = JV0,
+ JV = JV0#jv{seq = ?UNSET_VS, jlock = JLock, resubmit = false},
+ set_job_val(Tx, Key, JV),
+ update_activity(JTx, Type, JobId, null, Data),
+ Job = #{
+ job => true,
+ type => Type,
+ id => JobId,
+ jlock => JLock
+ },
+ {ok, Job, decode_data(Data)}
+ end.
+
+
+finish(#{jtx := true} = JTx0, #{jlock := <<_/binary>>} = Job, Data) when
+ is_map(Data) orelse Data =:= undefined ->
+ #{tx := Tx} = JTx = get_jtx(JTx0),
+ #{type := Type, jlock := JLock, id := JobId} = Job,
+ case get_job_or_halt(Tx, job_key(JTx, Job), JLock) of
+ #jv{seq = Seq, stime = STime, resubmit = Resubmit, data = OldData} ->
+ NewData = case Data =:= undefined of
+ true -> OldData;
+ false -> Data
+ end,
+ try maybe_enqueue(JTx, Type, JobId, STime, Resubmit, NewData) of
+ ok ->
+ clear_activity(JTx, Type, Seq),
+ update_watch(JTx, Type)
+ catch
+ error:{json_encoding_error, Error} ->
+ {error, {json_encoding_error, Error}}
+ end;
+ halt ->
+ {error, halt}
+ end.
+
+resubmit(JTx0, Job, NewSTime) ->
+ resubmit(JTx0, Job, NewSTime, undefined).
+
+
+resubmit(#{jtx := true} = JTx0, #{job := true} = Job, NewSTime, NewData) ->
+ #{tx := Tx} = JTx = get_jtx(JTx0),
+ #{type := Type, id := JobId} = Job,
+ Key = job_key(JTx, Job),
+ case get_job_val(Tx, Key) of
+ #jv{seq = Seq, jlock = JLock, stime = OldSTime, data = Data} = JV ->
+ STime = case NewSTime =:= undefined of
+ true -> OldSTime;
+ false -> NewSTime
+ end,
+ case job_state(JLock, Seq) of
+ finished ->
+ ok = maybe_enqueue(JTx, Type, JobId, STime, true, NewData),
+ NewData1 = update_job_data(Data, NewData),
+ Job1 = Job#{
+ seq => ?PENDING_SEQ,
+ state => pending,
+ data => NewData1
+ },
+ {ok, Job1};
+ pending when STime == OldSTime ->
+ % If the job is pending and the scheduled time hasn't changed, avoid
+ % generating unnecessary writes by not removing and re-adding the job
+ % to the pending queue.
+ Job1 = Job#{
+ stime => STime,
+ seq => ?PENDING_SEQ,
+ state => pending,
+ data => Data
+ },
+ {ok, Job1};
+ pending ->
+ JV1 = JV#jv{seq = ?PENDING_SEQ, stime = STime, data = NewData},
+ set_job_val(Tx, Key, JV1),
+ couch_jobs_pending:remove(JTx, Type, JobId, OldSTime),
+ couch_jobs_pending:enqueue(JTx, Type, STime, JobId),
+ NewData1 = update_job_data(Data, NewData),
+ Job1 = Job#{
+ stime => STime,
+ seq => ?PENDING_SEQ,
+ state => pending,
+ data => NewData1
+ },
+ {ok, Job1};
+ running ->
+ JV1 = JV#jv{stime = STime, resubmit = true},
+ set_job_val(Tx, Key, JV1),
+ {ok, Job#{resubmit => true, stime => STime,
+ state => running, seq => Seq, data => Data}}
+ end;
+ not_found ->
+ {error, not_found}
+ end.
+
+
+update(#{jtx := true} = JTx0, #{jlock := <<_/binary>>} = Job, Data0) when
+ is_map(Data0) orelse Data0 =:= undefined ->
+ #{tx := Tx} = JTx = get_jtx(JTx0),
+ #{jlock := JLock, type := Type, id := JobId} = Job,
+ Key = job_key(JTx, Job),
+ case get_job_or_halt(Tx, Key, JLock) of
+ #jv{seq = Seq, stime = STime, resubmit = Resubmit} = JV0 ->
+ Data = case Data0 =:= undefined of
+ true -> JV0#jv.data;
+ false -> Data0
+ end,
+ JV = JV0#jv{seq = ?UNSET_VS, data = Data},
+ try set_job_val(Tx, Key, JV) of
+ ok ->
+ update_activity(JTx, Type, JobId, Seq, Data),
+ {ok, Job#{resubmit => Resubmit, stime => STime}}
+ catch
+ error:{json_encoding_error, Error} ->
+ {error, {json_encoding_error, Error}}
+ end;
+ halt ->
+ {error, halt}
+ end.
+
+
+% Type and activity monitoring API
+
+set_type_timeout(#{jtx := true} = JTx, Type, Timeout) ->
+ #{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
+ Key = erlfdb_tuple:pack({?ACTIVITY_TIMEOUT, Type}, Jobs),
+ Val = erlfdb_tuple:pack({Timeout}),
+ erlfdb:set(Tx, Key, Val).
+
+
+clear_type_timeout(#{jtx := true} = JTx, Type) ->
+ #{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
+ Key = erlfdb_tuple:pack({?ACTIVITY_TIMEOUT, Type}, Jobs),
+ erlfdb:clear(Tx, Key).
+
+
+get_type_timeout(#{jtx := true} = JTx, Type) ->
+ #{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
+ Key = erlfdb_tuple:pack({?ACTIVITY_TIMEOUT, Type}, Jobs),
+ case erlfdb:wait(erlfdb:get_ss(Tx, Key)) of
+ not_found ->
+ not_found;
+ Val ->
+ {Timeout} = erlfdb_tuple:unpack(Val),
+ Timeout
+ end.
+
+
+get_types(#{jtx := true} = JTx) ->
+ #{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
+ Prefix = erlfdb_tuple:pack({?ACTIVITY_TIMEOUT}, Jobs),
+ Opts = [{streaming_mode, want_all}],
+ Result = erlfdb:wait(erlfdb:get_range_startswith(Tx, Prefix, Opts)),
+ lists:map(fun({K, _V}) ->
+ {Type} = erlfdb_tuple:unpack(K, Prefix),
+ Type
+ end, Result).
+
+
+get_activity_vs(#{jtx := true} = JTx, Type) ->
+ #{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
+ Key = erlfdb_tuple:pack({?WATCHES_ACTIVITY, Type}, Jobs),
+ case erlfdb:wait(erlfdb:get(Tx, Key)) of
+ not_found ->
+ not_found;
+ Val ->
+ {VS} = erlfdb_tuple:unpack(Val),
+ VS
+ end.
+
+
+get_activity_vs_and_watch(#{jtx := true} = JTx, Type) ->
+ #{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
+ Key = erlfdb_tuple:pack({?WATCHES_ACTIVITY, Type}, Jobs),
+ Future = erlfdb:get(Tx, Key),
+ Watch = erlfdb:watch(Tx, Key),
+ case erlfdb:wait(Future) of
+ not_found ->
+ {not_found, Watch};
+ Val ->
+ {VS} = erlfdb_tuple:unpack(Val),
+ {VS, Watch}
+ end.
+
+
+get_active_since(#{jtx := true} = JTx, Type, Versionstamp) ->
+ #{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
+ Prefix = erlfdb_tuple:pack({?ACTIVITY}, Jobs),
+ StartKey = erlfdb_tuple:pack({Type, Versionstamp}, Prefix),
+ StartKeySel = erlfdb_key:first_greater_or_equal(StartKey),
+ {_, EndKey} = erlfdb_tuple:range({Type}, Prefix),
+ Opts = [{streaming_mode, want_all}],
+ Future = erlfdb:get_range(Tx, StartKeySel, EndKey, Opts),
+ maps:from_list(lists:map(fun({_K, V}) ->
+ erlfdb_tuple:unpack(V)
+ end, erlfdb:wait(Future))).
+
+
+get_inactive_since(#{jtx := true} = JTx, Type, Versionstamp) ->
+ #{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
+ Prefix = erlfdb_tuple:pack({?ACTIVITY}, Jobs),
+ {StartKey, _} = erlfdb_tuple:range({Type}, Prefix),
+ EndKey = erlfdb_tuple:pack({Type, Versionstamp}, Prefix),
+ EndKeySel = erlfdb_key:first_greater_than(EndKey),
+ Opts = [{streaming_mode, want_all}],
+ Future = erlfdb:get_range(Tx, StartKey, EndKeySel, Opts),
+ lists:map(fun({_K, V}) ->
+ {JobId, _} = erlfdb_tuple:unpack(V),
+ JobId
+ end, erlfdb:wait(Future)).
+
+
+re_enqueue_inactive(#{jtx := true} = JTx, Type, JobIds) when is_list(JobIds) ->
+ #{tx := Tx} = get_jtx(JTx),
+ lists:foreach(fun(JobId) ->
+ case get_job_val(Tx, job_key(JTx, Type, JobId)) of
+ #jv{seq = Seq, stime = STime, data = Data} ->
+ clear_activity(JTx, Type, Seq),
+ maybe_enqueue(JTx, Type, JobId, STime, true, Data);
+ not_found ->
+ ok
+ end
+ end, JobIds),
+ case length(JobIds) > 0 of
+ true -> update_watch(JTx, Type);
+ false -> ok
+ end.
+
+
+% Cache initialization API. Called from the supervisor just to create the ETS
+% table. It returns `ignore` to tell the supervisor that it won't actually
+% start any process, which is what we want here.
+%
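+% (For reference, couch_jobs_sup later in this change lists this as a
+% transient child with start => {couch_jobs_fdb, init_cache, []}; because
+% init_cache/0 returns `ignore`, the supervisor keeps the child spec but
+% starts no process for it.)
+%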
+init_cache() ->
+ ConcurrencyOpts = [{read_concurrency, true}, {write_concurrency, true}],
+ ets:new(?MODULE, [public, named_table] ++ ConcurrencyOpts),
+ ignore.
+
+
+% Functions to encode / decode JobData
+%
+encode_data(#{} = JobData) ->
+ try
+ jiffy:encode(JobData)
+ catch
+ throw:{error, Error} ->
+ % legacy clause since new versions of jiffy raise error instead
+ error({json_encoding_error, Error});
+ error:Error ->
+ error({json_encoding_error, Error})
+ end.
+
+
+decode_data(not_found) ->
+ not_found;
+
+decode_data(#{} = JobData) ->
+ JobData;
+
+decode_data(<<_/binary>> = JobData) ->
+ jiffy:decode(JobData, [return_maps]).
+
+
+% Cached job transaction object. This object wraps a transaction, caching the
+% directory lookup path and the metadata version. The function can be used
+% from inside or outside a transaction. When used from inside a transaction it
+% will check whether the metadata has changed and refresh the cache as needed.
+%
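+%
+% A typical use, sketched after fdb_types/0 in couch_jobs_server and
+% clear_jobs/0 in the tests, looks like:
+%
+%   couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(JTx) ->
+%       couch_jobs_fdb:get_types(JTx)
+%   end)
+%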
+get_jtx() ->
+ get_jtx(undefined).
+
+
+get_jtx(#{tx := Tx} = _TxDb) ->
+ get_jtx(Tx);
+
+get_jtx(undefined = _Tx) ->
+ case ets:lookup(?MODULE, ?JOBS_ETS_KEY) of
+ [{_, #{} = JTx}] ->
+ JTx;
+ [] ->
+ JTx = update_jtx_cache(init_jtx(undefined)),
+ JTx#{tx := undefined}
+ end;
+
+get_jtx({erlfdb_transaction, _} = Tx) ->
+ case ets:lookup(?MODULE, ?JOBS_ETS_KEY) of
+ [{_, #{} = JTx}] ->
+ ensure_current(JTx#{tx := Tx});
+ [] ->
+ update_jtx_cache(init_jtx(Tx))
+ end.
+
+
+% Transaction processing to be used with couch jobs' specific transaction
+% contexts
+%
+tx(#{jtx := true} = JTx, Fun) when is_function(Fun, 1) ->
+ fabric2_fdb:transactional(JTx, Fun).
+
+
+% Debug and testing API
+
+get_job(Type, JobId) ->
+ fabric2_fdb:transactional(fun(Tx) ->
+ JTx = init_jtx(Tx),
+ case get_job_val(Tx, job_key(JTx, Type, JobId)) of
+ #jv{seq = Seq, jlock = JLock} = JV ->
+ #{
+ job => true,
+ type => Type,
+ id => JobId,
+ seq => Seq,
+ jlock => JLock,
+ stime => JV#jv.stime,
+ resubmit => JV#jv.resubmit,
+ data => decode_data(JV#jv.data),
+ state => job_state(JLock, Seq)
+ };
+ not_found ->
+ not_found
+ end
+ end).
+
+
+get_jobs() ->
+ fabric2_fdb:transactional(fun(Tx) ->
+ #{jobs_path := Jobs} = init_jtx(Tx),
+ Prefix = erlfdb_tuple:pack({?DATA}, Jobs),
+ Opts = [{streaming_mode, want_all}],
+ Result = erlfdb:wait(erlfdb:get_range_startswith(Tx, Prefix, Opts)),
+ lists:map(fun({K, V}) ->
+ {Type, JobId} = erlfdb_tuple:unpack(K, Prefix),
+ {Seq, JLock, _, _, Data} = erlfdb_tuple:unpack(V),
+ JobState = job_state(JLock, Seq),
+ {Type, JobId, JobState, decode_data(Data)}
+ end, Result)
+ end).
+
+
+% Call this function if the top level "couchdb" FDB directory layer
+% changes.
+%
+bump_metadata_version() ->
+ fabric2_fdb:transactional(fun(Tx) ->
+ bump_metadata_version(Tx)
+ end).
+
+
+bump_metadata_version(Tx) ->
+ erlfdb:set_versionstamped_value(Tx, ?COUCH_JOBS_MD_VERSION, <<0:112>>).
+
+
+% Private helper functions
+
+maybe_enqueue(#{jtx := true} = JTx, Type, JobId, STime, Resubmit, Data) ->
+ #{tx := Tx} = JTx,
+ Key = job_key(JTx, Type, JobId),
+ JV = #jv{
+ seq = null,
+ jlock = null,
+ stime = STime,
+ resubmit = false,
+ data = Data
+ },
+ case Resubmit of
+ true ->
+ set_job_val(Tx, Key, JV#jv{seq = ?PENDING_SEQ}),
+ couch_jobs_pending:enqueue(JTx, Type, STime, JobId);
+ false ->
+ set_job_val(Tx, Key, JV)
+ end,
+ ok.
+
+
+job_key(#{jtx := true, jobs_path := Jobs}, Type, JobId) ->
+ erlfdb_tuple:pack({?DATA, Type, JobId}, Jobs).
+
+
+job_key(JTx, #{type := Type, id := JobId}) ->
+ job_key(JTx, Type, JobId).
+
+
+get_job_val(#{jtx := true, tx := Tx} = JTx, #{job := true} = Job) ->
+ get_job_val(Tx, job_key(JTx, Job));
+
+get_job_val(Tx = {erlfdb_transaction, _}, Key) ->
+ case erlfdb:wait(erlfdb:get(Tx, Key)) of
+ <<_/binary>> = Val ->
+ {Seq, JLock, STime, Resubmit, Data} = erlfdb_tuple:unpack(Val),
+ #jv{
+ seq = Seq,
+ jlock = JLock,
+ stime = STime,
+ resubmit = Resubmit,
+ data = Data
+ };
+ not_found ->
+ not_found
+ end.
+
+
+set_job_val(Tx = {erlfdb_transaction, _}, Key, #jv{} = JV) ->
+ #jv{
+ seq = Seq,
+ jlock = JLock,
+ stime = STime,
+ resubmit = Resubmit,
+ data = Data0
+ } = JV,
+ Data = case Data0 of
+ #{} -> encode_data(Data0);
+ <<_/binary>> -> Data0
+ end,
+ case Seq of
+ ?UNSET_VS ->
+ Val = erlfdb_tuple:pack_vs({Seq, JLock, STime, Resubmit, Data}),
+ erlfdb:set_versionstamped_value(Tx, Key, Val);
+ _Other ->
+ Val = erlfdb_tuple:pack({Seq, JLock, STime, Resubmit, Data}),
+ erlfdb:set(Tx, Key, Val)
+ end,
+ ok.
+
+
+get_job_or_halt(Tx, Key, JLock) ->
+ case get_job_val(Tx, Key) of
+ #jv{jlock = CurJLock} when CurJLock =/= JLock ->
+ halt;
+ #jv{} = Res ->
+ Res;
+ not_found ->
+ halt
+ end.
+
+
+update_activity(#{jtx := true} = JTx, Type, JobId, Seq, Data0) ->
+ #{tx := Tx, jobs_path := Jobs} = JTx,
+ case Seq =/= null of
+ true -> clear_activity(JTx, Type, Seq);
+ false -> ok
+ end,
+ Key = erlfdb_tuple:pack_vs({?ACTIVITY, Type, ?UNSET_VS}, Jobs),
+ Data = case Data0 of
+ #{} -> encode_data(Data0);
+ <<_/binary>> -> Data0
+ end,
+ Val = erlfdb_tuple:pack({JobId, Data}),
+ erlfdb:set_versionstamped_key(Tx, Key, Val),
+ update_watch(JTx, Type).
+
+
+clear_activity(#{jtx := true} = JTx, Type, Seq) ->
+ #{tx := Tx, jobs_path := Jobs} = JTx,
+ Key = erlfdb_tuple:pack({?ACTIVITY, Type, Seq}, Jobs),
+ erlfdb:clear(Tx, Key).
+
+
+update_watch(#{jtx := true} = JTx, Type) ->
+ #{tx := Tx, jobs_path := Jobs} = JTx,
+ Key = erlfdb_tuple:pack({?WATCHES_ACTIVITY, Type}, Jobs),
+ Val = erlfdb_tuple:pack_vs({?UNSET_VS}),
+ erlfdb:set_versionstamped_value(Tx, Key, Val),
+ ok.
+
+
+job_state(JLock, Seq) ->
+ case {JLock, Seq} of
+ {null, null} -> finished;
+ {JLock, _} when JLock =/= null -> running;
+ {null, Seq} when Seq =/= null -> pending
+ end.
+
+
+% This is a transaction context object similar to the Db = #{} one from
+% fabric2_fdb. It is used to cache the jobs directory path (to avoid extra
+% lookups on every operation) and to check for metadata changes (in case the
+% directory changes).
+%
+init_jtx(undefined) ->
+ fabric2_fdb:transactional(fun(Tx) -> init_jtx(Tx) end);
+
+init_jtx({erlfdb_transaction, _} = Tx) ->
+ LayerPrefix = fabric2_fdb:get_dir(Tx),
+ Jobs = erlfdb_tuple:pack({?JOBS}, LayerPrefix),
+ % layer_prefix, md_version and tx here match db map fields in fabric2_fdb
+ % but we also assert that this is a job transaction using the jtx => true
+ % field
+ #{
+ jtx => true,
+ tx => Tx,
+ layer_prefix => LayerPrefix,
+ jobs_path => Jobs,
+ md_version => get_metadata_version(Tx)
+ }.
+
+
+ensure_current(#{jtx := true, tx := Tx} = JTx) ->
+ case get(?COUCH_JOBS_CURRENT) of
+ Tx ->
+ JTx;
+ _ ->
+ JTx1 = update_current(JTx),
+ put(?COUCH_JOBS_CURRENT, Tx),
+ JTx1
+ end.
+
+
+get_metadata_version({erlfdb_transaction, _} = Tx) ->
+ erlfdb:wait(erlfdb:get_ss(Tx, ?COUCH_JOBS_MD_VERSION)).
+
+
+update_current(#{tx := Tx, md_version := Version} = JTx) ->
+ case get_md_version_age(Version) of
+ Age when Age =< ?MD_VERSION_MAX_AGE_SEC ->
+ % Looked it up not too long ago. Avoid looking it up too frequently
+ JTx;
+ _ ->
+ case get_metadata_version(Tx) of
+ Version ->
+ update_md_version_timestamp(Version),
+ JTx;
+ _NewVersion ->
+ update_jtx_cache(init_jtx(Tx))
+ end
+ end.
+
+
+update_jtx_cache(#{jtx := true, md_version := Version} = JTx) ->
+ CachedJTx = JTx#{tx := undefined},
+ ets:insert(?MODULE, {?JOBS_ETS_KEY, CachedJTx}),
+ update_md_version_timestamp(Version),
+ JTx.
+
+
+get_md_version_age(Version) ->
+ Timestamp = case ets:lookup(?MODULE, ?MD_TIMESTAMP_ETS_KEY) of
+ [{_, Version, Ts}] -> Ts;
+ _ -> 0
+ end,
+ erlang:system_time(second) - Timestamp.
+
+
+update_md_version_timestamp(Version) ->
+ Ts = erlang:system_time(second),
+ ets:insert(?MODULE, {?MD_TIMESTAMP_ETS_KEY, Version, Ts}).
+
+
+update_job_data(Data, undefined) ->
+ Data;
+
+update_job_data(_Data, NewData) ->
+ NewData.
diff --git a/src/couch_jobs/src/couch_jobs_notifier.erl b/src/couch_jobs/src/couch_jobs_notifier.erl
new file mode 100644
index 000000000..99581cb79
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs_notifier.erl
@@ -0,0 +1,314 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_jobs_notifier).
+
+-behaviour(gen_server).
+
+
+-export([
+ start_link/1,
+ subscribe/4,
+ unsubscribe/2
+]).
+
+-export([
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ code_change/3,
+ format_status/2
+]).
+
+
+-include("couch_jobs.hrl").
+
+
+-define(TYPE_MONITOR_HOLDOFF_DEFAULT, 50).
+-define(TYPE_MONITOR_TIMEOUT_DEFAULT, "infinity").
+-define(GET_JOBS_RANGE_RATIO, 0.5).
+
+
+-record(st, {
+ jtx,
+ type,
+ monitor_pid,
+ subs, % #{JobId => #{Ref => {Pid, State, Seq}}}
+ pidmap, % #{{Jobid, Pid} => Ref}
+ refmap % #{Ref => JobId}
+}).
+
+
+start_link(Type) ->
+ gen_server:start_link(?MODULE, [Type], []).
+
+
+subscribe(Type, JobId, State, Seq) ->
+ case couch_jobs_server:get_notifier_server(Type) of
+ {ok, Server} ->
+ CallArgs = {subscribe, JobId, State, Seq, self()},
+ Ref = gen_server:call(Server, CallArgs, infinity),
+ {ok, {Server, Ref}};
+ {error, Error} ->
+ {error, Error}
+ end.
+
+
+unsubscribe(Server, Ref) when is_reference(Ref) ->
+ gen_server:call(Server, {unsubscribe, Ref, self()}, infinity).
+
+
+init([Type]) ->
+ JTx = couch_jobs_fdb:get_jtx(),
+ St = #st{
+ jtx = JTx,
+ type = Type,
+ subs = #{},
+ pidmap = #{},
+ refmap = #{}
+ },
+ VS = get_type_vs(St),
+ HoldOff = get_holdoff(),
+ Timeout = get_timeout(),
+ Pid = couch_jobs_type_monitor:start(Type, VS, HoldOff, Timeout),
+ {ok, St#st{monitor_pid = Pid}}.
+
+
+terminate(_, _St) ->
+ ok.
+
+
+handle_call({subscribe, JobId, State, Seq, Pid}, _From, #st{} = St) ->
+ #st{pidmap = PidMap, refmap = RefMap} = St,
+ case maps:get({JobId, Pid}, PidMap, not_found) of
+ not_found ->
+ Ref = erlang:monitor(process, Pid),
+ St1 = update_sub(JobId, Ref, Pid, State, Seq, St),
+ St2 = St1#st{pidmap = PidMap#{{JobId, Pid} => Ref}},
+ St3 = St2#st{refmap = RefMap#{Ref => JobId}},
+ {reply, Ref, St3};
+ Ref when is_reference(Ref) ->
+ St1 = update_sub(JobId, Ref, Pid, State, Seq, St),
+ {reply, Ref, St1}
+ end;
+
+handle_call({unsubscribe, Ref, Pid}, _From, #st{} = St) ->
+ {reply, ok, unsubscribe_int(Ref, Pid, St)};
+
+handle_call(Msg, _From, St) ->
+ {stop, {bad_call, Msg}, {bad_call, Msg}, St}.
+
+
+handle_cast(Msg, St) ->
+ {stop, {bad_cast, Msg}, St}.
+
+
+handle_info({type_updated, VS}, St) ->
+ VSMax = flush_type_updated_messages(VS),
+ {noreply, try_notify_subscribers(VSMax, St)};
+
+handle_info({Ref, ready}, St) when is_reference(Ref) ->
+ % Don't crash here; that would take down couch_jobs_server and, with it, the
+ % whole application. Proper cleanup should eventually be done in the
+ % erlfdb:wait timeout code.
+ LogMsg = "~p : spurious erlfdb future ready message ~p",
+ couch_log:error(LogMsg, [?MODULE, Ref]),
+ {noreply, St};
+
+handle_info({'DOWN', Ref, process, Pid, _}, #st{} = St) ->
+ {noreply, unsubscribe_int(Ref, Pid, St)};
+
+handle_info(Msg, St) ->
+ {stop, {bad_info, Msg}, St}.
+
+
+code_change(_OldVsn, St, _Extra) ->
+ {ok, St}.
+
+
+format_status(_Opt, [_PDict, State]) ->
+ #st{
+ jtx=JTx,
+ type=Type,
+ monitor_pid=MonitorPid,
+ subs=Subs,
+ pidmap=PidMap,
+ refmap=RefMap
+ } = State,
+ [{data, [{"State", [
+ {jtx, JTx},
+ {type, Type},
+ {monitor_pid, MonitorPid},
+ {subs, {map_size, maps:size(Subs)}},
+ {pidmap, {map_size, maps:size(PidMap)}},
+ {refmap, {map_size, maps:size(RefMap)}}
+ ]}]}].
+
+
+update_subs(JobId, Refs, #st{subs = Subs} = St) when map_size(Refs) =:= 0 ->
+ St#st{subs = maps:remove(JobId, Subs)};
+
+update_subs(JobId, Refs, #st{subs = Subs} = St) when map_size(Refs) > 0 ->
+ St#st{subs = Subs#{JobId => Refs}}.
+
+
+update_sub(JobId, Ref, Pid, State, Seq, #st{subs = Subs} = St) ->
+ Refs = maps:get(JobId, Subs, #{}),
+ update_subs(JobId, Refs#{Ref => {Pid, State, Seq}}, St).
+
+
+remove_sub(JobId, Ref, #st{subs = Subs} = St) ->
+ case maps:get(JobId, Subs, not_found) of
+ not_found -> St;
+ #{} = Refs -> update_subs(JobId, maps:remove(Ref, Refs), St)
+ end.
+
+
+unsubscribe_int(Id, Ref, Pid, #st{pidmap = PidMap, refmap = RefMap} = St) ->
+ St1 = remove_sub(Id, Ref, St),
+ erlang:demonitor(Ref, [flush]),
+ St1#st{
+ pidmap = maps:remove({Id, Pid}, PidMap),
+ refmap = maps:remove(Ref, RefMap)
+ }.
+
+
+unsubscribe_int(Ref, Pid, #st{refmap = RefMap} = St) ->
+ case maps:get(Ref, RefMap, not_found) of
+ not_found -> St;
+ Id -> unsubscribe_int(Id, Ref, Pid, St)
+ end.
+
+
+flush_type_updated_messages(VSMax) ->
+ receive
+ {type_updated, VS} ->
+ flush_type_updated_messages(max(VS, VSMax))
+ after
+ 0 -> VSMax
+ end.
+
+
+get_jobs(#st{jtx = JTx, type = Type}, InactiveIdMap, Ratio)
+ when Ratio >= ?GET_JOBS_RANGE_RATIO ->
+ Filter = fun(JobId) -> maps:is_key(JobId, InactiveIdMap) end,
+ JobMap = couch_jobs_fdb:tx(JTx, fun(JTx1) ->
+ couch_jobs_fdb:get_jobs(JTx1, Type, Filter)
+ end),
+ maps:map(fun(JobId, _) ->
+ case maps:is_key(JobId, JobMap) of
+ true -> maps:get(JobId, JobMap);
+ false -> {null, not_found, not_found}
+ end
+ end, InactiveIdMap);
+
+get_jobs(#st{jtx = JTx, type = Type}, InactiveIdMap, _) ->
+ couch_jobs_fdb:tx(JTx, fun(JTx1) ->
+ maps:map(fun(JobId, _) ->
+ Job = #{job => true, type => Type, id => JobId},
+ case couch_jobs_fdb:get_job_state_and_data(JTx1, Job) of
+ {ok, Seq, State, Data} ->
+ {Seq, State, Data};
+ {error, not_found} ->
+ {null, not_found, not_found}
+ end
+ end, InactiveIdMap)
+ end).
+
+
+get_type_vs(#st{jtx = JTx, type = Type}) ->
+ couch_jobs_fdb:tx(JTx, fun(JTx1) ->
+ couch_jobs_fdb:get_activity_vs(JTx1, Type)
+ end).
+
+
+% "Active since" is the set of jobs that have been active (running)
+% and updated at least once since the given versionstamp. These are relatively
+% cheap to find as it's just a range read in the ?ACTIVITY subspace.
+%
+get_active_since(#st{} = _St, not_found) ->
+ #{};
+
+get_active_since(#st{jtx = JTx, type = Type, subs = Subs}, VS) ->
+ AllUpdated = couch_jobs_fdb:tx(JTx, fun(JTx1) ->
+ couch_jobs_fdb:get_active_since(JTx1, Type, VS)
+ end),
+ maps:map(fun(_JobId, Data) ->
+ {VS, running, Data}
+ end, maps:with(maps:keys(Subs), AllUpdated)).
+
+
+try_notify_subscribers(ActiveVS, #st{} = St) ->
+ try
+ notify_subscribers(ActiveVS, St)
+ catch
+ error:{timeout, _} -> try_notify_subscribers(ActiveVS, St);
+ error:{erlfdb_error, 1031} -> try_notify_subscribers(ActiveVS, St)
+ end.
+
+
+notify_subscribers(_, #st{subs = Subs} = St) when map_size(Subs) =:= 0 ->
+ St;
+
+notify_subscribers(ActiveVS, #st{} = St1) ->
+ % First gather the easy (cheap) active jobs. Then, with those out of the way,
+ % inspect each remaining job to get its state.
+ Active = get_active_since(St1, ActiveVS),
+ St2 = notify_job_ids(Active, St1),
+ ActiveIds = maps:keys(Active),
+ Subs = St2#st.subs,
+ InactiveIdMap = maps:without(ActiveIds, Subs),
+ InactiveRatio = maps:size(InactiveIdMap) / maps:size(Subs),
+ Inactive = get_jobs(St2, InactiveIdMap, InactiveRatio),
+ notify_job_ids(Inactive, St2).
+
+
+notify_job_ids(#{} = Jobs, #st{type = Type} = St0) ->
+ maps:fold(fun(Id, {VS, State, Data}, #st{} = StAcc) ->
+ DoUnsub = lists:member(State, [finished, not_found]),
+ maps:fold(fun
+ (_Ref, {_Pid, running, OldVS}, St) when State =:= running,
+ OldVS >= VS ->
+ St;
+ (Ref, {Pid, running, OldVS}, St) when State =:= running,
+ OldVS < VS ->
+ % For running state send updates even if state doesn't change
+ notify(Pid, Ref, Type, Id, State, Data),
+ update_sub(Id, Ref, Pid, running, VS, St);
+ (_Ref, {_Pid, OldState, _VS}, St) when OldState =:= State ->
+ St;
+ (Ref, {Pid, _State, _VS}, St) ->
+ notify(Pid, Ref, Type, Id, State, Data),
+ case DoUnsub of
+ true -> unsubscribe_int(Id, Ref, Pid, St);
+ false -> update_sub(Id, Ref, Pid, State, VS, St)
+ end
+ end, StAcc, maps:get(Id, StAcc#st.subs, #{}))
+ end, St0, Jobs).
+
+
+notify(Pid, Ref, Type, Id, State, Data) ->
+ Pid ! {?COUCH_JOBS_EVENT, Ref, Type, Id, State, Data}.
+
+
+get_holdoff() ->
+ config:get_integer("couch_jobs", "type_monitor_holdoff_msec",
+ ?TYPE_MONITOR_HOLDOFF_DEFAULT).
+
+
+get_timeout() ->
+ Default = ?TYPE_MONITOR_TIMEOUT_DEFAULT,
+ case config:get("couch_jobs", "type_monitor_timeout_msec", Default) of
+ "infinity" -> infinity;
+ Milliseconds -> list_to_integer(Milliseconds)
+ end.
diff --git a/src/couch_jobs/src/couch_jobs_notifier_sup.erl b/src/couch_jobs/src/couch_jobs_notifier_sup.erl
new file mode 100644
index 000000000..81d93493b
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs_notifier_sup.erl
@@ -0,0 +1,64 @@
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_jobs_notifier_sup).
+
+
+-behaviour(supervisor).
+
+
+-export([
+ start_link/0,
+
+ start_notifier/1,
+ stop_notifier/1,
+ get_child_pids/0
+]).
+
+-export([
+ init/1
+]).
+
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+
+start_notifier(Type) ->
+ supervisor:start_child(?MODULE, [Type]).
+
+
+stop_notifier(Pid) ->
+ supervisor:terminate_child(?MODULE, Pid).
+
+
+get_child_pids() ->
+ lists:map(fun({_Id, Pid, _Type, _Mod}) ->
+ Pid
+ end, supervisor:which_children(?MODULE)).
+
+
+init(_) ->
+ Flags = #{
+ strategy => simple_one_for_one,
+ intensity => 10,
+ period => 3
+ },
+ Children = [
+ #{
+ id => couch_jobs_notifier,
+ restart => temporary,
+ start => {couch_jobs_notifier, start_link, []}
+ }
+ ],
+ {ok, {Flags, Children}}.
diff --git a/src/couch_jobs/src/couch_jobs_pending.erl b/src/couch_jobs/src/couch_jobs_pending.erl
new file mode 100644
index 000000000..ab53c59d1
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs_pending.erl
@@ -0,0 +1,143 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_jobs_pending).
+
+
+-export([
+ enqueue/4,
+ dequeue/4,
+ remove/4
+]).
+
+
+-include("couch_jobs.hrl").
+
+
+-define(RANGE_LIMIT, 1024).
+
+
+enqueue(#{jtx := true} = JTx, Type, STime, JobId) ->
+ #{tx := Tx, jobs_path := Jobs} = JTx,
+ Key = erlfdb_tuple:pack({?PENDING, Type, STime, JobId}, Jobs),
+ erlfdb:set(Tx, Key, <<>>),
+ WatchKey = erlfdb_tuple:pack({?WATCHES_PENDING, Type}, Jobs),
+ erlfdb:add(Tx, WatchKey, 1),
+ ok.
+
+
+dequeue(#{jtx := true} = JTx, Type, _, true) ->
+ #{tx := Tx, jobs_path := Jobs} = JTx,
+ Prefix = erlfdb_tuple:pack({?PENDING, Type, 0}, Jobs),
+ case get_random_item(Tx, Prefix) of
+ {error, not_found} ->
+ {not_found, get_pending_watch(JTx, Type)};
+ {ok, PendingKey} ->
+ erlfdb:clear(Tx, PendingKey),
+ {JobId} = erlfdb_tuple:unpack(PendingKey, Prefix),
+ {ok, JobId}
+ end;
+
+dequeue(#{jtx := true} = JTx, Type, MaxPriority, _) ->
+ #{tx := Tx, jobs_path := Jobs} = JTx,
+ Prefix = erlfdb_tuple:pack({?PENDING, Type}, Jobs),
+ StartKeySel = erlfdb_key:first_greater_than(Prefix),
+ End = erlfdb_tuple:pack({MaxPriority, <<16#FF>>}, Prefix),
+ EndKeySel = erlfdb_key:first_greater_or_equal(End),
+ case clear_random_key_from_range(Tx, StartKeySel, EndKeySel) of
+ {error, not_found} ->
+ {not_found, get_pending_watch(JTx, Type)};
+ {ok, PendingKey} ->
+ {_, JobId} = erlfdb_tuple:unpack(PendingKey, Prefix),
+ {ok, JobId}
+ end.
+
+
+remove(#{jtx := true} = JTx, Type, JobId, STime) ->
+ #{tx := Tx, jobs_path := Jobs} = JTx,
+ Key = erlfdb_tuple:pack({?PENDING, Type, STime, JobId}, Jobs),
+ erlfdb:clear(Tx, Key).
+
+
+%% Private functions
+
+
+% Pick a random item from the range without reading in all the keys first.
+% The constraint is that job IDs should look like random UUIDs.
+get_random_item(Tx, Prefix) ->
+ Id = fabric2_util:uuid(),
+ Snapshot = erlfdb:snapshot(Tx),
+ % Try to be fair and switch evenly between trying IDs before or after the
+ % randomly generated one. Otherwise, always trying "before" first would leave
+ % a lot of <<"fff...">> IDs sitting in the queue for too long, and always
+ % trying "after" first would leave a lot of <<"00...">> ones waiting.
+ case rand:uniform() > 0.5 of
+ true ->
+ case get_after(Snapshot, Prefix, Id) of
+ {error, not_found} -> get_before(Snapshot, Prefix, Id);
+ {ok, Key} -> {ok, Key}
+ end;
+ false ->
+ case get_before(Snapshot, Prefix, Id) of
+ {error, not_found} -> get_after(Snapshot, Prefix, Id);
+ {ok, Key} -> {ok, Key}
+ end
+ end.
+
+
+get_before(Snapshot, Prefix, Id) ->
+ KSel = erlfdb_key:last_less_or_equal(erlfdb_tuple:pack({Id}, Prefix)),
+ PrefixSize = byte_size(Prefix),
+ case erlfdb:wait(erlfdb:get_key(Snapshot, KSel)) of
+ <<Prefix:PrefixSize/binary, _/binary>> = Key -> {ok, Key};
+ _ -> {error, not_found}
+ end.
+
+
+get_after(Snapshot, Prefix, Id) ->
+ KSel = erlfdb_key:first_greater_or_equal(erlfdb_tuple:pack({Id}, Prefix)),
+ PrefixSize = byte_size(Prefix),
+ case erlfdb:wait(erlfdb:get_key(Snapshot, KSel)) of
+ <<Prefix:PrefixSize/binary, _/binary>> = Key -> {ok, Key};
+ _ -> {error, not_found}
+ end.
+
+
+% Read a page of keys from the range with a snapshot read, then randomly pick
+% one of those keys to clear. Before clearing, add a read conflict on the key
+% in case other workers have picked the same key.
+%
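+% (The range read below uses {snapshot, true}, so it does not add read
+% conflict ranges on its own; without the explicit
+% erlfdb:add_read_conflict_key/2 call, two workers could clear the same key
+% and both accept the same job. With it, one of the conflicting transactions
+% fails and retries.)
+%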
+clear_random_key_from_range(Tx, Start, End) ->
+ Opts = [
+ {limit, ?RANGE_LIMIT},
+ {snapshot, true}
+ ],
+ case erlfdb:wait(erlfdb:get_range(Tx, Start, End, Opts)) of
+ [] ->
+ {error, not_found};
+ [{Key, _}] ->
+ erlfdb:add_read_conflict_key(Tx, Key),
+ erlfdb:clear(Tx, Key),
+ {ok, Key};
+ [{_, _} | _] = KVs ->
+ Index = rand:uniform(length(KVs)),
+ {Key, _} = lists:nth(Index, KVs),
+ erlfdb:add_read_conflict_key(Tx, Key),
+ erlfdb:clear(Tx, Key),
+ {ok, Key}
+ end.
+
+
+get_pending_watch(#{jtx := true} = JTx, Type) ->
+ #{tx := Tx, jobs_path := Jobs} = couch_jobs_fdb:get_jtx(JTx),
+ Key = erlfdb_tuple:pack({?WATCHES_PENDING, Type}, Jobs),
+ erlfdb:watch(Tx, Key).
diff --git a/src/couch_jobs/src/couch_jobs_server.erl b/src/couch_jobs/src/couch_jobs_server.erl
new file mode 100644
index 000000000..2e03c7dcf
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs_server.erl
@@ -0,0 +1,193 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_jobs_server).
+
+-behaviour(gen_server).
+
+
+-export([
+ start_link/0,
+ get_notifier_server/1,
+ force_check_types/0
+]).
+
+-export([
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ code_change/3
+]).
+
+
+-define(TYPE_CHECK_PERIOD_DEFAULT, 15000).
+-define(MAX_JITTER_DEFAULT, 5000).
+
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, nil, []).
+
+
+get_notifier_server(Type) ->
+ case get_type_pid_refs(Type) of
+ {{_, _}, {NotifierPid, _}} ->
+ {ok, NotifierPid};
+ not_found ->
+ force_check_types(),
+ case get_type_pid_refs(Type) of
+ {{_, _}, {NotifierPid, _}} ->
+ {ok, NotifierPid};
+ not_found ->
+ {error, not_found}
+ end
+ end.
+
+
+force_check_types() ->
+ gen_server:call(?MODULE, check_types, infinity).
+
+
+init(_) ->
+ % couch_jobs_server starts after the notifier and activity monitor
+ % supervisors. If it restarts, there could be stale notifier or activity
+ % monitor processes still running. Kill those, since new ones will be started
+ % later anyway.
+ reset_monitors(),
+ reset_notifiers(),
+ ets:new(?MODULE, [protected, named_table]),
+ check_types(),
+ schedule_check(),
+ {ok, nil}.
+
+
+terminate(_, _St) ->
+ ok.
+
+
+handle_call(check_types, _From, St) ->
+ check_types(),
+ {reply, ok, St};
+
+handle_call(Msg, _From, St) ->
+ {stop, {bad_call, Msg}, {bad_call, Msg}, St}.
+
+
+handle_cast(Msg, St) ->
+ {stop, {bad_cast, Msg}, St}.
+
+
+handle_info(check_types, St) ->
+ check_types(),
+ schedule_check(),
+ {noreply, St};
+
+handle_info({'DOWN', _Ref, process, Pid, Reason}, St) ->
+ LogMsg = "~p : process ~p exited with ~p",
+ couch_log:error(LogMsg, [?MODULE, Pid, Reason]),
+ {stop, {unexpected_process_exit, Pid, Reason}, St};
+
+handle_info({Ref, ready}, St) when is_reference(Ref) ->
+ % Don't crash couch_jobs_server; that would take the whole application down
+ % with it. Proper cleanup should eventually be done in the erlfdb:wait
+ % timeout code.
+ LogMsg = "~p : spurious erlfdb future ready message ~p",
+ couch_log:error(LogMsg, [?MODULE, Ref]),
+ {noreply, St};
+
+handle_info(Msg, St) ->
+ {stop, {bad_info, Msg}, St}.
+
+
+code_change(_OldVsn, St, _Extra) ->
+ {ok, St}.
+
+
+check_types() ->
+ FdbTypes = fdb_types(),
+ EtsTypes = ets_types(),
+ ToStart = FdbTypes -- EtsTypes,
+ ToStop = EtsTypes -- FdbTypes,
+ lists:foreach(fun(Type) -> start_monitors(Type) end, ToStart),
+ lists:foreach(fun(Type) -> stop_monitors(Type) end, ToStop).
+
+
+start_monitors(Type) ->
+ MonPidRef = case couch_jobs_activity_monitor_sup:start_monitor(Type) of
+ {ok, Pid1} -> {Pid1, monitor(process, Pid1)};
+ {error, Error1} -> error({failed_to_start_monitor, Type, Error1})
+ end,
+ NotifierPidRef = case couch_jobs_notifier_sup:start_notifier(Type) of
+ {ok, Pid2} -> {Pid2, monitor(process, Pid2)};
+ {error, Error2} -> error({failed_to_start_notifier, Type, Error2})
+ end,
+ ets:insert_new(?MODULE, {Type, MonPidRef, NotifierPidRef}).
+
+
+stop_monitors(Type) ->
+ {{MonPid, MonRef}, {NotifierPid, NotifierRef}} = get_type_pid_refs(Type),
+ ok = couch_jobs_activity_monitor_sup:stop_monitor(MonPid),
+ demonitor(MonRef, [flush]),
+ ok = couch_jobs_notifier_sup:stop_notifier(NotifierPid),
+ demonitor(NotifierRef, [flush]),
+ ets:delete(?MODULE, Type).
+
+
+reset_monitors() ->
+ lists:foreach(fun(Pid) ->
+ couch_jobs_activity_monitor_sup:stop_monitor(Pid)
+ end, couch_jobs_activity_monitor_sup:get_child_pids()).
+
+
+reset_notifiers() ->
+ lists:foreach(fun(Pid) ->
+ couch_jobs_notifier_sup:stop_notifier(Pid)
+ end, couch_jobs_notifier_sup:get_child_pids()).
+
+
+get_type_pid_refs(Type) ->
+ case ets:lookup(?MODULE, Type) of
+ [{_, MonPidRef, NotifierPidRef}] -> {MonPidRef, NotifierPidRef};
+ [] -> not_found
+ end.
+
+
+ets_types() ->
+ lists:flatten(ets:match(?MODULE, {'$1', '_', '_'})).
+
+
+fdb_types() ->
+ try
+ couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(JTx) ->
+ couch_jobs_fdb:get_types(JTx)
+ end)
+ catch
+ error:{timeout, _} ->
+ couch_log:warning("~p : Timed out connecting to FDB", [?MODULE]),
+ []
+ end.
+
+
+schedule_check() ->
+ Timeout = get_period_msec(),
+ MaxJitter = max(Timeout div 2, get_max_jitter_msec()),
+ Wait = Timeout + rand:uniform(max(1, MaxJitter)),
+ erlang:send_after(Wait, self(), check_types).
+
+
+get_period_msec() ->
+ config:get_integer("couch_jobs", "type_check_period_msec",
+ ?TYPE_CHECK_PERIOD_DEFAULT).
+
+
+get_max_jitter_msec() ->
+ config:get_integer("couch_jobs", "type_check_max_jitter_msec",
+ ?MAX_JITTER_DEFAULT).
diff --git a/src/couch_jobs/src/couch_jobs_sup.erl b/src/couch_jobs/src/couch_jobs_sup.erl
new file mode 100644
index 000000000..d79023777
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs_sup.erl
@@ -0,0 +1,66 @@
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_jobs_sup).
+
+
+-behaviour(supervisor).
+
+
+-export([
+ start_link/0
+]).
+
+-export([
+ init/1
+]).
+
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+
+init([]) ->
+ Flags = #{
+ strategy => rest_for_one,
+ intensity => 3,
+ period => 10
+ },
+ Children = [
+ #{
+ id => couch_jobs_fdb,
+ restart => transient,
+ start => {couch_jobs_fdb, init_cache, []}
+ },
+ #{
+ id => couch_jobs_activity_monitor_sup,
+ restart => permanent,
+ shutdown => brutal_kill,
+ type => supervisor,
+ start => {couch_jobs_activity_monitor_sup, start_link, []}
+ },
+ #{
+ id => couch_jobs_notifier_sup,
+ restart => permanent,
+ shutdown => brutal_kill,
+ type => supervisor,
+ start => {couch_jobs_notifier_sup, start_link, []}
+ },
+ #{
+ id => couch_jobs_server,
+ restart => permanent,
+ shutdown => brutal_kill,
+ start => {couch_jobs_server, start_link, []}
+ }
+ ],
+ {ok, {Flags, Children}}.
diff --git a/src/couch_jobs/src/couch_jobs_type_monitor.erl b/src/couch_jobs/src/couch_jobs_type_monitor.erl
new file mode 100644
index 000000000..04ad60acc
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs_type_monitor.erl
@@ -0,0 +1,84 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_jobs_type_monitor).
+
+
+-export([
+ start/4
+]).
+
+
+-include("couch_jobs.hrl").
+
+
+-record(st, {
+ jtx,
+ type,
+ vs,
+ parent,
+ timestamp,
+ holdoff,
+ timeout
+}).
+
+
+start(Type, VS, HoldOff, Timeout) ->
+ Parent = self(),
+ spawn_link(fun() ->
+ loop(#st{
+ jtx = couch_jobs_fdb:get_jtx(),
+ type = Type,
+ vs = VS,
+ parent = Parent,
+ timestamp = 0,
+ holdoff = HoldOff,
+ timeout = Timeout
+ })
+ end).
+
+
+loop(#st{vs = VS, timeout = Timeout} = St) ->
+ {St1, Watch} = case get_vs_and_watch(St) of
+ {VS1, W} when VS1 =/= VS -> {notify(St#st{vs = VS1}), W};
+ {VS, W} -> {St, W}
+ end,
+ try
+ erlfdb:wait(Watch, [{timeout, Timeout}])
+ catch
+ error:{erlfdb_error, ?FUTURE_VERSION} ->
+ erlfdb:cancel(Watch, [flush]),
+ ok;
+ error:{timeout, _} ->
+ erlfdb:cancel(Watch, [flush]),
+ ok
+ end,
+ loop(St1).
+
+
+notify(#st{} = St) ->
+ #st{holdoff = HoldOff, parent = Pid, timestamp = Ts, vs = VS} = St,
+ Now = erlang:system_time(millisecond),
+ case Now - Ts of
+ Dt when Dt < HoldOff ->
+ timer:sleep(max(HoldOff - Dt, 0));
+ _ ->
+ ok
+ end,
+ Pid ! {type_updated, VS},
+ St#st{timestamp = Now}.
+
+
+get_vs_and_watch(#st{jtx = JTx, type = Type}) ->
+ couch_jobs_fdb:tx(JTx, fun(JTx1) ->
+ couch_jobs_fdb:get_activity_vs_and_watch(JTx1, Type)
+ end).
diff --git a/src/couch_jobs/test/couch_jobs_tests.erl b/src/couch_jobs/test/couch_jobs_tests.erl
new file mode 100644
index 000000000..11572a4b9
--- /dev/null
+++ b/src/couch_jobs/test/couch_jobs_tests.erl
@@ -0,0 +1,762 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_jobs_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+% The job creation API can take an undefined Tx object; in that case it will
+% start its own transaction
+-define(TX, undefined).
+
+
+couch_jobs_basic_test_() ->
+ {
+ "Test couch jobs basics",
+ {
+ setup,
+ fun setup_couch/0, fun teardown_couch/1,
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun add_remove_pending/1,
+ fun add_remove_errors/1,
+ fun add_with_the_same_scheduled_time/1,
+ fun get_job_data_and_state/1,
+ fun resubmit_as_job_creator/1,
+ fun type_timeouts_and_server/1,
+ fun dead_notifier_restarts_jobs_server/1,
+ fun bad_messages_restart_couch_jobs_server/1,
+ fun bad_messages_restart_notifier/1,
+ fun bad_messages_restart_activity_monitor/1,
+ fun basic_accept_and_finish/1,
+ fun accept_blocking/1,
+ fun job_processor_update/1,
+ fun resubmit_enqueues_job/1,
+ fun resubmit_finished_updates_job_data/1,
+ fun resubmit_running_does_not_update_job_data/1,
+ fun resubmit_custom_schedtime/1,
+ fun add_pending_updates_job_data/1,
+ fun add_finished_updates_job_data/1,
+ fun add_running_does_not_update_job_data/1,
+ fun accept_max_schedtime/1,
+ fun accept_no_schedule/1,
+ fun subscribe/1,
+ fun remove_when_subscribed_and_pending/1,
+ fun remove_when_subscribed_and_running/1,
+ fun subscribe_wait_multiple/1,
+ fun enqueue_inactive/1,
+ fun remove_running_job/1,
+ fun check_get_jobs/1,
+ fun use_fabric_transaction_object/1,
+ fun metadata_version_bump/1
+ ]
+ }
+ }
+ }.
+
+
+setup_couch() ->
+ test_util:start_couch([fabric]).
+
+
+teardown_couch(Ctx) ->
+ test_util:stop_couch(Ctx),
+ meck:unload().
+
+
+setup() ->
+ application:start(couch_jobs),
+ clear_jobs(),
+ T1 = {<<"t1">>, 1024}, % a complex type should work
+ T2 = 42, % a number should work as well
+ T1Timeout = 2,
+ T2Timeout = 3,
+ couch_jobs:set_type_timeout(T1, T1Timeout),
+ couch_jobs:set_type_timeout(T2, T2Timeout),
+ #{
+ t1 => T1,
+ t2 => T2,
+ t1_timeout => T1Timeout,
+ j1 => <<"j1">>,
+ j2 => <<"j2">>,
+ dbname => ?tempdb()
+ }.
+
+
+teardown(#{dbname := DbName}) ->
+ clear_jobs(),
+ application:stop(couch_jobs),
+ AllDbs = fabric2_db:list_dbs(),
+ case lists:member(DbName, AllDbs) of
+ true -> ok = fabric2_db:delete(DbName, []);
+ false -> ok
+ end,
+ meck:unload().
+
+
+clear_jobs() ->
+ couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(JTx) ->
+ #{jobs_path := Jobs, tx := Tx} = JTx,
+ erlfdb:clear_range_startswith(Tx, Jobs)
+ end).
+
+
+restart_app() ->
+ application:stop(couch_jobs),
+ application:start(couch_jobs),
+ couch_jobs_server:force_check_types().
+
+
+get_job(Type, JobId) ->
+ couch_jobs_fdb:get_job(Type, JobId).
+
+
+add_remove_pending(#{t1 := T1, j1 := J1, t2 := T2, j2 := J2}) ->
+ ?_test(begin
+ ?assertEqual(ok, couch_jobs:add(?TX, T1, J1, #{})),
+ ?assertMatch(#{state := pending, data := #{}}, get_job(T1, J1)),
+ ?assertEqual(ok, couch_jobs:remove(?TX, T1, J1)),
+ % Data and numeric type should work as well. Also do it in a
+ % transaction
+ Data = #{<<"x">> => 42},
+ ?assertEqual(ok, fabric2_fdb:transactional(fun(Tx) ->
+ couch_jobs:add(Tx, T2, J2, Data)
+ end)),
+ ?assertMatch(#{state := pending, data := Data}, get_job(T2, J2)),
+ ?assertEqual(ok, couch_jobs:remove(?TX, T2, J2))
+ end).
+
+
+get_job_data_and_state(#{t1 := T, j1 := J}) ->
+ ?_test(begin
+ Data = #{<<"x">> => 42},
+ ok = couch_jobs:add(?TX, T, J, Data),
+ ?assertEqual({ok, Data}, couch_jobs:get_job_data(?TX, T, J)),
+ ?assertEqual({ok, pending}, couch_jobs:get_job_state(?TX, T, J)),
+ ?assertEqual(ok, couch_jobs:remove(?TX, T, J)),
+ ?assertEqual({error, not_found}, couch_jobs:get_job_data(?TX, T, J)),
+ ?assertEqual({error, not_found}, couch_jobs:get_job_state(?TX, T, J))
+ end).
+
+
+add_remove_errors(#{t1 := T, j1 := J}) ->
+ ?_test(begin
+ ?assertEqual({error, not_found}, couch_jobs:remove(?TX, 999, <<"x">>)),
+ ?assertMatch({error, {json_encoding_error, _}}, couch_jobs:add(?TX, T,
+ J, #{1 => 2})),
+ ?assertEqual({error, no_type_timeout}, couch_jobs:add(?TX, <<"x">>, J,
+ #{})),
+ ?assertEqual(ok, couch_jobs:add(?TX, T, J, #{})),
+ ?assertEqual(ok, couch_jobs:add(?TX, T, J, #{})),
+ ?assertEqual(ok, couch_jobs:remove(?TX, T, J))
+ end).
+
+
+add_with_the_same_scheduled_time(#{t1 := T, j1 := J}) ->
+ ?_test(begin
+ ?assertEqual(ok, couch_jobs:add(?TX, T, J, #{})),
+ fabric2_fdb:transactional(fun(Tx) ->
+ ?assertEqual(ok, couch_jobs:add(Tx, T, J, #{})),
+ ?assert(erlfdb:is_read_only(Tx))
+ end)
+ end).
+
+
+resubmit_as_job_creator(#{t1 := T, j1 := J}) ->
+ ?_test(begin
+ Data = #{<<"x">> => 42},
+ ok = couch_jobs:add(?TX, T, J, Data, 15),
+
+ % Job was pending, doesn't get resubmitted
+ ok = couch_jobs:add(?TX, T, J, Data, 16),
+ ?assertMatch(#{state := pending, stime := 16}, get_job(T, J)),
+
+ {ok, Job1, Data} = couch_jobs:accept(T),
+
+ % If it is running, it gets flagged to be resubmitted
+ ok = couch_jobs:add(?TX, T, J, Data, 17),
+ ?assertMatch(#{state := running, stime := 17}, get_job(T, J)),
+ ?assertEqual(true, couch_jobs:is_resubmitted(get_job(T, J))),
+
+ ?assertEqual(ok, couch_jobs:finish(?TX, Job1)),
+ % It should be pending according to the resubmit flag
+ ?assertMatch(#{state := pending, stime := 17}, get_job(T, J)),
+
+ % A finished job will be re-enqueued
+ {ok, Job2, _} = couch_jobs:accept(T),
+ ?assertEqual(ok, couch_jobs:finish(?TX, Job2)),
+ ?assertMatch(#{state := finished, stime := 17}, get_job(T, J)),
+ ok = couch_jobs:add(?TX, T, J, Data, 18),
+ ?assertMatch(#{state := pending, stime := 18}, get_job(T, J))
+ end).
+
+
+type_timeouts_and_server(#{t1 := T, t1_timeout := T1Timeout}) ->
+ {timeout, 15, ?_test(begin
+
+ WaitForActivityMonitors = fun(N) ->
+ test_util:wait(fun() ->
+ Pids = couch_jobs_activity_monitor_sup:get_child_pids(),
+ case length(Pids) == N of
+ true -> ok;
+ false -> wait
+ end
+ end)
+ end,
+
+ WaitForNotifiers = fun(N) ->
+ test_util:wait(fun() ->
+ Pids = couch_jobs_notifier_sup:get_child_pids(),
+ case length(Pids) == N of
+ true -> ok;
+ false -> wait
+ end
+ end)
+ end,
+
+ couch_jobs_server:force_check_types(),
+
+ ?assertEqual(T1Timeout, couch_jobs:get_type_timeout(T)),
+
+ WaitForActivityMonitors(2),
+ ?assertEqual(2,
+ length(couch_jobs_activity_monitor_sup:get_child_pids())),
+
+ WaitForNotifiers(2),
+ ?assertEqual(2, length(couch_jobs_notifier_sup:get_child_pids())),
+
+ ?assertMatch({ok, _}, couch_jobs_server:get_notifier_server(T)),
+
+ ?assertEqual(ok, couch_jobs:set_type_timeout(<<"t3">>, 8)),
+ couch_jobs_server:force_check_types(),
+
+ WaitForActivityMonitors(3),
+ ?assertEqual(3,
+ length(couch_jobs_activity_monitor_sup:get_child_pids())),
+
+ WaitForNotifiers(3),
+ ?assertEqual(3, length(couch_jobs_notifier_sup:get_child_pids())),
+
+ ?assertEqual(ok, couch_jobs:clear_type_timeout(<<"t3">>)),
+ couch_jobs_server:force_check_types(),
+
+ WaitForActivityMonitors(2),
+ ?assertEqual(2,
+ length(couch_jobs_activity_monitor_sup:get_child_pids())),
+
+ WaitForNotifiers(2),
+ ?assertEqual(2,
+ length(couch_jobs_notifier_sup:get_child_pids())),
+
+ ?assertMatch({error, _},
+ couch_jobs_server:get_notifier_server(<<"t3">>)),
+
+ ?assertEqual(not_found, couch_jobs:get_type_timeout(<<"t3">>))
+ end)}.
+
+
+dead_notifier_restarts_jobs_server(#{}) ->
+ ?_test(begin
+ couch_jobs_server:force_check_types(),
+
+ ServerPid = whereis(couch_jobs_server),
+ Ref = monitor(process, ServerPid),
+
+ [Notifier1, _Notifier2] = couch_jobs_notifier_sup:get_child_pids(),
+ exit(Notifier1, kill),
+
+ % Killing a notifier should kill the server as well
+ receive {'DOWN', Ref, _, _, _} -> ok end
+ end).
+
+
+bad_messages_restart_couch_jobs_server(#{}) ->
+ ?_test(begin
+ % couch_jobs_server dies on bad cast
+ ServerPid1 = whereis(couch_jobs_server),
+ Ref1 = monitor(process, ServerPid1),
+ gen_server:cast(ServerPid1, bad_cast),
+ receive {'DOWN', Ref1, _, _, _} -> ok end,
+
+ restart_app(),
+
+ % couch_jobs_server dies on bad call
+ ServerPid2 = whereis(couch_jobs_server),
+ Ref2 = monitor(process, ServerPid2),
+ catch gen_server:call(ServerPid2, bad_call),
+ receive {'DOWN', Ref2, _, _, _} -> ok end,
+
+ restart_app(),
+
+ % couch_jobs_server dies on bad info
+ ServerPid3 = whereis(couch_jobs_server),
+ Ref3 = monitor(process, ServerPid3),
+ ServerPid3 ! a_random_message,
+ receive {'DOWN', Ref3, _, _, _} -> ok end,
+
+ restart_app()
+ end).
+
+
+bad_messages_restart_notifier(#{}) ->
+ ?_test(begin
+ couch_jobs_server:force_check_types(),
+
+ % bad cast kills the notifier
+ [AMon1, _] = couch_jobs_notifier_sup:get_child_pids(),
+ Ref1 = monitor(process, AMon1),
+ gen_server:cast(AMon1, bad_cast),
+ receive {'DOWN', Ref1, _, _, _} -> ok end,
+
+ restart_app(),
+
+ % bad call kills the notifier
+ [AMon2, _] = couch_jobs_notifier_sup:get_child_pids(),
+ Ref2 = monitor(process, AMon2),
+ catch gen_server:call(AMon2, bad_call),
+ receive {'DOWN', Ref2, _, _, _} -> ok end,
+
+ restart_app(),
+
+ % bad info message kills the notifier
+ [AMon3, _] = couch_jobs_notifier_sup:get_child_pids(),
+ Ref3 = monitor(process, AMon3),
+ AMon3 ! a_bad_message,
+ receive {'DOWN', Ref3, _, _, _} -> ok end,
+
+ restart_app()
+ end).
+
+
+bad_messages_restart_activity_monitor(#{}) ->
+ ?_test(begin
+ couch_jobs_server:force_check_types(),
+
+ % bad cast kills the activity monitor
+ [AMon1, _] = couch_jobs_activity_monitor_sup:get_child_pids(),
+ Ref1 = monitor(process, AMon1),
+ gen_server:cast(AMon1, bad_cast),
+ receive {'DOWN', Ref1, _, _, _} -> ok end,
+
+ restart_app(),
+
+ % bad call kills the activity monitor
+ [AMon2, _] = couch_jobs_activity_monitor_sup:get_child_pids(),
+ Ref2 = monitor(process, AMon2),
+ catch gen_server:call(AMon2, bad_call),
+ receive {'DOWN', Ref2, _, _, _} -> ok end,
+
+ restart_app(),
+
+ % bad info message kills activity monitor
+ [AMon3, _] = couch_jobs_activity_monitor_sup:get_child_pids(),
+ Ref3 = monitor(process, AMon3),
+ AMon3 ! a_bad_message,
+ receive {'DOWN', Ref3, _, _, _} -> ok end,
+
+ restart_app()
+ end).
+
+
+basic_accept_and_finish(#{t1 := T, j1 := J}) ->
+ ?_test(begin
+ ok = couch_jobs:add(?TX, T, J, #{}),
+ {ok, Job, #{}} = couch_jobs:accept(T),
+ ?assertMatch(#{state := running}, get_job(T, J)),
+ % check json validation for bad data in finish
+ ?assertMatch({error, {json_encoding_error, _}},
+ fabric2_fdb:transactional(fun(Tx) ->
+ couch_jobs:finish(Tx, Job, #{1 => 1})
+ end)),
+ Data = #{<<"x">> => 42},
+ ?assertEqual(ok, fabric2_fdb:transactional(fun(Tx) ->
+ couch_jobs:finish(Tx, Job, Data)
+ end)),
+ ?assertMatch(#{state := finished, data := Data}, get_job(T, J))
+ end).
+
+
+accept_blocking(#{t1 := T, j1 := J1, j2 := J2}) ->
+ ?_test(begin
+ Accept = fun() -> exit(couch_jobs:accept(T)) end,
+ WaitAccept = fun(Ref) ->
+ receive
+ {'DOWN', Ref, _, _, Res} -> Res
+ after
+ 500 -> timeout
+ end
+ end,
+ {_, Ref1} = spawn_monitor(Accept),
+ ok = couch_jobs:add(?TX, T, J1, #{}),
+ ?assertMatch({ok, #{id := J1}, #{}}, WaitAccept(Ref1)),
+ {_, Ref2} = spawn_monitor(Accept),
+ ?assertEqual(timeout, WaitAccept(Ref2)),
+ ok = couch_jobs:add(?TX, T, J2, #{}),
+ ?assertMatch({ok, #{id := J2}, #{}}, WaitAccept(Ref2))
+ end).
+
+
+job_processor_update(#{t1 := T, j1 := J}) ->
+ ?_test(begin
+ ok = couch_jobs:add(?TX, T, J, #{}),
+ {ok, Job, #{}} = couch_jobs:accept(T),
+
+ % Use proper transactions in a few places here instead of passing in
+ % ?TX. This is mostly to increase code coverage.
+
+ ?assertMatch({ok, #{job := true}}, fabric2_fdb:transactional(fun(Tx) ->
+ couch_jobs:update(Tx, Job, #{<<"x">> => 1})
+ end)),
+
+ ?assertMatch(#{data := #{<<"x">> := 1}, state := running},
+ get_job(T, J)),
+
+ ?assertMatch({ok, #{job := true}}, fabric2_fdb:transactional(fun(Tx) ->
+ couch_jobs:update(Tx, Job)
+ end)),
+
+ ?assertMatch(#{data := #{<<"x">> := 1}, state := running},
+ get_job(T, J)),
+
+ ?assertMatch({ok, #{job := true}}, fabric2_fdb:transactional(fun(Tx) ->
+ couch_jobs:update(Tx, Job, #{<<"x">> => 2})
+ end)),
+
+ % check json validation for bad data in update
+ ?assertMatch({error, {json_encoding_error, _}},
+ fabric2_fdb:transactional(fun(Tx) ->
+ couch_jobs:update(Tx, Job, #{1 => 1})
+ end)),
+
+ ?assertMatch(#{data := #{<<"x">> := 2}, state := running},
+ get_job(T, J)),
+
+ % Finish may update the data as well
+ ?assertEqual(ok, couch_jobs:finish(?TX, Job, #{<<"x">> => 3})),
+ ?assertMatch(#{data := #{<<"x">> := 3}, state := finished},
+ get_job(T, J))
+ end).
+
+
+resubmit_enqueues_job(#{t1 := T, j1 := J}) ->
+ ?_test(begin
+ ok = couch_jobs:add(?TX, T, J, #{}),
+ {ok, Job1, #{}} = couch_jobs:accept(T),
+ ?assertMatch({ok, _}, couch_jobs:resubmit(?TX, Job1, 6)),
+ ?assertEqual(ok, couch_jobs:finish(?TX, Job1)),
+ ?assertMatch(#{state := pending, stime := 6}, get_job(T, J)),
+ {ok, Job2, #{}} = couch_jobs:accept(T),
+ ?assertEqual(ok, couch_jobs:finish(?TX, Job2)),
+ ?assertMatch(#{state := finished}, get_job(T, J))
+ end).
+
+
+resubmit_finished_updates_job_data(#{t1 := T, j1 := J}) ->
+ ?_test(begin
+ Data1 = #{<<"test">> => 1},
+ Data2 = #{<<"test">> => 2},
+ ok = couch_jobs:add(?TX, T, J, Data1),
+ {ok, Job1, #{}} = couch_jobs:accept(T),
+ ?assertEqual(ok, couch_jobs:finish(?TX, Job1)),
+ ?assertMatch({ok, _}, couch_jobs:resubmit(?TX, Job1, 6, Data2)),
+ ?assertMatch({ok, _, Data2}, couch_jobs:accept(T))
+ end).
+
+
+resubmit_running_does_not_update_job_data(#{t1 := T, j1 := J}) ->
+ ?_test(begin
+ Data1 = #{<<"test">> => 1},
+ Data2 = #{<<"test">> => 2},
+ ok = couch_jobs:add(?TX, T, J, Data1),
+ {ok, Job1, #{}} = couch_jobs:accept(T),
+ ?assertMatch({ok, _}, couch_jobs:resubmit(?TX, Job1, 6, Data2)),
+ ?assertEqual(ok, couch_jobs:finish(?TX, Job1)),
+ ?assertMatch({ok, _, Data1}, couch_jobs:accept(T))
+ end).
+
+
+resubmit_custom_schedtime(#{t1 := T, j1 := J}) ->
+ ?_test(begin
+ ?assertEqual(ok, couch_jobs:add(?TX, T, J, #{}, 7)),
+ {ok, Job, #{}} = couch_jobs:accept(T),
+ ?assertMatch({ok, _}, couch_jobs:resubmit(?TX, Job, 9)),
+ ?assertEqual(ok, couch_jobs:finish(?TX, Job)),
+ ?assertMatch(#{stime := 9, state := pending}, get_job(T, J))
+ end).
+
+
+add_pending_updates_job_data(#{t1 := T, j1 := J}) ->
+ ?_test(begin
+ Data1 = #{<<"test">> => 1},
+ Data2 = #{<<"test">> => 2},
+ ok = couch_jobs:add(?TX, T, J, Data1),
+ ?assertEqual(ok, couch_jobs:add(?TX, T, J, Data2, 6)),
+ ?assertMatch({ok, _, Data2}, couch_jobs:accept(T))
+ end).
+
+
+add_finished_updates_job_data(#{t1 := T, j1 := J}) ->
+ ?_test(begin
+ Data1 = #{<<"test">> => 1},
+ Data2 = #{<<"test">> => 2},
+ ok = couch_jobs:add(?TX, T, J, Data1),
+ {ok, Job1, #{}} = couch_jobs:accept(T),
+ ?assertEqual(ok, couch_jobs:finish(?TX, Job1)),
+ ?assertEqual(ok, couch_jobs:add(?TX, T, J, Data2, 6)),
+ ?assertMatch({ok, _, Data2}, couch_jobs:accept(T))
+ end).
+
+
+add_running_does_not_update_job_data(#{t1 := T, j1 := J}) ->
+ ?_test(begin
+ Data1 = #{<<"test">> => 1},
+ Data2 = #{<<"test">> => 2},
+ ok = couch_jobs:add(?TX, T, J, Data1),
+ {ok, Job1, #{}} = couch_jobs:accept(T),
+ ?assertEqual(ok, couch_jobs:add(?TX, T, J, Data2, 6)),
+ ?assertEqual(ok, couch_jobs:finish(?TX, Job1)),
+ ?assertMatch({ok, _, Data1}, couch_jobs:accept(T))
+ end).
+
+
+accept_max_schedtime(#{t1 := T, j1 := J1, j2 := J2}) ->
+ ?_test(begin
+ ok = couch_jobs:add(?TX, T, J1, #{}, 5000),
+ ok = couch_jobs:add(?TX, T, J2, #{}, 3000),
+ ?assertEqual({error, not_found}, couch_jobs:accept(T,
+ #{max_sched_time => 1000})),
+ ?assertMatch({ok, #{id := J2}, _}, couch_jobs:accept(T,
+ #{max_sched_time => 3000})),
+ ?assertMatch({ok, #{id := J1}, _}, couch_jobs:accept(T,
+ #{max_sched_time => 9000}))
+ end).
+
+
+accept_no_schedule(#{t1 := T}) ->
+ ?_test(begin
+ JobCount = 25,
+ Jobs = [fabric2_util:uuid() || _ <- lists:seq(1, JobCount)],
+ [couch_jobs:add(?TX, T, J, #{}) || J <- Jobs],
+ InvalidOpts = #{no_schedule => true, max_sched_time => 1},
+ ?assertMatch({error, _}, couch_jobs:accept(T, InvalidOpts)),
+ AcceptOpts = #{no_schedule => true},
+ Accepted = [begin
+ {ok, #{id := J}, _} = couch_jobs:accept(T, AcceptOpts),
+ J
+ end || _ <- lists:seq(1, JobCount)],
+ ?assertEqual(lists:sort(Jobs), lists:sort(Accepted))
+ end).
+
+
+subscribe(#{t1 := T, j1 := J}) ->
+ ?_test(begin
+ ok = couch_jobs:add(?TX, T, J, #{<<"z">> => 1}),
+
+ ?assertEqual({error, not_found}, couch_jobs:subscribe(<<"xyz">>, J)),
+ ?assertEqual({error, not_found}, couch_jobs:subscribe(T, <<"j5">>)),
+
+ SubRes0 = couch_jobs:subscribe(T, J),
+ ?assertMatch({ok, {_, _}, pending, #{<<"z">> := 1}}, SubRes0),
+ {ok, SubId0, pending, _} = SubRes0,
+
+ SubRes1 = couch_jobs:subscribe(T, J),
+ ?assertEqual(SubRes0, SubRes1),
+
+ ?assertEqual(ok, couch_jobs:unsubscribe(SubId0)),
+
+ SubRes = couch_jobs:subscribe(T, J),
+ ?assertMatch({ok, {_, _}, pending, #{<<"z">> := 1}}, SubRes),
+ {ok, SubId, pending, _} = SubRes,
+
+ {ok, Job, _} = couch_jobs:accept(T),
+ ?assertMatch({T, J, running, #{<<"z">> := 1}},
+ couch_jobs:wait(SubId, 5000)),
+
+ % Make sure we get intermediate `running` updates
+ ?assertMatch({ok, _}, couch_jobs:update(?TX, Job, #{<<"z">> => 2})),
+ ?assertMatch({T, J, running, #{<<"z">> := 2}},
+ couch_jobs:wait(SubId, 5000)),
+
+ ?assertEqual(ok, couch_jobs:finish(?TX, Job, #{<<"z">> => 3})),
+ ?assertMatch({T, J, finished, #{<<"z">> := 3}},
+ couch_jobs:wait(SubId, finished, 5000)),
+
+ ?assertEqual(timeout, couch_jobs:wait(SubId, 50)),
+
+ ?assertEqual({ok, finished, #{<<"z">> => 3}},
+ couch_jobs:subscribe(T, J)),
+
+ ?assertEqual(ok, couch_jobs:remove(?TX, T, J)),
+ ?assertEqual({error, not_found}, couch_jobs:subscribe(T, J))
+ end).
+
+
+remove_when_subscribed_and_pending(#{t1 := T, j1 := J}) ->
+ ?_test(begin
+ ok = couch_jobs:add(?TX, T, J, #{<<"x">> => 1}),
+ {ok, SId, pending, _} = couch_jobs:subscribe(T, J),
+
+ couch_jobs:remove(?TX, T, J),
+
+ ?assertMatch({T, J, not_found, not_found}, couch_jobs:wait(SId, 5000)),
+ ?assertEqual(timeout, couch_jobs:wait(SId, 50))
+ end).
+
+
+remove_when_subscribed_and_running(#{t1 := T, j1 := J}) ->
+ ?_test(begin
+ ok = couch_jobs:add(?TX, T, J, #{<<"z">> => 2}),
+ {ok, SId, pending, _} = couch_jobs:subscribe(T, J),
+ {ok, #{}, _} = couch_jobs:accept(T),
+ ?assertMatch({_, _, running, _}, couch_jobs:wait(SId, 5000)),
+
+ couch_jobs:remove(?TX, T, J),
+
+ ?assertMatch({T, J, not_found, not_found}, couch_jobs:wait(SId, 5000)),
+ ?assertEqual(timeout, couch_jobs:wait(SId, 50))
+ end).
+
+
+subscribe_wait_multiple(#{t1 := T, j1 := J1, j2 := J2}) ->
+ ?_test(begin
+ ok = couch_jobs:add(?TX, T, J1, #{}),
+ ok = couch_jobs:add(?TX, T, J2, #{}),
+
+ {ok, S1, pending, #{}} = couch_jobs:subscribe(T, J1),
+ {ok, S2, pending, #{}} = couch_jobs:subscribe(T, J2),
+
+ Subs = [S1, S2],
+
+ % Accept one job. Only one running update is expected. PJob1 and PJob2
+ % do not necessarily correspond to J1 and J2; they could be
+ % accepted as J2 and J1 respectively.
+ {ok, PJob1, _} = couch_jobs:accept(T),
+ ?assertMatch({_, _, running, _}, couch_jobs:wait(Subs, 5000)),
+ ?assertMatch(timeout, couch_jobs:wait(Subs, 50)),
+
+ % Accept another job. Expect another update.
+ {ok, PJob2, _} = couch_jobs:accept(T),
+ ?assertMatch({_, _, running, _}, couch_jobs:wait(Subs, 5000)),
+ ?assertMatch(timeout, couch_jobs:wait(Subs, 50)),
+
+ ?assertMatch({ok, _}, couch_jobs:update(?TX, PJob1, #{<<"q">> => 5})),
+ ?assertMatch({ok, _}, couch_jobs:update(?TX, PJob2, #{<<"r">> => 6})),
+
+ % Each job was updated once, expect two running updates.
+ ?assertMatch({_, _, running, _}, couch_jobs:wait(Subs, 5000)),
+ ?assertMatch({_, _, running, _}, couch_jobs:wait(Subs, 5000)),
+
+ % Finish one job. Expect one finished update only.
+ ?assertEqual(ok, couch_jobs:finish(?TX, PJob1)),
+
+ ?assertMatch({_, _, finished, #{<<"q">> := 5}},
+ couch_jobs:wait(Subs, finished, 5000)),
+ ?assertMatch(timeout, couch_jobs:wait(Subs, finished, 50)),
+
+ % Finish another job. However, unsubscribe should flush the
+ % message and we should not get it.
+ ?assertEqual(ok, couch_jobs:finish(?TX, PJob2)),
+ ?assertEqual(ok, couch_jobs:unsubscribe(S1)),
+ ?assertEqual(ok, couch_jobs:unsubscribe(S2)),
+ ?assertMatch(timeout, couch_jobs:wait(Subs, finished, 50))
+ end).
+
+
+enqueue_inactive(#{t1 := T, j1 := J, t1_timeout := Timeout}) ->
+ {timeout, 10, ?_test(begin
+ couch_jobs_server:force_check_types(),
+
+ ok = couch_jobs:add(?TX, T, J, #{<<"y">> => 1}),
+ {ok, Job, _} = couch_jobs:accept(T),
+
+ {ok, SubId, running, #{<<"y">> := 1}} = couch_jobs:subscribe(T, J),
+ Wait = 3 * Timeout * 1000,
+ ?assertEqual({T, J, pending, #{<<"y">> => 1}},
+ couch_jobs:wait(SubId, pending, Wait)),
+ ?assertMatch(#{state := pending}, get_job(T, J)),
+
+ % After the job was re-enqueued, the old job processor can't update it anymore
+ ?assertEqual({error, halt}, couch_jobs:update(?TX, Job)),
+ ?assertEqual({error, halt}, couch_jobs:finish(?TX, Job))
+ end)}.
+
+
+remove_running_job(#{t1 := T, j1 := J}) ->
+ ?_test(begin
+ ok = couch_jobs:add(?TX, T, J, #{}),
+ {ok, Job, _} = couch_jobs:accept(T),
+ ?assertEqual(ok, couch_jobs:remove(?TX, T, J)),
+ ?assertEqual({error, not_found}, couch_jobs:remove(?TX, T, J)),
+ ?assertEqual({error, halt}, couch_jobs:update(?TX, Job)),
+ ?assertEqual({error, halt}, couch_jobs:finish(?TX, Job))
+ end).
+
+
+check_get_jobs(#{t1 := T1, j1 := J1, t2 := T2, j2 := J2}) ->
+ ?_test(begin
+ ok = couch_jobs:add(?TX, T1, J1, #{}),
+ ok = couch_jobs:add(?TX, T2, J2, #{}),
+ ?assertMatch([
+ {T2, J2, pending, #{}},
+ {T1, J1, pending, #{}}
+ ], lists:sort(couch_jobs_fdb:get_jobs())),
+ {ok, _, _} = couch_jobs:accept(T1),
+ ?assertMatch([
+ {T2, J2, pending, #{}},
+ {T1, J1, running, #{}}
+ ], lists:sort(couch_jobs_fdb:get_jobs()))
+ end).
+
+
+use_fabric_transaction_object(#{t1 := T1, j1 := J1, dbname := DbName}) ->
+ ?_test(begin
+ {ok, Db} = fabric2_db:create(DbName, []),
+ ?assertEqual(ok, couch_jobs:add(Db, T1, J1, #{})),
+ ?assertMatch(#{state := pending, data := #{}}, get_job(T1, J1)),
+ {ok, Job, _} = couch_jobs:accept(T1),
+ ?assertEqual(ok, fabric2_fdb:transactional(Db, fun(Db1) ->
+ {ok, #{}} = couch_jobs:get_job_data(Db1, T1, J1),
+ Doc1 = #doc{id = <<"1">>, body = {[]}},
+ {ok, {_, _}} = fabric2_db:update_doc(Db1, Doc1),
+ Doc2 = #doc{id = <<"2">>, body = {[]}},
+ {ok, {_, _}} = fabric2_db:update_doc(Db1, Doc2),
+ couch_jobs:finish(Db1, Job, #{<<"d">> => 1})
+ end)),
+ ok = couch_jobs:remove(#{tx => undefined}, T1, J1),
+ ok = fabric2_db:delete(DbName, [])
+ end).
+
+
+metadata_version_bump(_) ->
+ ?_test(begin
+ JTx1 = couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(Tx) -> Tx end),
+ ?assertMatch(#{md_version := not_found}, JTx1),
+
+ ets:delete_all_objects(couch_jobs_fdb),
+ couch_jobs_fdb:bump_metadata_version(),
+ JTx2 = couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(Tx) -> Tx end),
+ ?assertMatch(#{md_version := Bin} when is_binary(Bin), JTx2),
+
+ ets:delete_all_objects(couch_jobs_fdb),
+ couch_jobs_fdb:bump_metadata_version(),
+ JTx3 = couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(Tx) -> Tx end),
+ OldMdv = maps:get(md_version, JTx2),
+ NewMdv = maps:get(md_version, JTx3),
+ ?assert(NewMdv > OldMdv)
+ end).
diff --git a/src/couch_js/README.md b/src/couch_js/README.md
new file mode 100644
index 000000000..4084b7d8e
--- /dev/null
+++ b/src/couch_js/README.md
@@ -0,0 +1,6 @@
+couch_js
+===
+
+This application is just an isolation of most of the code required for running couchjs.
+
+For the time being I'm not moving the implementation of couchjs due to the specifics of the build system configuration. Once we remove the `couch` application we'll have to revisit that approach.
\ No newline at end of file
diff --git a/src/couch_js/src/couch_js.app.src b/src/couch_js/src/couch_js.app.src
new file mode 100644
index 000000000..44efd6d7d
--- /dev/null
+++ b/src/couch_js/src/couch_js.app.src
@@ -0,0 +1,27 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+{application, couch_js, [
+ {description, "An OTP application"},
+ {vsn, git},
+ {registered, [
+ couch_js_proc_manager
+ ]},
+ {mod, {couch_js_app, []}},
+ {applications, [
+ kernel,
+ stdlib,
+ config,
+ couch_log,
+ ioq
+ ]}
+ ]}.
diff --git a/src/couch_js/src/couch_js.erl b/src/couch_js/src/couch_js.erl
new file mode 100644
index 000000000..1bc0f1927
--- /dev/null
+++ b/src/couch_js/src/couch_js.erl
@@ -0,0 +1,51 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+-module(couch_js).
+-behavior(couch_eval).
+
+
+-export([
+ acquire_map_context/1,
+ release_map_context/1,
+ map_docs/2
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+
+
+-define(JS, <<"javascript">>).
+
+
+acquire_map_context(Opts) ->
+ #{
+ map_funs := MapFuns,
+ lib := Lib
+ } = Opts,
+ couch_js_query_servers:start_doc_map(?JS, MapFuns, Lib).
+
+
+release_map_context(Proc) ->
+ couch_js_query_servers:stop_doc_map(Proc).
+
+
+map_docs(Proc, Docs) ->
+ {ok, lists:map(fun(Doc) ->
+ {ok, RawResults} = couch_js_query_servers:map_doc_raw(Proc, Doc),
+ Results = couch_js_query_servers:raw_to_ejson(RawResults),
+ Tupled = lists:map(fun(ViewResult) ->
+ lists:map(fun([K, V]) -> {K, V} end, ViewResult)
+ end, Results),
+ {Doc#doc.id, Tupled}
+ end, Docs)}.
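
As a rough usage sketch of the couch_eval callbacks above (the example/0 wrapper, map function source and document are made up for illustration, and it assumes the couch_js application and its proc manager are already running), a caller would drive them roughly like this:

    example() ->
        Opts = #{
            map_funs => [<<"function(doc) { emit(doc._id, 1); }">>],
            lib => {[]}
        },
        {ok, Proc} = couch_js:acquire_map_context(Opts),
        % #doc{} comes from couch/include/couch_db.hrl
        Doc = #doc{id = <<"example">>, body = {[{<<"x">>, 1}]}},
        try
            % map_docs/2 returns {ok, [{DocId, ViewResults}]}, where each
            % view's results are a list of {Key, Value} tuples
            {ok, Results} = couch_js:map_docs(Proc, [Doc]),
            Results
        after
            couch_js:release_map_context(Proc)
        end.
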
diff --git a/src/couch_js/src/couch_js_app.erl b/src/couch_js/src/couch_js_app.erl
new file mode 100644
index 000000000..b28f5852e
--- /dev/null
+++ b/src/couch_js/src/couch_js_app.erl
@@ -0,0 +1,31 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+-module(couch_js_app).
+
+
+-behaviour(application).
+
+
+-export([
+ start/2,
+ stop/1
+]).
+
+
+start(_StartType, _StartArgs) ->
+ couch_js_sup:start_link().
+
+
+stop(_State) ->
+ ok.
\ No newline at end of file
diff --git a/src/couch_js/src/couch_js_io_logger.erl b/src/couch_js/src/couch_js_io_logger.erl
new file mode 100644
index 000000000..5a1695c01
--- /dev/null
+++ b/src/couch_js/src/couch_js_io_logger.erl
@@ -0,0 +1,107 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_js_io_logger).
+
+-export([
+ start/1,
+ log_output/1,
+ log_input/1,
+ stop_noerror/0,
+ stop_error/1
+]).
+
+
+start(undefined) ->
+ ok;
+start(Dir) ->
+ case filelib:is_dir(Dir) of
+ true ->
+ Name = log_name(),
+ Path = Dir ++ "/" ++ Name,
+ OPath = Path ++ ".out.log_",
+ IPath = Path ++ ".in.log_",
+ {ok, OFd} = file:open(OPath, [read, write, raw]),
+ {ok, IFd} = file:open(IPath, [read, write, raw]),
+ ok = file:delete(OPath),
+ ok = file:delete(IPath),
+ put(logger_path, Path),
+ put(logger_out_fd, OFd),
+ put(logger_in_fd, IFd),
+ ok;
+ false ->
+ ok
+ end.
+
+
+stop_noerror() ->
+ case get(logger_path) of
+ undefined ->
+ ok;
+ _Path ->
+ close_logs()
+ end.
+
+
+stop_error(Err) ->
+ case get(logger_path) of
+ undefined ->
+ ok;
+ Path ->
+ save_error_logs(Path, Err),
+ close_logs()
+ end.
+
+
+log_output(Data) ->
+ log(get(logger_out_fd), Data).
+
+
+log_input(Data) ->
+ log(get(logger_in_fd), Data).
+
+
+unix_time() ->
+ {Mega, Sec, USec} = os:timestamp(),
+ UnixTs = (Mega * 1000000 + Sec) * 1000000 + USec,
+ integer_to_list(UnixTs).
+
+
+log_name() ->
+ Ts = unix_time(),
+ Pid0 = erlang:pid_to_list(self()),
+ Pid1 = string:strip(Pid0, left, $<),
+ Pid2 = string:strip(Pid1, right, $>),
+ lists:flatten(io_lib:format("~s_~s", [Ts, Pid2])).
+
+
+close_logs() ->
+ file:close(get(logger_out_fd)),
+ file:close(get(logger_in_fd)).
+
+
+save_error_logs(Path, Err) ->
+ Otp = erlang:system_info(otp_release),
+ Msg = io_lib:format("Error: ~p~nNode: ~p~nOTP: ~p~n", [Err, node(), Otp]),
+ file:write_file(Path ++ ".meta", Msg),
+ IFd = get(logger_out_fd),
+ OFd = get(logger_in_fd),
+ file:position(IFd, 0),
+ file:position(OFd, 0),
+ file:copy(IFd, Path ++ ".out.log"),
+ file:copy(OFd, Path ++ ".in.log").
+
+
+log(undefined, _Data) ->
+ ok;
+log(Fd, Data) ->
+ ok = file:write(Fd, [Data, io_lib:nl()]).
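
A rough usage sketch for the logger above (the example/0 wrapper and directory are hypothetical; in this patch the directory comes from the COUCHDB_IO_LOG_DIR environment variable read in couch_js_os_process):

    example() ->
        % start/1 is a no-op unless the directory exists
        ok = couch_js_io_logger:start("/tmp"),
        ok = couch_js_io_logger:log_input(<<"[\"reset\"]">>),
        ok = couch_js_io_logger:log_output(<<"true">>),
        % stop_noerror/0 just closes the (already deleted) log files, while
        % stop_error/1 copies them next to the log path together with a
        % .meta file describing the error
        couch_js_io_logger:stop_noerror().
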
diff --git a/src/couch_js/src/couch_js_native_process.erl b/src/couch_js/src/couch_js_native_process.erl
new file mode 100644
index 000000000..d5ed3f94f
--- /dev/null
+++ b/src/couch_js/src/couch_js_native_process.erl
@@ -0,0 +1,468 @@
+% Licensed under the Apache License, Version 2.0 (the "License");
+% you may not use this file except in compliance with the License.
+%
+% You may obtain a copy of the License at
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing,
+% software distributed under the License is distributed on an
+% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+% either express or implied.
+%
+% See the License for the specific language governing permissions
+% and limitations under the License.
+%
+% This file drew much inspiration from erlview, which was written by and
+% copyright Michael McDaniel [http://autosys.us], and is also under APL 2.0
+%
+%
+% This module provides the smallest possible native view-server.
+% With this module in-place, you can add the following to your couch INI files:
+% [native_query_servers]
+% erlang={couch_native_process, start_link, []}
+%
+% Which will then allow the following example map function to be used:
+%
+% fun({Doc}) ->
+% % Below, we emit a single record - the _id as key, null as value
+% DocId = couch_util:get_value(<<"_id">>, Doc, null),
+% Emit(DocId, null)
+% end.
+%
+% which should be roughly the same as the javascript:
+% emit(doc._id, null);
+%
+% This module exposes enough functions such that a native erlang server can
+% act as a fully-fledged view server, but no 'helper' functions specifically
+% for simplifying your erlang view code. It is expected other third-party
+% extensions will evolve which offer useful layers on top of this view server
+% to help simplify your view code.
+-module(couch_js_native_process).
+-behaviour(gen_server).
+-vsn(1).
+
+-export([start_link/0,init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,
+ handle_info/2,format_status/2]).
+-export([set_timeout/2, prompt/2]).
+
+-define(STATE, native_proc_state).
+-record(evstate, {
+ ddocs,
+ funs = [],
+ query_config = [],
+ list_pid = nil,
+ timeout = 5000,
+ idle = 5000
+}).
+
+-include_lib("couch/include/couch_db.hrl").
+
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+% this is a bit messy, see also couch_query_servers handle_info
+% stop(_Pid) ->
+% ok.
+
+set_timeout(Pid, TimeOut) ->
+ gen_server:call(Pid, {set_timeout, TimeOut}).
+
+prompt(Pid, Data) when is_list(Data) ->
+ gen_server:call(Pid, {prompt, Data}).
+
+% gen_server callbacks
+init([]) ->
+ V = config:get("query_server_config", "os_process_idle_limit", "300"),
+ Idle = list_to_integer(V) * 1000,
+ {ok, #evstate{ddocs=dict:new(), idle=Idle}, Idle}.
+
+handle_call({set_timeout, TimeOut}, _From, State) ->
+ {reply, ok, State#evstate{timeout=TimeOut}, State#evstate.idle};
+
+handle_call({prompt, Data}, _From, State) ->
+ couch_log:debug("Prompt native qs: ~s",[?JSON_ENCODE(Data)]),
+ {NewState, Resp} = try run(State, to_binary(Data)) of
+ {S, R} -> {S, R}
+ catch
+ throw:{error, Why} ->
+ {State, [<<"error">>, Why, Why]}
+ end,
+
+ Idle = State#evstate.idle,
+ case Resp of
+ {error, Reason} ->
+ Msg = io_lib:format("couch native server error: ~p", [Reason]),
+ Error = [<<"error">>, <<"native_query_server">>, list_to_binary(Msg)],
+ {reply, Error, NewState, Idle};
+ [<<"error">> | Rest] ->
+ % Msg = io_lib:format("couch native server error: ~p", [Rest]),
+ % TODO: markh? (jan)
+ {reply, [<<"error">> | Rest], NewState, Idle};
+ [<<"fatal">> | Rest] ->
+ % Msg = io_lib:format("couch native server error: ~p", [Rest]),
+ % TODO: markh? (jan)
+ {stop, fatal, [<<"error">> | Rest], NewState};
+ Resp ->
+ {reply, Resp, NewState, Idle}
+ end.
+
+handle_cast(garbage_collect, State) ->
+ erlang:garbage_collect(),
+ {noreply, State, State#evstate.idle};
+handle_cast(stop, State) ->
+ {stop, normal, State};
+handle_cast(_Msg, State) ->
+ {noreply, State, State#evstate.idle}.
+
+handle_info(timeout, State) ->
+ gen_server:cast(couch_js_proc_manager, {os_proc_idle, self()}),
+ erlang:garbage_collect(),
+ {noreply, State, State#evstate.idle};
+handle_info({'EXIT',_,normal}, State) ->
+ {noreply, State, State#evstate.idle};
+handle_info({'EXIT',_,Reason}, State) ->
+ {stop, Reason, State}.
+terminate(_Reason, _State) -> ok.
+code_change(_OldVersion, State, _Extra) -> {ok, State}.
+
+format_status(_Opt, [_PDict, State]) ->
+ #evstate{
+ ddocs = DDocs,
+ funs = Funs,
+ query_config = Config
+ } = State,
+ Scrubbed = State#evstate{
+ ddocs = {dict_size, dict:size(DDocs)},
+ funs = {length, length(Funs)},
+ query_config = {length, length(Config)}
+ },
+ [{data, [{"State",
+ ?record_to_keyval(evstate, Scrubbed)
+ }]}].
+
+
+run(#evstate{list_pid=Pid}=State, [<<"list_row">>, Row]) when is_pid(Pid) ->
+ Pid ! {self(), list_row, Row},
+ receive
+ {Pid, chunks, Data} ->
+ {State, [<<"chunks">>, Data]};
+ {Pid, list_end, Data} ->
+ receive
+ {'EXIT', Pid, normal} -> ok
+ after State#evstate.timeout ->
+ throw({timeout, list_cleanup})
+ end,
+ process_flag(trap_exit, erlang:get(do_trap)),
+ {State#evstate{list_pid=nil}, [<<"end">>, Data]}
+ after State#evstate.timeout ->
+ throw({timeout, list_row})
+ end;
+run(#evstate{list_pid=Pid}=State, [<<"list_end">>]) when is_pid(Pid) ->
+ Pid ! {self(), list_end},
+ Resp =
+ receive
+ {Pid, list_end, Data} ->
+ receive
+ {'EXIT', Pid, normal} -> ok
+ after State#evstate.timeout ->
+ throw({timeout, list_cleanup})
+ end,
+ [<<"end">>, Data]
+ after State#evstate.timeout ->
+ throw({timeout, list_end})
+ end,
+ process_flag(trap_exit, erlang:get(do_trap)),
+ {State#evstate{list_pid=nil}, Resp};
+run(#evstate{list_pid=Pid}=State, _Command) when is_pid(Pid) ->
+ {State, [<<"error">>, list_error, list_error]};
+run(#evstate{ddocs=DDocs}, [<<"reset">>]) ->
+ {#evstate{ddocs=DDocs}, true};
+run(#evstate{ddocs=DDocs, idle=Idle}, [<<"reset">>, QueryConfig]) ->
+ NewState = #evstate{
+ ddocs = DDocs,
+ query_config = QueryConfig,
+ idle = Idle
+ },
+ {NewState, true};
+run(#evstate{funs=Funs}=State, [<<"add_fun">> , BinFunc]) ->
+ FunInfo = makefun(State, BinFunc),
+ {State#evstate{funs=Funs ++ [FunInfo]}, true};
+run(State, [<<"map_doc">> , Doc]) ->
+ Resp = lists:map(fun({Sig, Fun}) ->
+ erlang:put(Sig, []),
+ Fun(Doc),
+ lists:reverse(erlang:get(Sig))
+ end, State#evstate.funs),
+ {State, Resp};
+run(State, [<<"reduce">>, Funs, KVs]) ->
+ {Keys, Vals} =
+ lists:foldl(fun([K, V], {KAcc, VAcc}) ->
+ {[K | KAcc], [V | VAcc]}
+ end, {[], []}, KVs),
+ Keys2 = lists:reverse(Keys),
+ Vals2 = lists:reverse(Vals),
+ {State, catch reduce(State, Funs, Keys2, Vals2, false)};
+run(State, [<<"rereduce">>, Funs, Vals]) ->
+ {State, catch reduce(State, Funs, null, Vals, true)};
+run(#evstate{ddocs=DDocs}=State, [<<"ddoc">>, <<"new">>, DDocId, DDoc]) ->
+ DDocs2 = store_ddoc(DDocs, DDocId, DDoc),
+ {State#evstate{ddocs=DDocs2}, true};
+run(#evstate{ddocs=DDocs}=State, [<<"ddoc">>, DDocId | Rest]) ->
+ DDoc = load_ddoc(DDocs, DDocId),
+ ddoc(State, DDoc, Rest);
+run(_, Unknown) ->
+ couch_log:error("Native Process: Unknown command: ~p~n", [Unknown]),
+ throw({error, unknown_command}).
+
+ddoc(State, {DDoc}, [FunPath, Args]) ->
+ % load fun from the FunPath
+ BFun = lists:foldl(fun
+ (Key, {Props}) when is_list(Props) ->
+ couch_util:get_value(Key, Props, nil);
+ (_Key, Fun) when is_binary(Fun) ->
+ Fun;
+ (_Key, nil) ->
+ throw({error, not_found});
+ (_Key, _Fun) ->
+ throw({error, malformed_ddoc})
+ end, {DDoc}, FunPath),
+ ddoc(State, makefun(State, BFun, {DDoc}), FunPath, Args).
+
+ddoc(State, {_, Fun}, [<<"validate_doc_update">>], Args) ->
+ {State, (catch apply(Fun, Args))};
+ddoc(State, {_, Fun}, [<<"rewrites">>], Args) ->
+ {State, (catch apply(Fun, Args))};
+ddoc(State, {_, Fun}, [<<"filters">>|_], [Docs, Req]) ->
+ FilterFunWrapper = fun(Doc) ->
+ case catch Fun(Doc, Req) of
+ true -> true;
+ false -> false;
+ {'EXIT', Error} -> couch_log:error("~p", [Error])
+ end
+ end,
+ Resp = lists:map(FilterFunWrapper, Docs),
+ {State, [true, Resp]};
+ddoc(State, {_, Fun}, [<<"views">>|_], [Docs]) ->
+ MapFunWrapper = fun(Doc) ->
+ case catch Fun(Doc) of
+ undefined -> true;
+ ok -> false;
+ false -> false;
+ [_|_] -> true;
+ {'EXIT', Error} -> couch_log:error("~p", [Error])
+ end
+ end,
+ Resp = lists:map(MapFunWrapper, Docs),
+ {State, [true, Resp]};
+ddoc(State, {_, Fun}, [<<"shows">>|_], Args) ->
+ Resp = case (catch apply(Fun, Args)) of
+ FunResp when is_list(FunResp) ->
+ FunResp;
+ {FunResp} ->
+ [<<"resp">>, {FunResp}];
+ FunResp ->
+ FunResp
+ end,
+ {State, Resp};
+ddoc(State, {_, Fun}, [<<"updates">>|_], Args) ->
+ Resp = case (catch apply(Fun, Args)) of
+ [JsonDoc, JsonResp] ->
+ [<<"up">>, JsonDoc, JsonResp]
+ end,
+ {State, Resp};
+ddoc(State, {Sig, Fun}, [<<"lists">>|_], Args) ->
+ Self = self(),
+ SpawnFun = fun() ->
+ LastChunk = (catch apply(Fun, Args)),
+ case start_list_resp(Self, Sig) of
+ started ->
+ receive
+ {Self, list_row, _Row} -> ignore;
+ {Self, list_end} -> ignore
+ after State#evstate.timeout ->
+ throw({timeout, list_cleanup_pid})
+ end;
+ _ ->
+ ok
+ end,
+ LastChunks =
+ case erlang:get(Sig) of
+ undefined -> [LastChunk];
+ OtherChunks -> [LastChunk | OtherChunks]
+ end,
+ Self ! {self(), list_end, lists:reverse(LastChunks)}
+ end,
+ erlang:put(do_trap, process_flag(trap_exit, true)),
+ Pid = spawn_link(SpawnFun),
+ Resp =
+ receive
+ {Pid, start, Chunks, JsonResp} ->
+ [<<"start">>, Chunks, JsonResp]
+ after State#evstate.timeout ->
+ throw({timeout, list_start})
+ end,
+ {State#evstate{list_pid=Pid}, Resp}.
+
+store_ddoc(DDocs, DDocId, DDoc) ->
+ dict:store(DDocId, DDoc, DDocs).
+load_ddoc(DDocs, DDocId) ->
+ try dict:fetch(DDocId, DDocs) of
+ {DDoc} -> {DDoc}
+ catch
+ _:_Else -> throw({error, ?l2b(io_lib:format("Native Query Server missing DDoc with Id: ~s",[DDocId]))})
+ end.
+
+bindings(State, Sig) ->
+ bindings(State, Sig, nil).
+bindings(State, Sig, DDoc) ->
+ Self = self(),
+
+ Log = fun(Msg) ->
+ couch_log:info(Msg, [])
+ end,
+
+ Emit = fun(Id, Value) ->
+ Curr = erlang:get(Sig),
+ erlang:put(Sig, [[Id, Value] | Curr])
+ end,
+
+ Start = fun(Headers) ->
+ erlang:put(list_headers, Headers)
+ end,
+
+ Send = fun(Chunk) ->
+ Curr =
+ case erlang:get(Sig) of
+ undefined -> [];
+ Else -> Else
+ end,
+ erlang:put(Sig, [Chunk | Curr])
+ end,
+
+ GetRow = fun() ->
+ case start_list_resp(Self, Sig) of
+ started ->
+ ok;
+ _ ->
+ Chunks =
+ case erlang:get(Sig) of
+ undefined -> [];
+ CurrChunks -> CurrChunks
+ end,
+ Self ! {self(), chunks, lists:reverse(Chunks)}
+ end,
+ erlang:put(Sig, []),
+ receive
+ {Self, list_row, Row} -> Row;
+ {Self, list_end} -> nil
+ after State#evstate.timeout ->
+ throw({timeout, list_pid_getrow})
+ end
+ end,
+
+ FoldRows = fun(Fun, Acc) -> foldrows(GetRow, Fun, Acc) end,
+
+ Bindings = [
+ {'Log', Log},
+ {'Emit', Emit},
+ {'Start', Start},
+ {'Send', Send},
+ {'GetRow', GetRow},
+ {'FoldRows', FoldRows}
+ ],
+ case DDoc of
+ {_Props} ->
+ Bindings ++ [{'DDoc', DDoc}];
+ _Else -> Bindings
+ end.
+
+% thanks to erlview, via:
+% http://erlang.org/pipermail/erlang-questions/2003-November/010544.html
+makefun(State, Source) ->
+ Sig = couch_hash:md5_hash(Source),
+ BindFuns = bindings(State, Sig),
+ {Sig, makefun(State, Source, BindFuns)}.
+makefun(State, Source, {DDoc}) ->
+ Sig = couch_hash:md5_hash(lists:flatten([Source, term_to_binary(DDoc)])),
+ BindFuns = bindings(State, Sig, {DDoc}),
+ {Sig, makefun(State, Source, BindFuns)};
+makefun(_State, Source, BindFuns) when is_list(BindFuns) ->
+ FunStr = binary_to_list(Source),
+ {ok, Tokens, _} = erl_scan:string(FunStr),
+ Form = case (catch erl_parse:parse_exprs(Tokens)) of
+ {ok, [ParsedForm]} ->
+ ParsedForm;
+ {error, {LineNum, _Mod, [Mesg, Params]}}=Error ->
+ couch_log:error("Syntax error on line: ~p~n~s~p~n",
+ [LineNum, Mesg, Params]),
+ throw(Error)
+ end,
+ Bindings = lists:foldl(fun({Name, Fun}, Acc) ->
+ erl_eval:add_binding(Name, Fun, Acc)
+ end, erl_eval:new_bindings(), BindFuns),
+ {value, Fun, _} = erl_eval:expr(Form, Bindings),
+ Fun.
+
+reduce(State, BinFuns, Keys, Vals, ReReduce) ->
+ Funs = case is_list(BinFuns) of
+ true ->
+ lists:map(fun(BF) -> makefun(State, BF) end, BinFuns);
+ _ ->
+ [makefun(State, BinFuns)]
+ end,
+ Reds = lists:map(fun({_Sig, Fun}) ->
+ Fun(Keys, Vals, ReReduce)
+ end, Funs),
+ [true, Reds].
+
+foldrows(GetRow, ProcRow, Acc) ->
+ case GetRow() of
+ nil ->
+ {ok, Acc};
+ Row ->
+ case (catch ProcRow(Row, Acc)) of
+ {ok, Acc2} ->
+ foldrows(GetRow, ProcRow, Acc2);
+ {stop, Acc2} ->
+ {ok, Acc2}
+ end
+ end.
+
+start_list_resp(Self, Sig) ->
+ case erlang:get(list_started) of
+ undefined ->
+ Headers =
+ case erlang:get(list_headers) of
+ undefined -> {[{<<"headers">>, {[]}}]};
+ CurrHdrs -> CurrHdrs
+ end,
+ Chunks =
+ case erlang:get(Sig) of
+ undefined -> [];
+ CurrChunks -> CurrChunks
+ end,
+ Self ! {self(), start, lists:reverse(Chunks), Headers},
+ erlang:put(list_started, true),
+ erlang:put(Sig, []),
+ started;
+ _ ->
+ ok
+ end.
+
+to_binary({Data}) ->
+ Pred = fun({Key, Value}) ->
+ {to_binary(Key), to_binary(Value)}
+ end,
+ {lists:map(Pred, Data)};
+to_binary(Data) when is_list(Data) ->
+ [to_binary(D) || D <- Data];
+to_binary(null) ->
+ null;
+to_binary(true) ->
+ true;
+to_binary(false) ->
+ false;
+to_binary(Data) when is_atom(Data) ->
+ list_to_binary(atom_to_list(Data));
+to_binary(Data) ->
+ Data.
diff --git a/src/couch_js/src/couch_js_os_process.erl b/src/couch_js/src/couch_js_os_process.erl
new file mode 100644
index 000000000..a453d1ab2
--- /dev/null
+++ b/src/couch_js/src/couch_js_os_process.erl
@@ -0,0 +1,265 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_js_os_process).
+-behaviour(gen_server).
+-vsn(1).
+
+-export([start_link/1, start_link/2, start_link/3, stop/1]).
+-export([set_timeout/2, prompt/2, killer/1]).
+-export([send/2, writeline/2, readline/1, writejson/2, readjson/1]).
+-export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2, code_change/3]).
+
+-include_lib("couch/include/couch_db.hrl").
+
+-define(PORT_OPTIONS, [stream, {line, 4096}, binary, exit_status, hide]).
+
+-record(os_proc,
+ {command,
+ port,
+ writer,
+ reader,
+ timeout=5000,
+ idle
+ }).
+
+start_link(Command) ->
+ start_link(Command, []).
+start_link(Command, Options) ->
+ start_link(Command, Options, ?PORT_OPTIONS).
+start_link(Command, Options, PortOptions) ->
+ gen_server:start_link(?MODULE, [Command, Options, PortOptions], []).
+
+stop(Pid) ->
+ gen_server:cast(Pid, stop).
+
+% Read/Write API
+set_timeout(Pid, TimeOut) when is_integer(TimeOut) ->
+ ok = gen_server:call(Pid, {set_timeout, TimeOut}, infinity).
+
+% Used by couch_event_os_process.erl
+send(Pid, Data) ->
+ gen_server:cast(Pid, {send, Data}).
+
+prompt(Pid, Data) ->
+ case ioq:call(Pid, {prompt, Data}, erlang:get(io_priority)) of
+ {ok, Result} ->
+ Result;
+ Error ->
+ couch_log:error("OS Process Error ~p :: ~p",[Pid,Error]),
+ throw(Error)
+ end.
+
+% Utility functions for reading and writing
+% in custom functions
+writeline(OsProc, Data) when is_record(OsProc, os_proc) ->
+ Res = port_command(OsProc#os_proc.port, [Data, $\n]),
+ couch_js_io_logger:log_output(Data),
+ Res.
+
+readline(#os_proc{} = OsProc) ->
+ Res = readline(OsProc, []),
+ couch_js_io_logger:log_input(Res),
+ Res.
+readline(#os_proc{port = Port} = OsProc, Acc) ->
+ receive
+ {Port, {data, {noeol, Data}}} when is_binary(Acc) ->
+ readline(OsProc, <<Acc/binary,Data/binary>>);
+ {Port, {data, {noeol, Data}}} when is_binary(Data) ->
+ readline(OsProc, Data);
+ {Port, {data, {noeol, Data}}} ->
+ readline(OsProc, [Data|Acc]);
+ {Port, {data, {eol, <<Data/binary>>}}} when is_binary(Acc) ->
+ [<<Acc/binary,Data/binary>>];
+ {Port, {data, {eol, Data}}} when is_binary(Data) ->
+ [Data];
+ {Port, {data, {eol, Data}}} ->
+ lists:reverse(Acc, Data);
+ {Port, Err} ->
+ catch port_close(Port),
+ throw({os_process_error, Err})
+ after OsProc#os_proc.timeout ->
+ catch port_close(Port),
+ throw({os_process_error, "OS process timed out."})
+ end.
+
+% Standard JSON functions
+writejson(OsProc, Data) when is_record(OsProc, os_proc) ->
+ JsonData = ?JSON_ENCODE(Data),
+ couch_log:debug("OS Process ~p Input :: ~s",
+ [OsProc#os_proc.port, JsonData]),
+ true = writeline(OsProc, JsonData).
+
+readjson(OsProc) when is_record(OsProc, os_proc) ->
+ Line = iolist_to_binary(readline(OsProc)),
+ couch_log:debug("OS Process ~p Output :: ~s", [OsProc#os_proc.port, Line]),
+ try
+ % Don't actually parse the whole JSON. Just try to see if it's
+ % a command or a doc map/reduce/filter/show/list/update output.
+ % If it's a command then parse the whole JSON and execute the
+ % command, otherwise return the raw JSON line to the caller.
+ pick_command(Line)
+ catch
+ throw:abort ->
+ {json, Line};
+ throw:{cmd, _Cmd} ->
+ case ?JSON_DECODE(Line) of
+ [<<"log">>, Msg] when is_binary(Msg) ->
+ % we got a message to log. Log it and continue
+ couch_log:info("OS Process ~p Log :: ~s",
+ [OsProc#os_proc.port, Msg]),
+ readjson(OsProc);
+ [<<"error">>, Id, Reason] ->
+ throw({error, {couch_util:to_existing_atom(Id),Reason}});
+ [<<"fatal">>, Id, Reason] ->
+ couch_log:info("OS Process ~p Fatal Error :: ~s ~p",
+ [OsProc#os_proc.port, Id, Reason]),
+ throw({couch_util:to_existing_atom(Id),Reason});
+ _Result ->
+ {json, Line}
+ end
+ end.
+
+pick_command(Line) ->
+ json_stream_parse:events(Line, fun pick_command0/1).
+
+pick_command0(array_start) ->
+ fun pick_command1/1;
+pick_command0(_) ->
+ throw(abort).
+
+pick_command1(<<"log">> = Cmd) ->
+ throw({cmd, Cmd});
+pick_command1(<<"error">> = Cmd) ->
+ throw({cmd, Cmd});
+pick_command1(<<"fatal">> = Cmd) ->
+ throw({cmd, Cmd});
+pick_command1(_) ->
+ throw(abort).
+
+
+% gen_server API
+init([Command, Options, PortOptions]) ->
+ couch_js_io_logger:start(os:getenv("COUCHDB_IO_LOG_DIR")),
+ PrivDir = couch_util:priv_dir(),
+ Spawnkiller = "\"" ++ filename:join(PrivDir, "couchspawnkillable") ++ "\"",
+ V = config:get("query_server_config", "os_process_idle_limit", "300"),
+ IdleLimit = list_to_integer(V) * 1000,
+ BaseProc = #os_proc{
+ command=Command,
+ port=open_port({spawn, Spawnkiller ++ " " ++ Command}, PortOptions),
+ writer=fun ?MODULE:writejson/2,
+ reader=fun ?MODULE:readjson/1,
+ idle=IdleLimit
+ },
+ KillCmd = iolist_to_binary(readline(BaseProc)),
+ Pid = self(),
+ couch_log:debug("OS Process Start :: ~p", [BaseProc#os_proc.port]),
+ spawn(fun() ->
+ % this ensures the real OS process is killed when this process dies.
+ erlang:monitor(process, Pid),
+ killer(?b2l(KillCmd))
+ end),
+ OsProc =
+ lists:foldl(fun(Opt, Proc) ->
+ case Opt of
+ {writer, Writer} when is_function(Writer) ->
+ Proc#os_proc{writer=Writer};
+ {reader, Reader} when is_function(Reader) ->
+ Proc#os_proc{reader=Reader};
+ {timeout, TimeOut} when is_integer(TimeOut) ->
+ Proc#os_proc{timeout=TimeOut}
+ end
+ end, BaseProc, Options),
+ {ok, OsProc, IdleLimit}.
+
+terminate(Reason, #os_proc{port=Port}) ->
+ catch port_close(Port),
+ case Reason of
+ normal ->
+ couch_js_io_logger:stop_noerror();
+ Error ->
+ couch_js_io_logger:stop_error(Error)
+ end,
+ ok.
+
+handle_call({set_timeout, TimeOut}, _From, #os_proc{idle=Idle}=OsProc) ->
+ {reply, ok, OsProc#os_proc{timeout=TimeOut}, Idle};
+handle_call({prompt, Data}, _From, #os_proc{idle=Idle}=OsProc) ->
+ #os_proc{writer=Writer, reader=Reader} = OsProc,
+ try
+ Writer(OsProc, Data),
+ {reply, {ok, Reader(OsProc)}, OsProc, Idle}
+ catch
+ throw:{error, OsError} ->
+ {reply, OsError, OsProc, Idle};
+ throw:{fatal, OsError} ->
+ {stop, normal, OsError, OsProc};
+ throw:OtherError ->
+ {stop, normal, OtherError, OsProc}
+ after
+ garbage_collect()
+ end.
+
+handle_cast({send, Data}, #os_proc{writer=Writer, idle=Idle}=OsProc) ->
+ try
+ Writer(OsProc, Data),
+ {noreply, OsProc, Idle}
+ catch
+ throw:OsError ->
+ couch_log:error("Failed sending data: ~p -> ~p", [Data, OsError]),
+ {stop, normal, OsProc}
+ end;
+handle_cast(garbage_collect, #os_proc{idle=Idle}=OsProc) ->
+ erlang:garbage_collect(),
+ {noreply, OsProc, Idle};
+handle_cast(stop, OsProc) ->
+ {stop, normal, OsProc};
+handle_cast(Msg, #os_proc{idle=Idle}=OsProc) ->
+ couch_log:debug("OS Proc: Unknown cast: ~p", [Msg]),
+ {noreply, OsProc, Idle}.
+
+handle_info(timeout, #os_proc{idle=Idle}=OsProc) ->
+ gen_server:cast(couch_js_proc_manager, {os_proc_idle, self()}),
+ erlang:garbage_collect(),
+ {noreply, OsProc, Idle};
+handle_info({Port, {exit_status, 0}}, #os_proc{port=Port}=OsProc) ->
+ couch_log:info("OS Process terminated normally", []),
+ {stop, normal, OsProc};
+handle_info({Port, {exit_status, Status}}, #os_proc{port=Port}=OsProc) ->
+ couch_log:error("OS Process died with status: ~p", [Status]),
+ {stop, {exit_status, Status}, OsProc};
+handle_info(Msg, #os_proc{idle=Idle}=OsProc) ->
+ couch_log:debug("OS Proc: Unknown info: ~p", [Msg]),
+ {noreply, OsProc, Idle}.
+
+code_change(_, {os_proc, Cmd, Port, W, R, Timeout} , _) ->
+ V = config:get("query_server_config","os_process_idle_limit","300"),
+ State = #os_proc{
+ command = Cmd,
+ port = Port,
+ writer = W,
+ reader = R,
+ timeout = Timeout,
+ idle = list_to_integer(V) * 1000
+ },
+ {ok, State};
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+killer(KillCmd) ->
+ receive _ ->
+ os:cmd(KillCmd)
+ after 1000 ->
+ ?MODULE:killer(KillCmd)
+ end.
+
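
A rough round-trip sketch for the OS process wrapper above (the example/0 wrapper and couchjs command line are made up, and it assumes the ioq application is available since prompt/2 goes through ioq:call/3):

    example() ->
        {ok, Pid} = couch_js_os_process:start_link("./bin/couchjs ./share/main.js"),
        ok = couch_js_os_process:set_timeout(Pid, 5000),
        % prompt/2 writes one JSON line and reads one back; plain responses
        % come back as {json, Line}, while "log"/"error"/"fatal" lines are
        % handled inside readjson/1
        {json, _Line} = couch_js_os_process:prompt(Pid, [<<"reset">>]),
        couch_js_os_process:stop(Pid).
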
diff --git a/src/couch_js/src/couch_js_proc_manager.erl b/src/couch_js/src/couch_js_proc_manager.erl
new file mode 100644
index 000000000..db5c492f5
--- /dev/null
+++ b/src/couch_js/src/couch_js_proc_manager.erl
@@ -0,0 +1,615 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_js_proc_manager).
+-behaviour(gen_server).
+-behaviour(config_listener).
+-vsn(1).
+
+-export([
+ start_link/0,
+ get_proc_count/0,
+ get_stale_proc_count/0,
+ new_proc/1,
+ reload/0,
+ terminate_stale_procs/0
+]).
+
+-export([
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ code_change/3,
+ format_status/2
+]).
+
+-export([
+ handle_config_change/5,
+ handle_config_terminate/3
+]).
+
+-include_lib("couch/include/couch_db.hrl").
+
+-define(PROCS, couch_js_proc_manager_procs).
+-define(WAITERS, couch_js_proc_manager_waiters).
+-define(OPENING, couch_js_proc_manager_opening).
+-define(SERVERS, couch_js_proc_manager_servers).
+-define(RELISTEN_DELAY, 5000).
+
+-record(state, {
+ config,
+ counts,
+ threshold_ts,
+ hard_limit,
+ soft_limit
+}).
+
+-type docid() :: iodata().
+-type revision() :: {integer(), binary()}.
+
+-record(client, {
+ timestamp :: os:timestamp() | '_',
+ from :: undefined | {pid(), reference()} | '_',
+ lang :: binary() | '_',
+ ddoc :: #doc{} | '_',
+ ddoc_key :: undefined | {DDocId :: docid(), Rev :: revision()} | '_'
+}).
+
+-record(proc_int, {
+ pid,
+ lang,
+ client,
+ ddoc_keys = [],
+ prompt_fun,
+ set_timeout_fun,
+ stop_fun,
+ t0 = os:timestamp()
+}).
+
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
+get_proc_count() ->
+ gen_server:call(?MODULE, get_proc_count).
+
+
+get_stale_proc_count() ->
+ gen_server:call(?MODULE, get_stale_proc_count).
+
+
+reload() ->
+ gen_server:call(?MODULE, set_threshold_ts).
+
+
+terminate_stale_procs() ->
+ gen_server:call(?MODULE, terminate_stale_procs).
+
+
+init([]) ->
+ process_flag(trap_exit, true),
+ ok = config:listen_for_changes(?MODULE, undefined),
+
+ TableOpts = [public, named_table, ordered_set],
+ ets:new(?PROCS, TableOpts ++ [{keypos, #proc_int.pid}]),
+ ets:new(?WAITERS, TableOpts ++ [{keypos, #client.timestamp}]),
+ ets:new(?OPENING, [public, named_table, set]),
+ ets:new(?SERVERS, [public, named_table, set]),
+ ets:insert(?SERVERS, get_servers_from_env("COUCHDB_QUERY_SERVER_")),
+ ets:insert(?SERVERS, get_servers_from_env("COUCHDB_NATIVE_QUERY_SERVER_")),
+ maybe_configure_erlang_native_servers(),
+
+ {ok, #state{
+ config = get_proc_config(),
+ counts = dict:new(),
+ threshold_ts = os:timestamp(),
+ hard_limit = get_hard_limit(),
+ soft_limit = get_soft_limit()
+ }}.
+
+
+terminate(_Reason, _State) ->
+ ets:foldl(fun(#proc_int{pid=P}, _) ->
+ couch_util:shutdown_sync(P)
+ end, 0, ?PROCS),
+ ok.
+
+
+handle_call(get_proc_count, _From, State) ->
+ NumProcs = ets:info(?PROCS, size),
+ NumOpening = ets:info(?OPENING, size),
+ {reply, NumProcs + NumOpening, State};
+
+handle_call(get_stale_proc_count, _From, State) ->
+ #state{threshold_ts = T0} = State,
+ MatchSpec = [{#proc_int{t0='$1', _='_'}, [{'<', '$1', {T0}}], [true]}],
+ {reply, ets:select_count(?PROCS, MatchSpec), State};
+
+handle_call({get_proc, #doc{body={Props}}=DDoc, DDocKey}, From, State) ->
+ LangStr = couch_util:get_value(<<"language">>, Props, <<"javascript">>),
+ Lang = couch_util:to_binary(LangStr),
+ Client = #client{from=From, lang=Lang, ddoc=DDoc, ddoc_key=DDocKey},
+ add_waiting_client(Client),
+ {noreply, flush_waiters(State, Lang)};
+
+handle_call({get_proc, LangStr}, From, State) ->
+ Lang = couch_util:to_binary(LangStr),
+ Client = #client{from=From, lang=Lang},
+ add_waiting_client(Client),
+ {noreply, flush_waiters(State, Lang)};
+
+handle_call({ret_proc, #proc{client=Ref} = Proc}, _From, State) ->
+ erlang:demonitor(Ref, [flush]),
+ NewState = case ets:lookup(?PROCS, Proc#proc.pid) of
+ [#proc_int{}=ProcInt] ->
+ return_proc(State, ProcInt);
+ [] ->
+ % Proc must've died and we already
+ % cleared it out of the table in
+ % the handle_info clause.
+ State
+ end,
+ {reply, true, NewState};
+
+handle_call(set_threshold_ts, _From, State) ->
+ FoldFun = fun
+ (#proc_int{client = undefined} = Proc, StateAcc) ->
+ remove_proc(StateAcc, Proc);
+ (_, StateAcc) ->
+ StateAcc
+ end,
+ NewState = ets:foldl(FoldFun, State, ?PROCS),
+ {reply, ok, NewState#state{threshold_ts = os:timestamp()}};
+
+handle_call(terminate_stale_procs, _From, #state{threshold_ts = Ts1} = State) ->
+ FoldFun = fun
+ (#proc_int{client = undefined, t0 = Ts2} = Proc, StateAcc) ->
+ case Ts1 > Ts2 of
+ true ->
+ remove_proc(StateAcc, Proc);
+ false ->
+ StateAcc
+ end;
+ (_, StateAcc) ->
+ StateAcc
+ end,
+ NewState = ets:foldl(FoldFun, State, ?PROCS),
+ {reply, ok, NewState};
+
+handle_call(_Call, _From, State) ->
+ {reply, ignored, State}.
+
+
+handle_cast({os_proc_idle, Pid}, #state{counts=Counts}=State) ->
+ NewState = case ets:lookup(?PROCS, Pid) of
+ [#proc_int{client=undefined, lang=Lang}=Proc] ->
+ case dict:find(Lang, Counts) of
+ {ok, Count} when Count >= State#state.soft_limit ->
+ couch_log:info("Closing idle OS Process: ~p", [Pid]),
+ remove_proc(State, Proc);
+ {ok, _} ->
+ State
+ end;
+ _ ->
+ State
+ end,
+ {noreply, NewState};
+
+handle_cast(reload_config, State) ->
+ NewState = State#state{
+ config = get_proc_config(),
+ hard_limit = get_hard_limit(),
+ soft_limit = get_soft_limit()
+ },
+ maybe_configure_erlang_native_servers(),
+ {noreply, flush_waiters(NewState)};
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+
+handle_info(shutdown, State) ->
+ {stop, shutdown, State};
+
+handle_info({'EXIT', Pid, {spawn_ok, Proc0, {ClientPid,_} = From}}, State) ->
+ ets:delete(?OPENING, Pid),
+ link(Proc0#proc_int.pid),
+ Proc = assign_proc(ClientPid, Proc0),
+ gen_server:reply(From, {ok, Proc, State#state.config}),
+ {noreply, State};
+
+handle_info({'EXIT', Pid, spawn_error}, State) ->
+ [{Pid, #client{lang=Lang}}] = ets:lookup(?OPENING, Pid),
+ ets:delete(?OPENING, Pid),
+ NewState = State#state{
+ counts = dict:update_counter(Lang, -1, State#state.counts)
+ },
+ {noreply, flush_waiters(NewState, Lang)};
+
+handle_info({'EXIT', Pid, Reason}, State) ->
+ couch_log:info("~p ~p died ~p", [?MODULE, Pid, Reason]),
+ case ets:lookup(?PROCS, Pid) of
+ [#proc_int{} = Proc] ->
+ NewState = remove_proc(State, Proc),
+ {noreply, flush_waiters(NewState, Proc#proc_int.lang)};
+ [] ->
+ {noreply, State}
+ end;
+
+handle_info({'DOWN', Ref, _, _, _Reason}, State0) ->
+ case ets:match_object(?PROCS, #proc_int{client=Ref, _='_'}) of
+ [#proc_int{} = Proc] ->
+ {noreply, return_proc(State0, Proc)};
+ [] ->
+ {noreply, State0}
+ end;
+
+
+handle_info(restart_config_listener, State) ->
+ ok = config:listen_for_changes(?MODULE, nil),
+ {noreply, State};
+
+handle_info(_Msg, State) ->
+ {noreply, State}.
+
+
+code_change(_OldVsn, #state{}=State, _Extra) ->
+ {ok, State}.
+
+
+format_status(_Opt, [_PDict, State]) ->
+ #state{
+ counts = Counts
+ } = State,
+ Scrubbed = State#state{
+ counts = {dict_size, dict:size(Counts)}
+ },
+ [{data, [{"State",
+ ?record_to_keyval(state, Scrubbed)
+ }]}].
+
+
+handle_config_terminate(_, stop, _) ->
+ ok;
+handle_config_terminate(_Server, _Reason, _State) ->
+ gen_server:cast(?MODULE, reload_config),
+ erlang:send_after(?RELISTEN_DELAY, whereis(?MODULE), restart_config_listener).
+
+handle_config_change("native_query_servers", _, _, _, _) ->
+ gen_server:cast(?MODULE, reload_config),
+ {ok, undefined};
+handle_config_change("query_server_config", _, _, _, _) ->
+ gen_server:cast(?MODULE, reload_config),
+ {ok, undefined};
+handle_config_change(_, _, _, _, _) ->
+ {ok, undefined}.
+
+
+find_proc(#client{lang = Lang, ddoc_key = undefined}) ->
+ Pred = fun(_) ->
+ true
+ end,
+ find_proc(Lang, Pred);
+find_proc(#client{lang = Lang, ddoc = DDoc, ddoc_key = DDocKey} = Client) ->
+ Pred = fun(#proc_int{ddoc_keys = DDocKeys}) ->
+ lists:member(DDocKey, DDocKeys)
+ end,
+ case find_proc(Lang, Pred) of
+ not_found ->
+ case find_proc(Client#client{ddoc_key=undefined}) of
+ {ok, Proc} ->
+ teach_ddoc(DDoc, DDocKey, Proc);
+ Else ->
+ Else
+ end;
+ Else ->
+ Else
+ end.
+
+find_proc(Lang, Fun) ->
+ try iter_procs(Lang, Fun)
+ catch error:Reason ->
+ StackTrace = erlang:get_stacktrace(),
+ couch_log:error("~p ~p ~p", [?MODULE, Reason, StackTrace]),
+ {error, Reason}
+ end.
+
+
+iter_procs(Lang, Fun) when is_binary(Lang) ->
+ Pattern = #proc_int{lang=Lang, client=undefined, _='_'},
+ MSpec = [{Pattern, [], ['$_']}],
+ case ets:select_reverse(?PROCS, MSpec, 25) of
+ '$end_of_table' ->
+ not_found;
+ Continuation ->
+ iter_procs_int(Continuation, Fun)
+ end.
+
+
+iter_procs_int({[], Continuation0}, Fun) ->
+ case ets:select_reverse(Continuation0) of
+ '$end_of_table' ->
+ not_found;
+ Continuation1 ->
+ iter_procs_int(Continuation1, Fun)
+ end;
+iter_procs_int({[Proc | Rest], Continuation}, Fun) ->
+ case Fun(Proc) of
+ true ->
+ {ok, Proc};
+ false ->
+ iter_procs_int({Rest, Continuation}, Fun)
+ end.
+
+
+spawn_proc(State, Client) ->
+ Pid = spawn_link(?MODULE, new_proc, [Client]),
+ ets:insert(?OPENING, {Pid, Client}),
+ Counts = State#state.counts,
+ Lang = Client#client.lang,
+ State#state{
+ counts = dict:update_counter(Lang, 1, Counts)
+ }.
+
+
+new_proc(#client{ddoc=undefined, ddoc_key=undefined}=Client) ->
+ #client{from=From, lang=Lang} = Client,
+ Resp = try
+ case new_proc_int(From, Lang) of
+ {ok, Proc} ->
+ {spawn_ok, Proc, From};
+ Error ->
+ gen_server:reply(From, {error, Error}),
+ spawn_error
+ end
+ catch _:_ ->
+ spawn_error
+ end,
+ exit(Resp);
+
+new_proc(Client) ->
+ #client{from=From, lang=Lang, ddoc=DDoc, ddoc_key=DDocKey} = Client,
+ Resp = try
+ case new_proc_int(From, Lang) of
+ {ok, NewProc} ->
+ {ok, Proc} = teach_ddoc(DDoc, DDocKey, NewProc),
+ {spawn_ok, Proc, From};
+ Error ->
+ gen_server:reply(From, {error, Error}),
+ spawn_error
+ end
+ catch _:_ ->
+ spawn_error
+ end,
+ exit(Resp).
+
+split_string_if_longer(String, Pos) ->
+ case length(String) > Pos of
+ true -> lists:split(Pos, String);
+ false -> false
+ end.
+
+split_by_char(String, Char) ->
+ %% Erlang/OTP 17.5 doesn't have string:split/2.
+ %% This function doesn't handle errors;
+ %% it is designed to be used only in a specific context.
+ Pos = string:chr(String, Char),
+ {Key, [_Eq | Value]} = lists:split(Pos - 1, String),
+ {Key, Value}.
+
+get_servers_from_env(Spec) ->
+ SpecLen = length(Spec),
+ % loop over os:getenv(), match SPEC_
+ lists:filtermap(fun(EnvStr) ->
+ case split_string_if_longer(EnvStr, SpecLen) of
+ {Spec, Rest} ->
+ {true, split_by_char(Rest, $=)};
+ _ ->
+ false
+ end
+ end, os:getenv()).
+
+get_query_server(LangStr) ->
+ case ets:lookup(?SERVERS, string:to_upper(LangStr)) of
+ [{_, Command}] -> Command;
+ _ -> undefined
+ end.
+
+native_query_server_enabled() ->
+ % 1. [native_query_servers] enable_erlang_query_server = true | false
+ % 2. if [native_query_servers] erlang == {couch_native_process, start_link, []} -> pretend true as well
+ NativeEnabled = config:get_boolean("native_query_servers", "enable_erlang_query_server", false),
+ NativeLegacyConfig = config:get("native_query_servers", "erlang", ""),
+ NativeLegacyEnabled = NativeLegacyConfig =:= "{couch_native_process, start_link, []}",
+ NativeEnabled orelse NativeLegacyEnabled.
+
+maybe_configure_erlang_native_servers() ->
+ case native_query_server_enabled() of
+ true ->
+ ets:insert(?SERVERS, [
+ {"ERLANG", {couch_js_native_process, start_link, []}}]);
+ _Else ->
+ ok
+ end.
+
+new_proc_int(From, Lang) when is_binary(Lang) ->
+ LangStr = binary_to_list(Lang),
+ case get_query_server(LangStr) of
+ undefined ->
+ gen_server:reply(From, {unknown_query_language, Lang});
+ {M, F, A} ->
+ {ok, Pid} = apply(M, F, A),
+ make_proc(Pid, Lang, M);
+ Command ->
+ {ok, Pid} = couch_js_os_process:start_link(Command),
+ make_proc(Pid, Lang, couch_js_os_process)
+ end.
+
+
+teach_ddoc(DDoc, {DDocId, _Rev}=DDocKey, #proc_int{ddoc_keys=Keys}=Proc) ->
+ % send the ddoc over the wire
+ % we only share the rev with the client so it knows when to update its code,
+ % but the query server only keeps the latest copy of each ddoc around.
+ true = couch_js_query_servers:proc_prompt(
+ export_proc(Proc),
+ [<<"ddoc">>, <<"new">>, DDocId, couch_doc:to_json_obj(DDoc, [])]),
+ % we should remove any other ddoc keys for this docid
+ % because the query server overwrites without the rev
+ Keys2 = [{D,R} || {D,R} <- Keys, D /= DDocId],
+ % add ddoc to the proc
+ {ok, Proc#proc_int{ddoc_keys=[DDocKey|Keys2]}}.
+
+
+make_proc(Pid, Lang, Mod) when is_binary(Lang) ->
+ Proc = #proc_int{
+ lang = Lang,
+ pid = Pid,
+ prompt_fun = {Mod, prompt},
+ set_timeout_fun = {Mod, set_timeout},
+ stop_fun = {Mod, stop}
+ },
+ unlink(Pid),
+ {ok, Proc}.
+
+
+assign_proc(Pid, #proc_int{client=undefined}=Proc0) when is_pid(Pid) ->
+ Proc = Proc0#proc_int{client = erlang:monitor(process, Pid)},
+ ets:insert(?PROCS, Proc),
+ export_proc(Proc);
+assign_proc(#client{}=Client, #proc_int{client=undefined}=Proc) ->
+ {Pid, _} = Client#client.from,
+ assign_proc(Pid, Proc).
+
+
+return_proc(#state{} = State, #proc_int{} = ProcInt) ->
+ #proc_int{pid = Pid, lang = Lang} = ProcInt,
+ NewState = case is_process_alive(Pid) of true ->
+ case ProcInt#proc_int.t0 < State#state.threshold_ts of
+ true ->
+ remove_proc(State, ProcInt);
+ false ->
+ gen_server:cast(Pid, garbage_collect),
+ true = ets:update_element(?PROCS, Pid, [
+ {#proc_int.client, undefined}
+ ]),
+ State
+ end;
+ false ->
+ remove_proc(State, ProcInt)
+ end,
+ flush_waiters(NewState, Lang).
+
+
+remove_proc(State, #proc_int{}=Proc) ->
+ ets:delete(?PROCS, Proc#proc_int.pid),
+ case is_process_alive(Proc#proc_int.pid) of true ->
+ unlink(Proc#proc_int.pid),
+ gen_server:cast(Proc#proc_int.pid, stop);
+ false ->
+ ok
+ end,
+ Counts = State#state.counts,
+ Lang = Proc#proc_int.lang,
+ State#state{
+ counts = dict:update_counter(Lang, -1, Counts)
+ }.
+
+
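+% Convert the internal #proc_int{} record into the public #proc{} record by
+% keeping only its leading fields (a #proc{} is assumed to be a prefix of
+% #proc_int{}).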
+-spec export_proc(#proc_int{}) -> #proc{}.
+export_proc(#proc_int{} = ProcInt) ->
+ ProcIntList = tuple_to_list(ProcInt),
+ ProcLen = record_info(size, proc),
+ [_ | Data] = lists:sublist(ProcIntList, ProcLen),
+ list_to_tuple([proc | Data]).
+
+
+flush_waiters(State) ->
+ dict:fold(fun(Lang, Count, StateAcc) ->
+ case Count < State#state.hard_limit of
+ true ->
+ flush_waiters(StateAcc, Lang);
+ false ->
+ StateAcc
+ end
+ end, State, State#state.counts).
+
+
+flush_waiters(State, Lang) ->
+ CanSpawn = can_spawn(State, Lang),
+ case get_waiting_client(Lang) of
+ #client{from = From} = Client ->
+ case find_proc(Client) of
+ {ok, ProcInt} ->
+ Proc = assign_proc(Client, ProcInt),
+ gen_server:reply(From, {ok, Proc, State#state.config}),
+ remove_waiting_client(Client),
+ flush_waiters(State, Lang);
+ {error, Error} ->
+ gen_server:reply(From, {error, Error}),
+ remove_waiting_client(Client),
+ flush_waiters(State, Lang);
+ not_found when CanSpawn ->
+ NewState = spawn_proc(State, Client),
+ remove_waiting_client(Client),
+ flush_waiters(NewState, Lang);
+ not_found ->
+ State
+ end;
+ undefined ->
+ State
+ end.
+
+
+add_waiting_client(Client) ->
+ ets:insert(?WAITERS, Client#client{timestamp=os:timestamp()}).
+
+-spec get_waiting_client(Lang :: binary()) -> undefined | #client{}.
+get_waiting_client(Lang) ->
+ case ets:match_object(?WAITERS, #client{lang=Lang, _='_'}, 1) of
+ '$end_of_table' ->
+ undefined;
+ {[#client{}=Client], _} ->
+ Client
+ end.
+
+
+remove_waiting_client(#client{timestamp = Timestamp}) ->
+ ets:delete(?WAITERS, Timestamp).
+
+
+can_spawn(#state{hard_limit = HardLimit, counts = Counts}, Lang) ->
+ case dict:find(Lang, Counts) of
+ {ok, Count} -> Count < HardLimit;
+ error -> true
+ end.
+
+
+get_proc_config() ->
+ Limit = config:get("query_server_config", "reduce_limit", "true"),
+ Timeout = config:get("couchdb", "os_process_timeout", "5000"),
+ {[
+ {<<"reduce_limit">>, list_to_atom(Limit)},
+ {<<"timeout">>, list_to_integer(Timeout)}
+ ]}.
+
+
+get_hard_limit() ->
+ LimStr = config:get("query_server_config", "os_process_limit", "100"),
+ list_to_integer(LimStr).
+
+
+get_soft_limit() ->
+ LimStr = config:get("query_server_config", "os_process_soft_limit", "100"),
+ list_to_integer(LimStr).
diff --git a/src/couch_js/src/couch_js_query_servers.erl b/src/couch_js/src/couch_js_query_servers.erl
new file mode 100644
index 000000000..12dc864ea
--- /dev/null
+++ b/src/couch_js/src/couch_js_query_servers.erl
@@ -0,0 +1,683 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_js_query_servers).
+
+-export([try_compile/4]).
+-export([start_doc_map/3, map_doc_raw/2, stop_doc_map/1, raw_to_ejson/1]).
+-export([reduce/3, rereduce/3,validate_doc_update/5]).
+-export([filter_docs/5]).
+-export([filter_view/3]).
+-export([finalize/2]).
+-export([rewrite/3]).
+
+-export([with_ddoc_proc/2, proc_prompt/2, ddoc_prompt/3, ddoc_proc_prompt/3, json_doc/1]).
+
+% For 210-os-proc-pool.t
+-export([get_os_process/1, get_ddoc_process/2, ret_os_process/1]).
+
+-include_lib("couch/include/couch_db.hrl").
+
+-define(SUMERROR, <<"The _sum function requires that map values be numbers, "
+ "arrays of numbers, or objects. Objects cannot be mixed with other "
+ "data structures. Objects can be arbitrarily nested, provided that the values "
+ "for all fields are themselves numbers, arrays of numbers, or objects.">>).
+
+-define(STATERROR, <<"The _stats function requires that map values be numbers "
+ "or arrays of numbers, not '~p'">>).
+
+
+try_compile(Proc, FunctionType, FunctionName, FunctionSource) ->
+ try
+ proc_prompt(Proc, [<<"add_fun">>, FunctionSource]),
+ ok
+ catch
+ {compilation_error, E} ->
+ Fmt = "Compilation of the ~s function in the '~s' view failed: ~s",
+ Msg = io_lib:format(Fmt, [FunctionType, FunctionName, E]),
+ throw({compilation_error, Msg});
+ {os_process_error, {exit_status, ExitStatus}} ->
+ Fmt = "Compilation of the ~s function in the '~s' view failed with exit status: ~p",
+ Msg = io_lib:format(Fmt, [FunctionType, FunctionName, ExitStatus]),
+ throw({compilation_error, Msg})
+ end.
+
+start_doc_map(Lang, Functions, Lib) ->
+ Proc = get_os_process(Lang),
+ case Lib of
+ {[]} -> ok;
+ Lib ->
+ true = proc_prompt(Proc, [<<"add_lib">>, Lib])
+ end,
+ lists:foreach(fun(FunctionSource) ->
+ true = proc_prompt(Proc, [<<"add_fun">>, FunctionSource])
+ end, Functions),
+ {ok, Proc}.
+
+map_doc_raw(Proc, Doc) ->
+ Json = couch_doc:to_json_obj(Doc, []),
+ {ok, proc_prompt_raw(Proc, [<<"map_doc">>, Json])}.
+
+
+stop_doc_map(nil) ->
+ ok;
+stop_doc_map(Proc) ->
+ ok = ret_os_process(Proc).
+
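+% Transpose a list of reduction lists so that the Nth value from every input
+% list is grouped together (one group per reduce function).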
+group_reductions_results([]) ->
+ [];
+group_reductions_results(List) ->
+ {Heads, Tails} = lists:foldl(
+ fun([H|T], {HAcc,TAcc}) ->
+ {[H|HAcc], [T|TAcc]}
+ end, {[], []}, List),
+ case Tails of
+ [[]|_] -> % no tails left
+ [Heads];
+ _ ->
+ [Heads | group_reductions_results(Tails)]
+ end.
+
+finalize(<<"_approx_count_distinct",_/binary>>, Reduction) ->
+ true = hyper:is_hyper(Reduction),
+ {ok, round(hyper:card(Reduction))};
+finalize(<<"_stats",_/binary>>, Unpacked) ->
+ {ok, pack_stats(Unpacked)};
+finalize(_RedSrc, Reduction) ->
+ {ok, Reduction}.
+
+rereduce(_Lang, [], _ReducedValues) ->
+ {ok, []};
+rereduce(Lang, RedSrcs, ReducedValues) ->
+ Grouped = group_reductions_results(ReducedValues),
+ Results = lists:zipwith(
+ fun
+ (<<"_", _/binary>> = FunSrc, Values) ->
+ {ok, [Result]} = builtin_reduce(rereduce, [FunSrc], [[[], V] || V <- Values], []),
+ Result;
+ (FunSrc, Values) ->
+ os_rereduce(Lang, [FunSrc], Values)
+ end, RedSrcs, Grouped),
+ {ok, Results}.
+
+reduce(_Lang, [], _KVs) ->
+ {ok, []};
+reduce(Lang, RedSrcs, KVs) ->
+ {OsRedSrcs, BuiltinReds} = lists:partition(fun
+ (<<"_", _/binary>>) -> false;
+ (_OsFun) -> true
+ end, RedSrcs),
+ {ok, OsResults} = os_reduce(Lang, OsRedSrcs, KVs),
+ {ok, BuiltinResults} = builtin_reduce(reduce, BuiltinReds, KVs, []),
+ recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, []).
+
+
+recombine_reduce_results([], [], [], Acc) ->
+ {ok, lists:reverse(Acc)};
+recombine_reduce_results([<<"_", _/binary>>|RedSrcs], OsResults, [BRes|BuiltinResults], Acc) ->
+ recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [BRes|Acc]);
+recombine_reduce_results([_OsFun|RedSrcs], [OsR|OsResults], BuiltinResults, Acc) ->
+ recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [OsR|Acc]).
+
+os_reduce(_Lang, [], _KVs) ->
+ {ok, []};
+os_reduce(Lang, OsRedSrcs, KVs) ->
+ Proc = get_os_process(Lang),
+ OsResults = try proc_prompt(Proc, [<<"reduce">>, OsRedSrcs, KVs]) of
+ [true, Reductions] -> Reductions
+ catch
+ throw:{reduce_overflow_error, Msg} ->
+ [{[{reduce_overflow_error, Msg}]} || _ <- OsRedSrcs]
+ after
+ ok = ret_os_process(Proc)
+ end,
+ {ok, OsResults}.
+
+os_rereduce(Lang, OsRedSrcs, KVs) ->
+ case get_overflow_error(KVs) of
+ undefined ->
+ Proc = get_os_process(Lang),
+ try proc_prompt(Proc, [<<"rereduce">>, OsRedSrcs, KVs]) of
+ [true, [Reduction]] -> Reduction
+ catch
+ throw:{reduce_overflow_error, Msg} ->
+ {[{reduce_overflow_error, Msg}]}
+ after
+ ok = ret_os_process(Proc)
+ end;
+ Error ->
+ Error
+ end.
+
+
+get_overflow_error([]) ->
+ undefined;
+get_overflow_error([{[{reduce_overflow_error, _}]} = Error | _]) ->
+ Error;
+get_overflow_error([_ | Rest]) ->
+ get_overflow_error(Rest).
+
+
+builtin_reduce(_Re, [], _KVs, Acc) ->
+ {ok, lists:reverse(Acc)};
+builtin_reduce(Re, [<<"_sum",_/binary>>|BuiltinReds], KVs, Acc) ->
+ Sum = builtin_sum_rows(KVs, 0),
+ Red = check_sum_overflow(?term_size(KVs), ?term_size(Sum), Sum),
+ builtin_reduce(Re, BuiltinReds, KVs, [Red|Acc]);
+builtin_reduce(reduce, [<<"_count",_/binary>>|BuiltinReds], KVs, Acc) ->
+ Count = length(KVs),
+ builtin_reduce(reduce, BuiltinReds, KVs, [Count|Acc]);
+builtin_reduce(rereduce, [<<"_count",_/binary>>|BuiltinReds], KVs, Acc) ->
+ Count = builtin_sum_rows(KVs, 0),
+ builtin_reduce(rereduce, BuiltinReds, KVs, [Count|Acc]);
+builtin_reduce(Re, [<<"_stats",_/binary>>|BuiltinReds], KVs, Acc) ->
+ Stats = builtin_stats(Re, KVs),
+ builtin_reduce(Re, BuiltinReds, KVs, [Stats|Acc]);
+builtin_reduce(Re, [<<"_approx_count_distinct",_/binary>>|BuiltinReds], KVs, Acc) ->
+ Distinct = approx_count_distinct(Re, KVs),
+ builtin_reduce(Re, BuiltinReds, KVs, [Distinct|Acc]).
+
+
+builtin_sum_rows([], Acc) ->
+ Acc;
+builtin_sum_rows([[_Key, Value] | RestKVs], Acc) ->
+ try sum_values(Value, Acc) of
+ NewAcc ->
+ builtin_sum_rows(RestKVs, NewAcc)
+ catch
+ throw:{builtin_reduce_error, Obj} ->
+ Obj;
+ throw:{invalid_value, Reason, Cause} ->
+ {[{<<"error">>, <<"builtin_reduce_error">>},
+ {<<"reason">>, Reason}, {<<"caused_by">>, Cause}]}
+ end.
+
+
+sum_values(Value, Acc) when is_number(Value), is_number(Acc) ->
+ Acc + Value;
+sum_values(Value, Acc) when is_list(Value), is_list(Acc) ->
+ sum_arrays(Acc, Value);
+sum_values(Value, Acc) when is_number(Value), is_list(Acc) ->
+ sum_arrays(Acc, [Value]);
+sum_values(Value, Acc) when is_list(Value), is_number(Acc) ->
+ sum_arrays([Acc], Value);
+sum_values({Props}, Acc) ->
+ case lists:keyfind(<<"error">>, 1, Props) of
+ {<<"error">>, <<"builtin_reduce_error">>} ->
+ throw({builtin_reduce_error, {Props}});
+ false ->
+ ok
+ end,
+ case Acc of
+ 0 ->
+ {Props};
+ {AccProps} ->
+ {sum_objects(lists:sort(Props), lists:sort(AccProps))}
+ end;
+sum_values(Else, _Acc) ->
+ throw_sum_error(Else).
+
+sum_objects([{K1, V1} | Rest1], [{K1, V2} | Rest2]) ->
+ [{K1, sum_values(V1, V2)} | sum_objects(Rest1, Rest2)];
+sum_objects([{K1, V1} | Rest1], [{K2, V2} | Rest2]) when K1 < K2 ->
+ [{K1, V1} | sum_objects(Rest1, [{K2, V2} | Rest2])];
+sum_objects([{K1, V1} | Rest1], [{K2, V2} | Rest2]) when K1 > K2 ->
+ [{K2, V2} | sum_objects([{K1, V1} | Rest1], Rest2)];
+sum_objects([], Rest) ->
+ Rest;
+sum_objects(Rest, []) ->
+ Rest.
+
+sum_arrays([], []) ->
+ [];
+sum_arrays([_|_]=Xs, []) ->
+ Xs;
+sum_arrays([], [_|_]=Ys) ->
+ Ys;
+sum_arrays([X|Xs], [Y|Ys]) when is_number(X), is_number(Y) ->
+ [X+Y | sum_arrays(Xs,Ys)];
+sum_arrays(Else, _) ->
+ throw_sum_error(Else).
+
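+% A _sum reduction is considered overflowed when its size (as measured by
+% ?term_size) exceeds 4906 and is more than half the size of the input.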
+check_sum_overflow(InSize, OutSize, Sum) ->
+ Overflowed = OutSize > 4906 andalso OutSize * 2 > InSize,
+ case config:get("query_server_config", "reduce_limit", "true") of
+ "true" when Overflowed ->
+ Msg = log_sum_overflow(InSize, OutSize),
+ {[
+ {<<"error">>, <<"builtin_reduce_error">>},
+ {<<"reason">>, Msg}
+ ]};
+ "log" when Overflowed ->
+ log_sum_overflow(InSize, OutSize),
+ Sum;
+ _ ->
+ Sum
+ end.
+
+log_sum_overflow(InSize, OutSize) ->
+ Fmt = "Reduce output must shrink more rapidly: "
+ "input size: ~b "
+ "output size: ~b",
+ Msg = iolist_to_binary(io_lib:format(Fmt, [InSize, OutSize])),
+ couch_log:error(Msg, []),
+ Msg.
+
+builtin_stats(_, []) ->
+ {0, 0, 0, 0, 0};
+builtin_stats(_, [[_,First]|Rest]) ->
+ lists:foldl(fun([_Key, Value], Acc) ->
+ stat_values(Value, Acc)
+ end, build_initial_accumulator(First), Rest).
+
+stat_values(Value, Acc) when is_list(Value), is_list(Acc) ->
+ lists:zipwith(fun stat_values/2, Value, Acc);
+stat_values({PreRed}, Acc) when is_list(PreRed) ->
+ stat_values(unpack_stats({PreRed}), Acc);
+stat_values(Value, Acc) when is_number(Value) ->
+ stat_values({Value, 1, Value, Value, Value*Value}, Acc);
+stat_values(Value, Acc) when is_number(Acc) ->
+ stat_values(Value, {Acc, 1, Acc, Acc, Acc*Acc});
+stat_values(Value, Acc) when is_tuple(Value), is_tuple(Acc) ->
+ {Sum0, Cnt0, Min0, Max0, Sqr0} = Value,
+ {Sum1, Cnt1, Min1, Max1, Sqr1} = Acc,
+ {
+ Sum0 + Sum1,
+ Cnt0 + Cnt1,
+ erlang:min(Min0, Min1),
+ erlang:max(Max0, Max1),
+ Sqr0 + Sqr1
+ };
+stat_values(Else, _Acc) ->
+ throw_stat_error(Else).
+
+build_initial_accumulator(L) when is_list(L) ->
+ [build_initial_accumulator(X) || X <- L];
+build_initial_accumulator(X) when is_number(X) ->
+ {X, 1, X, X, X*X};
+build_initial_accumulator({_, _, _, _, _} = AlreadyUnpacked) ->
+ AlreadyUnpacked;
+build_initial_accumulator({Props}) ->
+ unpack_stats({Props});
+build_initial_accumulator(Else) ->
+ Msg = io_lib:format("non-numeric _stats input: ~w", [Else]),
+ throw({invalid_value, iolist_to_binary(Msg)}).
+
+unpack_stats({PreRed}) when is_list(PreRed) ->
+ {
+ get_number(<<"sum">>, PreRed),
+ get_number(<<"count">>, PreRed),
+ get_number(<<"min">>, PreRed),
+ get_number(<<"max">>, PreRed),
+ get_number(<<"sumsqr">>, PreRed)
+ }.
+
+
+pack_stats({Sum, Cnt, Min, Max, Sqr}) ->
+ {[{<<"sum">>,Sum}, {<<"count">>,Cnt}, {<<"min">>,Min}, {<<"max">>,Max}, {<<"sumsqr">>,Sqr}]};
+pack_stats({Packed}) ->
+ % Legacy code path before we had the finalize operation
+ {Packed};
+pack_stats(Stats) when is_list(Stats) ->
+ lists:map(fun pack_stats/1, Stats).
+
+get_number(Key, Props) ->
+ case couch_util:get_value(Key, Props) of
+ X when is_number(X) ->
+ X;
+ undefined when is_binary(Key) ->
+ get_number(binary_to_atom(Key, latin1), Props);
+ undefined ->
+ Msg = io_lib:format("user _stats input missing required field ~s (~p)",
+ [Key, Props]),
+ throw({invalid_value, iolist_to_binary(Msg)});
+ Else ->
+ Msg = io_lib:format("non-numeric _stats input received for ~s: ~w",
+ [Key, Else]),
+ throw({invalid_value, iolist_to_binary(Msg)})
+ end.
+
+% TODO allow customization of precision in the ddoc.
+approx_count_distinct(reduce, KVs) ->
+ lists:foldl(fun([[Key, _Id], _Value], Filter) ->
+ hyper:insert(term_to_binary(Key), Filter)
+ end, hyper:new(11), KVs);
+approx_count_distinct(rereduce, Reds) ->
+ hyper:union([Filter || [_, Filter] <- Reds]).
+
+% use the function stored in ddoc.validate_doc_update to test an update.
+-spec validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) -> ok when
+ DDoc :: ddoc(),
+ EditDoc :: doc(),
+ DiskDoc :: doc() | nil,
+ Ctx :: user_ctx(),
+ SecObj :: sec_obj().
+
+validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) ->
+ JsonEditDoc = couch_doc:to_json_obj(EditDoc, [revs]),
+ JsonDiskDoc = json_doc(DiskDoc),
+ Resp = ddoc_prompt(
+ DDoc,
+ [<<"validate_doc_update">>],
+ [JsonEditDoc, JsonDiskDoc, Ctx, SecObj]
+ ),
+ if Resp == 1 -> ok; true ->
+ couch_stats:increment_counter([couchdb, query_server, vdu_rejects], 1)
+ end,
+ case Resp of
+ RespCode when RespCode =:= 1; RespCode =:= ok; RespCode =:= true ->
+ ok;
+ {[{<<"forbidden">>, Message}]} ->
+ throw({forbidden, Message});
+ {[{<<"unauthorized">>, Message}]} ->
+ throw({unauthorized, Message});
+ {[{_, Message}]} ->
+ throw({unknown_error, Message});
+ Message when is_binary(Message) ->
+ throw({unknown_error, Message})
+ end.
+
+
+rewrite(Req, Db, DDoc) ->
+ Fields = [F || F <- chttpd_external:json_req_obj_fields(),
+ F =/= <<"info">>, F =/= <<"form">>,
+ F =/= <<"uuid">>, F =/= <<"id">>],
+ JsonReq = chttpd_external:json_req_obj(Req, Db, null, Fields),
+ case ddoc_prompt(DDoc, [<<"rewrites">>], [JsonReq]) of
+ {[{<<"forbidden">>, Message}]} ->
+ throw({forbidden, Message});
+ {[{<<"unauthorized">>, Message}]} ->
+ throw({unauthorized, Message});
+ [<<"no_dispatch_rule">>] ->
+ undefined;
+ [<<"ok">>, {V}=Rewrite] when is_list(V) ->
+ ok = validate_rewrite_response(Rewrite),
+ Rewrite;
+ [<<"ok">>, _] ->
+ throw_rewrite_error(<<"bad rewrite">>);
+ V ->
+ couch_log:error("bad rewrite return ~p", [V]),
+ throw({unknown_error, V})
+ end.
+
+validate_rewrite_response({Fields}) when is_list(Fields) ->
+ validate_rewrite_response_fields(Fields).
+
+validate_rewrite_response_fields([{Key, Value} | Rest]) ->
+ validate_rewrite_response_field(Key, Value),
+ validate_rewrite_response_fields(Rest);
+validate_rewrite_response_fields([]) ->
+ ok.
+
+validate_rewrite_response_field(<<"method">>, Method) when is_binary(Method) ->
+ ok;
+validate_rewrite_response_field(<<"method">>, _) ->
+ throw_rewrite_error(<<"bad method">>);
+validate_rewrite_response_field(<<"path">>, Path) when is_binary(Path) ->
+ ok;
+validate_rewrite_response_field(<<"path">>, _) ->
+ throw_rewrite_error(<<"bad path">>);
+validate_rewrite_response_field(<<"body">>, Body) when is_binary(Body) ->
+ ok;
+validate_rewrite_response_field(<<"body">>, _) ->
+ throw_rewrite_error(<<"bad body">>);
+validate_rewrite_response_field(<<"headers">>, {Props}=Headers) when is_list(Props) ->
+ validate_object_fields(Headers);
+validate_rewrite_response_field(<<"headers">>, _) ->
+ throw_rewrite_error(<<"bad headers">>);
+validate_rewrite_response_field(<<"query">>, {Props}=Query) when is_list(Props) ->
+ validate_object_fields(Query);
+validate_rewrite_response_field(<<"query">>, _) ->
+ throw_rewrite_error(<<"bad query">>);
+validate_rewrite_response_field(<<"code">>, Code) when is_integer(Code) andalso Code >= 200 andalso Code < 600 ->
+ ok;
+validate_rewrite_response_field(<<"code">>, _) ->
+ throw_rewrite_error(<<"bad code">>);
+validate_rewrite_response_field(K, V) ->
+ couch_log:debug("unknown rewrite field ~p=~p", [K, V]),
+ ok.
+
+validate_object_fields({Props}) when is_list(Props) ->
+ lists:foreach(fun
+ ({Key, Value}) when is_binary(Key) andalso is_binary(Value) ->
+ ok;
+ ({Key, Value}) ->
+ Reason = io_lib:format(
+ "object key/value must be strings ~p=~p", [Key, Value]),
+ throw_rewrite_error(Reason);
+ (Value) ->
+ throw_rewrite_error(io_lib:format("bad value ~p", [Value]))
+ end, Props).
+
+
+throw_rewrite_error(Reason) when is_list(Reason)->
+ throw_rewrite_error(iolist_to_binary(Reason));
+throw_rewrite_error(Reason) when is_binary(Reason) ->
+ throw({rewrite_error, Reason}).
+
+
+json_doc_options() ->
+ json_doc_options([]).
+
+json_doc_options(Options) ->
+ Limit = config:get_integer("query_server_config", "revs_limit", 20),
+ [{revs, Limit} | Options].
+
+json_doc(Doc) ->
+ json_doc(Doc, json_doc_options()).
+
+json_doc(nil, _) ->
+ null;
+json_doc(Doc, Options) ->
+ couch_doc:to_json_obj(Doc, Options).
+
+filter_view(DDoc, VName, Docs) ->
+ Options = json_doc_options(),
+ JsonDocs = [json_doc(Doc, Options) || Doc <- Docs],
+ [true, Passes] = ddoc_prompt(DDoc, [<<"views">>, VName, <<"map">>], [JsonDocs]),
+ {ok, Passes}.
+
+filter_docs(Req, Db, DDoc, FName, Docs) ->
+ JsonReq = case Req of
+ {json_req, JsonObj} ->
+ JsonObj;
+ #httpd{} = HttpReq ->
+ couch_httpd_external:json_req_obj(HttpReq, Db)
+ end,
+ Options = json_doc_options(),
+ JsonDocs = [json_doc(Doc, Options) || Doc <- Docs],
+ [true, Passes] = ddoc_prompt(DDoc, [<<"filters">>, FName],
+ [JsonDocs, JsonReq]),
+ {ok, Passes}.
+
+ddoc_proc_prompt({Proc, DDocId}, FunPath, Args) ->
+ proc_prompt(Proc, [<<"ddoc">>, DDocId, FunPath, Args]).
+
+ddoc_prompt(DDoc, FunPath, Args) ->
+ with_ddoc_proc(DDoc, fun({Proc, DDocId}) ->
+ proc_prompt(Proc, [<<"ddoc">>, DDocId, FunPath, Args])
+ end).
+
+with_ddoc_proc(#doc{id=DDocId,revs={Start, [DiskRev|_]}}=DDoc, Fun) ->
+ Rev = couch_doc:rev_to_str({Start, DiskRev}),
+ DDocKey = {DDocId, Rev},
+ Proc = get_ddoc_process(DDoc, DDocKey),
+ try Fun({Proc, DDocId})
+ after
+ ok = ret_os_process(Proc)
+ end.
+
+proc_prompt(Proc, Args) ->
+ case proc_prompt_raw(Proc, Args) of
+ {json, Json} ->
+ ?JSON_DECODE(Json);
+ EJson ->
+ EJson
+ end.
+
+proc_prompt_raw(#proc{prompt_fun = {Mod, Func}} = Proc, Args) ->
+ apply(Mod, Func, [Proc#proc.pid, Args]).
+
+raw_to_ejson({json, Json}) ->
+ ?JSON_DECODE(Json);
+raw_to_ejson(EJson) ->
+ EJson.
+
+proc_stop(Proc) ->
+ {Mod, Func} = Proc#proc.stop_fun,
+ apply(Mod, Func, [Proc#proc.pid]).
+
+proc_set_timeout(Proc, Timeout) ->
+ {Mod, Func} = Proc#proc.set_timeout_fun,
+ apply(Mod, Func, [Proc#proc.pid, Timeout]).
+
+get_os_process_timeout() ->
+ list_to_integer(config:get("couchdb", "os_process_timeout", "5000")).
+
+get_ddoc_process(#doc{} = DDoc, DDocKey) ->
+ % remove this case statement
+ case gen_server:call(couch_js_proc_manager, {get_proc, DDoc, DDocKey}, get_os_process_timeout()) of
+ {ok, Proc, {QueryConfig}} ->
+ % process knows the ddoc
+ case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of
+ true ->
+ proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)),
+ Proc;
+ _ ->
+ catch proc_stop(Proc),
+ get_ddoc_process(DDoc, DDocKey)
+ end;
+ Error ->
+ throw(Error)
+ end.
+
+get_os_process(Lang) ->
+ case gen_server:call(couch_js_proc_manager, {get_proc, Lang}, get_os_process_timeout()) of
+ {ok, Proc, {QueryConfig}} ->
+ case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of
+ true ->
+ proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)),
+ Proc;
+ _ ->
+ catch proc_stop(Proc),
+ get_os_process(Lang)
+ end;
+ Error ->
+ throw(Error)
+ end.
+
+ret_os_process(Proc) ->
+ true = gen_server:call(couch_js_proc_manager, {ret_proc, Proc}, infinity),
+ catch unlink(Proc#proc.pid),
+ ok.
+
+throw_sum_error(Else) ->
+ throw({invalid_value, ?SUMERROR, Else}).
+
+throw_stat_error(Else) ->
+ throw({invalid_value, iolist_to_binary(io_lib:format(?STATERROR, [Else]))}).
+
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+builtin_sum_rows_negative_test() ->
+ A = [{[{<<"a">>, 1}]}, {[{<<"a">>, 2}]}, {[{<<"a">>, 3}]}],
+ E = {[{<<"error">>, <<"builtin_reduce_error">>}]},
+ ?assertEqual(E, builtin_sum_rows([["K", E]], [])),
+ % The below case is where the value is invalid, but no error because
+ % it's only one document.
+ ?assertEqual(A, builtin_sum_rows([["K", A]], [])),
+ {Result} = builtin_sum_rows([["K", A]], [1, 2, 3]),
+ ?assertEqual({<<"error">>, <<"builtin_reduce_error">>},
+ lists:keyfind(<<"error">>, 1, Result)).
+
+sum_values_test() ->
+ ?assertEqual(3, sum_values(1, 2)),
+ ?assertEqual([2,4,6], sum_values(1, [1,4,6])),
+ ?assertEqual([3,5,7], sum_values([3,2,4], [0,3,3])),
+ X = {[{<<"a">>,1}, {<<"b">>,[1,2]}, {<<"c">>, {[{<<"d">>,3}]}},
+ {<<"g">>,1}]},
+ Y = {[{<<"a">>,2}, {<<"b">>,3}, {<<"c">>, {[{<<"e">>, 5}]}},
+ {<<"f">>,1}, {<<"g">>,1}]},
+ Z = {[{<<"a">>,3}, {<<"b">>,[4,2]}, {<<"c">>, {[{<<"d">>,3},{<<"e">>,5}]}},
+ {<<"f">>,1}, {<<"g">>,2}]},
+ ?assertEqual(Z, sum_values(X, Y)),
+ ?assertEqual(Z, sum_values(Y, X)).
+
+sum_values_negative_test() ->
+ % invalid value
+ A = [{[{<<"a">>, 1}]}, {[{<<"a">>, 2}]}, {[{<<"a">>, 3}]}],
+ B = ["error 1", "error 2"],
+ C = [<<"error 3">>, <<"error 4">>],
+ KV = {[{<<"error">>, <<"builtin_reduce_error">>},
+ {<<"reason">>, ?SUMERROR}, {<<"caused_by">>, <<"some cause">>}]},
+ ?assertThrow({invalid_value, _, _}, sum_values(A, [1, 2, 3])),
+ ?assertThrow({invalid_value, _, _}, sum_values(A, 0)),
+ ?assertThrow({invalid_value, _, _}, sum_values(B, [1, 2])),
+ ?assertThrow({invalid_value, _, _}, sum_values(C, [0])),
+ ?assertThrow({builtin_reduce_error, KV}, sum_values(KV, [0])).
+
+stat_values_test() ->
+ ?assertEqual({1, 2, 0, 1, 1}, stat_values(1, 0)),
+ ?assertEqual({11, 2, 1, 10, 101}, stat_values(1, 10)),
+ ?assertEqual([{9, 2, 2, 7, 53},
+ {14, 2, 3, 11, 130},
+ {18, 2, 5, 13, 194}
+ ], stat_values([2,3,5], [7,11,13])).
+
+reduce_stats_test() ->
+ ?assertEqual([
+ {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
+ ], test_reduce(<<"_stats">>, [[[null, key], 2]])),
+
+ ?assertEqual([[
+ {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]},
+ {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
+ ]], test_reduce(<<"_stats">>, [[[null, key],[1,2]]])),
+
+ ?assertEqual(
+ {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
+ , element(2, finalize(<<"_stats">>, {2, 1, 2, 2, 4}))),
+
+ ?assertEqual([
+ {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]},
+ {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
+ ], element(2, finalize(<<"_stats">>, [
+ {1, 1, 1, 1, 1},
+ {2, 1, 2, 2, 4}
+ ]))),
+
+ ?assertEqual([
+ {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]},
+ {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
+ ], element(2, finalize(<<"_stats">>, [
+ {1, 1, 1, 1, 1},
+ {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
+ ]))),
+
+ ?assertEqual([
+ {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]},
+ {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
+ ], element(2, finalize(<<"_stats">>, [
+ {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]},
+ {2, 1, 2, 2, 4}
+ ]))),
+ ok.
+
+test_reduce(Reducer, KVs) ->
+ ?assertMatch({ok, _}, reduce(<<"javascript">>, [Reducer], KVs)),
+ {ok, Reduced} = reduce(<<"javascript">>, [Reducer], KVs),
+ {ok, Finalized} = finalize(Reducer, Reduced),
+ Finalized.
+
+-endif.
diff --git a/src/couch_js/src/couch_js_sup.erl b/src/couch_js/src/couch_js_sup.erl
new file mode 100644
index 000000000..e87546127
--- /dev/null
+++ b/src/couch_js/src/couch_js_sup.erl
@@ -0,0 +1,45 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+-module(couch_js_sup).
+-behaviour(supervisor).
+
+
+-export([
+ start_link/0
+]).
+
+-export([
+ init/1
+]).
+
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+
+init([]) ->
+ Flags = #{
+ strategy => one_for_one,
+ intensity => 50,
+ period => 3600
+ },
+ Children = [
+ #{
+ id => couch_js_proc_manager,
+ restart => permanent,
+ shutdown => brutal_kill,
+ start => {couch_js_proc_manager, start_link, []}
+ }
+ ],
+ {ok, {Flags, Children}}.
diff --git a/src/couch_js/test/couch_js_proc_manager_tests.erl b/src/couch_js/test/couch_js_proc_manager_tests.erl
new file mode 100644
index 000000000..f138dd651
--- /dev/null
+++ b/src/couch_js/test/couch_js_proc_manager_tests.erl
@@ -0,0 +1,373 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_js_proc_manager_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+
+-define(TDEF(A), {atom_to_list(A), fun A/0}).
+
+-define(NUM_PROCS, 3).
+-define(TIMEOUT, 1000).
+
+-define(TIMEOUT_ERROR(Msg), erlang:error({assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, Msg}
+ ]})).
+
+
+start() ->
+ ok = application:set_env(config, ini_files, ?CONFIG_CHAIN),
+ {ok, Started} = application:ensure_all_started(couch_js),
+ config:set("native_query_servers", "enable_erlang_query_server", "true", false),
+ config:set("query_server_config", "os_process_limit", "3", false),
+ config:set("query_server_config", "os_process_soft_limit", "2", false),
+ config:set("query_server_config", "os_process_idle_limit", "1", false),
+ ok = config_wait("os_process_idle_limit", "1"),
+ Started.
+
+
+stop(Apps) ->
+ lists:foreach(fun(App) ->
+ ok = application:stop(App)
+ end, lists:reverse(Apps)).
+
+
+couch_js_proc_manager_test_() ->
+ {
+ "couch_js_proc_manger tests",
+ {
+ setup,
+ fun start/0,
+ fun stop/1,
+ [
+ ?TDEF(should_block_new_proc_on_full_pool),
+ ?TDEF(should_free_slot_on_proc_unexpected_exit),
+ ?TDEF(should_reuse_known_proc),
+ ?TDEF(should_process_waiting_queue_as_fifo),
+ ?TDEF(should_reduce_pool_on_idle_os_procs)
+ ]
+ }
+ }.
+
+
+should_block_new_proc_on_full_pool() ->
+ ok = couch_js_proc_manager:reload(),
+
+ Clients = [
+ spawn_client(),
+ spawn_client(),
+ spawn_client()
+ ],
+
+ lists:foreach(fun(Client) ->
+ ?assertEqual(ok, ping_client(Client))
+ end, Clients),
+
+ % Make sure everyone got a different proc
+ Procs = [get_client_proc(Client) || Client <- Clients],
+ ?assertEqual(lists:sort(Procs), lists:usort(Procs)),
+
+ % This client will be stuck waiting for someone
+ % to give up their proc.
+ Client4 = spawn_client(),
+ ?assert(is_client_waiting(Client4)),
+
+ Client1 = hd(Clients),
+ Proc1 = hd(Procs),
+
+ ?assertEqual(ok, stop_client(Client1)),
+ ?assertEqual(ok, ping_client(Client4)),
+
+ Proc4 = get_client_proc(Client4),
+
+ ?assertEqual(Proc1#proc.pid, Proc4#proc.pid),
+ ?assertNotEqual(Proc1#proc.client, Proc4#proc.client),
+
+ lists:map(fun(C) ->
+ ?assertEqual(ok, stop_client(C))
+ end, [Client4 | tl(Clients)]).
+
+
+should_free_slot_on_proc_unexpected_exit() ->
+ ok = couch_js_proc_manager:reload(),
+
+ Clients = [
+ spawn_client(),
+ spawn_client(),
+ spawn_client()
+ ],
+
+ lists:foreach(fun(Client) ->
+ ?assertEqual(ok, ping_client(Client))
+ end, Clients),
+
+ Procs1 = [get_client_proc(Client) || Client <- Clients],
+ ProcClients1 = [Proc#proc.client || Proc <- Procs1],
+ ?assertEqual(lists:sort(Procs1), lists:usort(Procs1)),
+ ?assertEqual(lists:sort(ProcClients1), lists:usort(ProcClients1)),
+
+ Client1 = hd(Clients),
+ Proc1 = hd(Procs1),
+ ?assertEqual(ok, kill_client(Client1)),
+
+ Client4 = spawn_client(),
+ ?assertEqual(ok, ping_client(Client4)),
+ Proc4 = get_client_proc(Client4),
+
+ ?assertEqual(Proc1#proc.pid, Proc4#proc.pid),
+ ?assertNotEqual(Proc1#proc.client, Proc4#proc.client),
+
+ Procs2 = [Proc4 | tl(Procs1)],
+ ProcClients2 = [Proc4#proc.client | tl(ProcClients1)],
+ ?assertEqual(lists:sort(Procs2), lists:usort(Procs2)),
+ ?assertEqual(lists:sort(ProcClients2), lists:usort(ProcClients2)),
+
+ lists:map(fun(C) ->
+ ?assertEqual(ok, stop_client(C))
+ end, [Client4 | tl(Clients)]).
+
+
+should_reuse_known_proc() ->
+ ok = couch_js_proc_manager:reload(),
+
+ Clients = [
+ spawn_client(<<"ddoc1">>),
+ spawn_client(<<"ddoc2">>)
+ ],
+
+ lists:foreach(fun(Client) ->
+ ?assertEqual(ok, ping_client(Client))
+ end, Clients),
+
+ Procs = [get_client_proc(Client) || Client <- Clients],
+ ?assertEqual(lists:sort(Procs), lists:usort(Procs)),
+
+ lists:foreach(fun(Client) ->
+ ?assertEqual(ok, stop_client(Client))
+ end, Clients),
+
+ lists:foreach(fun(Proc) ->
+ ?assert(is_process_alive(Proc#proc.pid))
+ end, Procs),
+
+ Client = spawn_client(<<"ddoc1">>),
+ ?assertEqual(ok, ping_client(Client)),
+
+ OldProc = hd(Procs),
+ NewProc = get_client_proc(Client),
+
+ ?assertEqual(OldProc#proc.pid, NewProc#proc.pid),
+ ?assertNotEqual(OldProc#proc.client, NewProc#proc.client),
+ ?assertEqual(ok, stop_client(Client)).
+
+
+should_process_waiting_queue_as_fifo() ->
+ Clients = [
+ spawn_client(<<"ddoc1">>),
+ spawn_client(<<"ddoc2">>),
+ spawn_client(<<"ddoc3">>),
+ spawn_client(<<"ddoc4">>),
+ spawn_client(<<"ddoc5">>),
+ spawn_client(<<"ddoc6">>)
+ ],
+
+ lists:foldl(fun(Client, Pos) ->
+ case Pos =< ?NUM_PROCS of
+ true ->
+ ?assertEqual(ok, ping_client(Client));
+ false ->
+ ?assert(is_client_waiting(Client))
+ end,
+ Pos + 1
+ end, 1, Clients),
+
+ LastClients = lists:foldl(fun(_Iteration, ClientAcc) ->
+ FirstClient = hd(ClientAcc),
+ FirstProc = get_client_proc(FirstClient),
+ ?assertEqual(ok, stop_client(FirstClient)),
+
+ RestClients = tl(ClientAcc),
+
+ lists:foldl(fun(Client, Pos) ->
+ case Pos =< ?NUM_PROCS of
+ true ->
+ ?assertEqual(ok, ping_client(Client));
+ false ->
+ ?assert(is_client_waiting(Client))
+ end,
+ if Pos /= ?NUM_PROCS -> ok; true ->
+ BubbleProc = get_client_proc(Client),
+ ?assertEqual(FirstProc#proc.pid, BubbleProc#proc.pid),
+ ?assertNotEqual(FirstProc#proc.client, BubbleProc#proc.client)
+ end,
+ Pos + 1
+ end, 1, RestClients),
+
+ RestClients
+ end, Clients, lists:seq(1, 3)),
+
+ lists:foreach(fun(Client) ->
+ ?assertEqual(ok, stop_client(Client))
+ end, LastClients).
+
+
+should_reduce_pool_on_idle_os_procs() ->
+ Clients = [
+ spawn_client(<<"ddoc1">>),
+ spawn_client(<<"ddoc2">>),
+ spawn_client(<<"ddoc3">>)
+ ],
+
+ lists:foreach(fun(Client) ->
+ ?assertEqual(ok, ping_client(Client))
+ end, Clients),
+
+ ?assertEqual(3, couch_js_proc_manager:get_proc_count()),
+
+ lists:foreach(fun(Client) ->
+ ?assertEqual(ok, stop_client(Client))
+ end, Clients),
+
+ ?assertEqual(3, couch_js_proc_manager:get_proc_count()),
+
+ timer:sleep(1200),
+
+ ?assertEqual(1, couch_js_proc_manager:get_proc_count()).
+
+
+spawn_client() ->
+ Parent = self(),
+ Ref = make_ref(),
+ {Pid, _} = spawn_monitor(fun() ->
+ Parent ! {self(), initialized},
+ Proc = couch_js_query_servers:get_os_process(<<"erlang">>),
+ loop(Parent, Ref, Proc)
+ end),
+ receive
+ {Pid, initialized} ->
+ ok
+ after ?TIMEOUT ->
+ ?TIMEOUT_ERROR("Error creating client.")
+ end,
+ {Pid, Ref}.
+
+
+spawn_client(DDocId) ->
+ Parent = self(),
+ Ref = make_ref(),
+ {Pid, _} = spawn_monitor(fun() ->
+ DDocKey = {DDocId, <<"1-abcdefgh">>},
+ DDoc = #doc{body={[{<<"language">>, <<"erlang">>}]}},
+ Parent ! {self(), initialized},
+ Proc = couch_js_query_servers:get_ddoc_process(DDoc, DDocKey),
+ loop(Parent, Ref, Proc)
+ end),
+ receive
+ {Pid, initialized} ->
+ ok
+ after ?TIMEOUT ->
+ ?TIMEOUT_ERROR("Error creating ddoc client.")
+ end,
+ {Pid, Ref}.
+
+
+loop(Parent, Ref, Proc) ->
+ receive
+ ping ->
+ Parent ! {pong, Ref},
+ loop(Parent, Ref, Proc);
+ get_proc ->
+ Parent ! {proc, Ref, Proc},
+ loop(Parent, Ref, Proc);
+ stop ->
+ couch_js_query_servers:ret_os_process(Proc),
+ Parent ! {stop, Ref};
+ die ->
+ Parent ! {die, Ref},
+ exit(some_error)
+ end.
+
+
+ping_client({Pid, Ref}) ->
+ Pid ! ping,
+ receive
+ {pong, Ref} ->
+ ok
+ after ?TIMEOUT ->
+ ?TIMEOUT_ERROR("Timeout pinging client")
+ end.
+
+
+is_client_waiting({Pid, _Ref}) ->
+ {status, Status} = process_info(Pid, status),
+ {current_function, {M, F, A}} = process_info(Pid, current_function),
+ Status == waiting andalso {M, F, A} == {gen, do_call, 4}.
+
+
+get_client_proc({Pid, Ref}) ->
+ Pid ! get_proc,
+ receive
+ {proc, Ref, Proc} -> Proc
+ after ?TIMEOUT ->
+ ?TIMEOUT_ERROR("Timeout getting proc from client")
+ end.
+
+
+stop_client({Pid, Ref}) ->
+ Pid ! stop,
+ receive
+ {stop, Ref} ->
+ ok
+ after ?TIMEOUT ->
+ ?TIMEOUT_ERROR("Timeout stopping client")
+ end,
+ receive
+ {'DOWN', _, _, Pid, _} ->
+ ok
+ after ?TIMEOUT ->
+ ?TIMEOUT_ERROR("Timeout waiting for stopped client 'DOWN'")
+ end.
+
+
+kill_client({Pid, Ref}) ->
+ Pid ! die,
+ receive
+ {die, Ref} ->
+ ok
+ after ?TIMEOUT ->
+ ?TIMEOUT_ERROR("Timeout killing client")
+ end,
+ receive
+ {'DOWN', _, _, Pid, _} ->
+ ok
+ after ?TIMEOUT ->
+ ?TIMEOUT_ERROR("Timeout waiting for killed client 'DOWN'")
+ end.
+
+
+config_wait(Key, Value) ->
+ config_wait(Key, Value, 0).
+
+config_wait(Key, Value, Count) ->
+ case config:get("query_server_config", Key) of
+ Value ->
+ ok;
+ _ when Count > 10 ->
+ ?TIMEOUT_ERROR("Error waiting for config changes.");
+ _ ->
+ timer:sleep(10),
+ config_wait(Key, Value, Count + 1)
+ end.
diff --git a/src/couch_js/test/couch_js_query_servers_tests.erl b/src/couch_js/test/couch_js_query_servers_tests.erl
new file mode 100644
index 000000000..bc4ecc72f
--- /dev/null
+++ b/src/couch_js/test/couch_js_query_servers_tests.erl
@@ -0,0 +1,96 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_js_query_servers_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+
+
+setup() ->
+ meck:new([config, couch_log]).
+
+
+teardown(_) ->
+ meck:unload().
+
+
+sum_overflow_test_() ->
+ {
+ "Test overflow detection in the _sum reduce function",
+ {
+ setup,
+ fun setup/0,
+ fun teardown/1,
+ [
+ fun should_return_error_on_overflow/0,
+ fun should_return_object_on_log/0,
+ fun should_return_object_on_false/0
+ ]
+ }
+ }.
+
+
+should_return_error_on_overflow() ->
+ setup_reduce_limit_mock("true"),
+
+ KVs = gen_sum_kvs(),
+ {ok, [Result]} = couch_query_servers:reduce(<<"foo">>, [<<"_sum">>], KVs),
+ ?assertMatch({[{<<"error">>, <<"builtin_reduce_error">>} | _]}, Result),
+
+ check_reduce_limit_mock().
+
+
+should_return_object_on_log() ->
+ setup_reduce_limit_mock("log"),
+
+ KVs = gen_sum_kvs(),
+ {ok, [Result]} = couch_query_servers:reduce(<<"foo">>, [<<"_sum">>], KVs),
+ ?assertMatch({[_ | _]}, Result),
+ Keys = [K || {K, _} <- element(1, Result)],
+ ?assert(not lists:member(<<"error">>, Keys)),
+
+ check_reduce_limit_mock().
+
+
+should_return_object_on_false() ->
+ setup_reduce_limit_mock("false"),
+
+ KVs = gen_sum_kvs(),
+ {ok, [Result]} = couch_query_servers:reduce(<<"foo">>, [<<"_sum">>], KVs),
+ ?assertMatch({[_ | _]}, Result),
+ Keys = [K || {K, _} <- element(1, Result)],
+ ?assert(not lists:member(<<"error">>, Keys)),
+
+ ?assert(meck:called(config, get, '_')),
+ ?assertNot(meck:called(couch_log, error, '_')).
+
+
+gen_sum_kvs() ->
+ lists:map(fun(I) ->
+ Props = lists:map(fun(_) ->
+ K = couch_util:encodeBase64Url(crypto:strong_rand_bytes(16)),
+ {K, 1}
+ end, lists:seq(1, 20)),
+ [I, {Props}]
+ end, lists:seq(1, 10)).
+
+
+setup_reduce_limit_mock(Value) ->
+ ConfigArgs = ["query_server_config", "reduce_limit", "true"],
+ meck:reset([config, couch_log]),
+ meck:expect(config, get, ConfigArgs, Value),
+ meck:expect(couch_log, error, ['_', '_'], ok).
+
+
+check_reduce_limit_mock() ->
+ ?assert(meck:called(config, get, '_')),
+ ?assert(meck:called(couch_log, error, '_')).
diff --git a/src/couch_log/src/couch_log_config.erl b/src/couch_log/src/couch_log_config.erl
index 766d068a4..ab076cc69 100644
--- a/src/couch_log/src/couch_log_config.erl
+++ b/src/couch_log/src/couch_log_config.erl
@@ -49,7 +49,8 @@ entries() ->
[
{level, "level", "info"},
{level_int, "level", "info"},
- {max_message_size, "max_message_size", "16000"}
+ {max_message_size, "max_message_size", "16000"},
+ {strip_last_msg, "strip_last_msg", "true"}
].
@@ -97,4 +98,10 @@ transform(max_message_size, SizeStr) ->
Size -> Size
catch _:_ ->
16000
- end.
\ No newline at end of file
+ end;
+
+transform(strip_last_msg, "false") ->
+ false;
+
+transform(strip_last_msg, _) ->
+ true.
diff --git a/src/couch_log/src/couch_log_config_dyn.erl b/src/couch_log/src/couch_log_config_dyn.erl
index f7541f61f..b39dcf2f5 100644
--- a/src/couch_log/src/couch_log_config_dyn.erl
+++ b/src/couch_log/src/couch_log_config_dyn.erl
@@ -25,4 +25,5 @@
get(level) -> info;
get(level_int) -> 2;
-get(max_message_size) -> 16000.
+get(max_message_size) -> 16000;
+get(strip_last_msg) -> true.
diff --git a/src/couch_log/src/couch_log_formatter.erl b/src/couch_log/src/couch_log_formatter.erl
index 4d81f184f..26997a8a6 100644
--- a/src/couch_log/src/couch_log_formatter.erl
+++ b/src/couch_log/src/couch_log_formatter.erl
@@ -68,7 +68,13 @@ format(Event) ->
do_format({error, _GL, {Pid, "** Generic server " ++ _, Args}}) ->
%% gen_server terminate
- [Name, LastMsg, State, Reason | Extra] = Args,
+ [Name, LastMsg0, State, Reason | Extra] = Args,
+ LastMsg = case couch_log_config:get(strip_last_msg) of
+ true ->
+ redacted;
+ false ->
+ LastMsg0
+ end,
MsgFmt = "gen_server ~w terminated with reason: ~s~n" ++
" last msg: ~p~n state: ~p~n extra: ~p",
MsgArgs = [Name, format_reason(Reason), LastMsg, State, Extra],
@@ -76,7 +82,13 @@ do_format({error, _GL, {Pid, "** Generic server " ++ _, Args}}) ->
do_format({error, _GL, {Pid, "** State machine " ++ _, Args}}) ->
%% gen_fsm terminate
- [Name, LastMsg, StateName, State, Reason | Extra] = Args,
+ [Name, LastMsg0, StateName, State, Reason | Extra] = Args,
+ LastMsg = case couch_log_config:get(strip_last_msg) of
+ true ->
+ redacted;
+ false ->
+ LastMsg0
+ end,
MsgFmt = "gen_fsm ~w in state ~w terminated with reason: ~s~n" ++
" last msg: ~p~n state: ~p~n extra: ~p",
MsgArgs = [Name, StateName, format_reason(Reason), LastMsg, State, Extra],
@@ -84,7 +96,13 @@ do_format({error, _GL, {Pid, "** State machine " ++ _, Args}}) ->
do_format({error, _GL, {Pid, "** gen_event handler" ++ _, Args}}) ->
%% gen_event handler terminate
- [ID, Name, LastMsg, State, Reason] = Args,
+ [ID, Name, LastMsg0, State, Reason] = Args,
+ LastMsg = case couch_log_config:get(strip_last_msg) of
+ true ->
+ redacted;
+ false ->
+ LastMsg0
+ end,
MsgFmt = "gen_event ~w installed in ~w terminated with reason: ~s~n" ++
" last msg: ~p~n state: ~p",
MsgArgs = [ID, Name, format_reason(Reason), LastMsg, State],
diff --git a/src/couch_log/src/couch_log_sup.erl b/src/couch_log/src/couch_log_sup.erl
index 6219a36e9..fc1ac7812 100644
--- a/src/couch_log/src/couch_log_sup.erl
+++ b/src/couch_log/src/couch_log_sup.erl
@@ -63,6 +63,8 @@ handle_config_change("log", Key, _, _, S) ->
couch_log_config:reconfigure();
"max_message_size" ->
couch_log_config:reconfigure();
+ "strip_last_msg" ->
+ couch_log_config:reconfigure();
_ ->
% Someone may have changed the config for
% the writer so we need to re-initialize.
diff --git a/src/couch_log/test/eunit/couch_log_config_test.erl b/src/couch_log/test/eunit/couch_log_config_test.erl
index c4677f37f..a4c4bcff2 100644
--- a/src/couch_log/test/eunit/couch_log_config_test.erl
+++ b/src/couch_log/test/eunit/couch_log_config_test.erl
@@ -25,7 +25,9 @@ couch_log_config_test_() ->
fun check_level/0,
fun check_max_message_size/0,
fun check_bad_level/0,
- fun check_bad_max_message_size/0
+ fun check_bad_max_message_size/0,
+ fun check_strip_last_msg/0,
+ fun check_bad_strip_last_msg/0
]
}.
@@ -108,3 +110,36 @@ check_bad_max_message_size() ->
couch_log_test_util:wait_for_config(),
?assertEqual(16000, couch_log_config:get(max_message_size))
end).
+
+
+check_strip_last_msg() ->
+ % Default is true
+ ?assertEqual(true, couch_log_config:get(strip_last_msg)),
+
+ couch_log_test_util:with_config_listener(fun() ->
+ config:set("log", "strip_last_msg", "false"),
+ couch_log_test_util:wait_for_config(),
+ ?assertEqual(false, couch_log_config:get(strip_last_msg)),
+
+ config:delete("log", "strip_last_msg"),
+ couch_log_test_util:wait_for_config(),
+ ?assertEqual(true, couch_log_config:get(strip_last_msg))
+ end).
+
+check_bad_strip_last_msg() ->
+ % Default is true
+ ?assertEqual(true, couch_log_config:get(strip_last_msg)),
+
+ couch_log_test_util:with_config_listener(fun() ->
+ config:set("log", "strip_last_msg", "false"),
+ couch_log_test_util:wait_for_config(),
+ ?assertEqual(false, couch_log_config:get(strip_last_msg)),
+
+ config:set("log", "strip_last_msg", "this is not a boolean"),
+ couch_log_test_util:wait_for_config(),
+ ?assertEqual(true, couch_log_config:get(strip_last_msg)),
+
+ config:delete("log", "strip_last_msg"),
+ couch_log_test_util:wait_for_config(),
+ ?assertEqual(true, couch_log_config:get(strip_last_msg))
+ end).
diff --git a/src/couch_log/test/eunit/couch_log_formatter_test.erl b/src/couch_log/test/eunit/couch_log_formatter_test.erl
index 795efcf29..24de346c6 100644
--- a/src/couch_log/test/eunit/couch_log_formatter_test.erl
+++ b/src/couch_log/test/eunit/couch_log_formatter_test.erl
@@ -81,7 +81,7 @@ gen_server_error_test() ->
do_matches(do_format(Event), [
"gen_server a_gen_server terminated",
"with reason: some_reason",
- "last msg: {foo,bar}",
+ "last msg: redacted",
"state: server_state",
"extra: \\[\\]"
]).
@@ -108,7 +108,7 @@ gen_server_error_with_extra_args_test() ->
do_matches(do_format(Event), [
"gen_server a_gen_server terminated",
"with reason: some_reason",
- "last msg: {foo,bar}",
+ "last msg: redacted",
"state: server_state",
"extra: \\[sad,args\\]"
]).
@@ -135,7 +135,7 @@ gen_fsm_error_test() ->
do_matches(do_format(Event), [
"gen_fsm a_gen_fsm in state state_name",
"with reason: barf",
- "last msg: {ohai,there}",
+ "last msg: redacted",
"state: curr_state",
"extra: \\[\\]"
]).
@@ -162,7 +162,7 @@ gen_fsm_error_with_extra_args_test() ->
do_matches(do_format(Event), [
"gen_fsm a_gen_fsm in state state_name",
"with reason: barf",
- "last msg: {ohai,there}",
+ "last msg: redacted",
"state: curr_state",
"extra: \\[sad,args\\]"
]).
@@ -195,7 +195,7 @@ gen_event_error_test() ->
do_matches(do_format(Event), [
"gen_event handler_id installed in a_gen_event",
"reason: barf",
- "last msg: {ohai,there}",
+ "last msg: redacted",
"state: curr_state"
]).
@@ -850,6 +850,110 @@ coverage_test() ->
})
).
+gen_server_error_with_last_msg_test() ->
+ Pid = self(),
+ Event = {
+ error,
+ erlang:group_leader(),
+ {
+ Pid,
+ "** Generic server and some stuff",
+ [a_gen_server, {foo, bar}, server_state, some_reason]
+ }
+ },
+ ?assertMatch(
+ #log_entry{
+ level = error,
+ pid = Pid
+ },
+ do_format(Event)
+ ),
+ with_last(fun() ->
+ do_matches(do_format(Event), [
+ "gen_server a_gen_server terminated",
+ "with reason: some_reason",
+ "last msg: {foo,bar}",
+ "state: server_state",
+ "extra: \\[\\]"
+ ])
+ end).
+
+gen_event_error_with_last_msg_test() ->
+ Pid = self(),
+ Event = {
+ error,
+ erlang:group_leader(),
+ {
+ Pid,
+ "** gen_event handler did a thing",
+ [
+ handler_id,
+ a_gen_event,
+ {ohai,there},
+ curr_state,
+ barf
+ ]
+ }
+ },
+ ?assertMatch(
+ #log_entry{
+ level = error,
+ pid = Pid
+ },
+ do_format(Event)
+ ),
+ with_last(fun() ->
+ do_matches(do_format(Event), [
+ "gen_event handler_id installed in a_gen_event",
+ "reason: barf",
+ "last msg: {ohai,there}",
+ "state: curr_state"
+ ])
+ end).
+
+
+gen_fsm_error_with_last_msg_test() ->
+ Pid = self(),
+ Event = {
+ error,
+ erlang:group_leader(),
+ {
+ Pid,
+ "** State machine did a thing",
+ [a_gen_fsm, {ohai,there}, state_name, curr_state, barf]
+ }
+ },
+ ?assertMatch(
+ #log_entry{
+ level = error,
+ pid = Pid
+ },
+ do_format(Event)
+ ),
+ with_last(fun() ->
+ do_matches(do_format(Event), [
+ "gen_fsm a_gen_fsm in state state_name",
+ "with reason: barf",
+ "last msg: {ohai,there}",
+ "state: curr_state",
+ "extra: \\[\\]"
+ ])
+ end).
+
+
+with_last(Fun) ->
+ meck:new(couch_log_config_dyn, [passthrough]),
+ try
+ meck:expect(couch_log_config_dyn, get, fun(Case) ->
+ case Case of
+ strip_last_msg -> false;
+ Case -> meck:passthrough([Case])
+ end
+ end),
+ Fun()
+ after
+ meck:unload(couch_log_config_dyn)
+ end.
do_format(Event) ->
E = couch_log_formatter:format(Event),
diff --git a/src/couch_mrview/include/couch_mrview.hrl b/src/couch_mrview/include/couch_mrview.hrl
index bb0ab0b46..e0f80df81 100644
--- a/src/couch_mrview/include/couch_mrview.hrl
+++ b/src/couch_mrview/include/couch_mrview.hrl
@@ -81,7 +81,9 @@
conflicts,
callback,
sorted = true,
- extra = []
+ extra = [],
+ page_size = undefined,
+ bookmark=nil
}).
-record(vacc, {
@@ -95,7 +97,9 @@
bufsize = 0,
threshold = 1490,
row_sent = false,
- meta_sent = false
+ meta_sent = false,
+ paginated = false,
+ meta = #{}
}).
-record(lacc, {
diff --git a/src/couch_mrview/src/couch_mrview.erl b/src/couch_mrview/src/couch_mrview.erl
index 1cdc91809..880dfa725 100644
--- a/src/couch_mrview/src/couch_mrview.erl
+++ b/src/couch_mrview/src/couch_mrview.erl
@@ -170,8 +170,18 @@ join([H|[]], _, Acc) ->
join([H|T], Sep, Acc) ->
join(T, Sep, [Sep, H | Acc]).
+validate(#{} = Db, DDoc) ->
+ DbName = fabric2_db:name(Db),
+ IsPartitioned = fabric2_db:is_partitioned(Db),
+ validate(DbName, IsPartitioned, DDoc);
-validate(Db, DDoc) ->
+validate(Db, DDoc) ->
+ DbName = couch_db:name(Db),
+ IsPartitioned = couch_db:is_partitioned(Db),
+ validate(DbName, IsPartitioned, DDoc).
+
+
+validate(DbName, _IsDbPartitioned, DDoc) ->
ok = validate_ddoc_fields(DDoc#doc.body),
GetName = fun
(#mrview{map_names = [Name | _]}) -> Name;
@@ -198,18 +208,8 @@ validate(Db, DDoc) ->
end,
{ok, #mrst{
language = Lang,
- views = Views,
- partitioned = Partitioned
- }} = couch_mrview_util:ddoc_to_mrst(couch_db:name(Db), DDoc),
-
- case {couch_db:is_partitioned(Db), Partitioned} of
- {false, true} ->
- throw({invalid_design_doc,
- <<"partitioned option cannot be true in a "
- "non-partitioned database.">>});
- {_, _} ->
- ok
- end,
+ views = Views
+ }} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
try Views =/= [] andalso couch_query_servers:get_os_process(Lang) of
false ->
diff --git a/src/couch_mrview/src/couch_mrview_http.erl b/src/couch_mrview/src/couch_mrview_http.erl
index 3cf8833d7..e1ba9d656 100644
--- a/src/couch_mrview/src/couch_mrview_http.erl
+++ b/src/couch_mrview/src/couch_mrview_http.erl
@@ -35,6 +35,8 @@
parse_params/3,
parse_params/4,
view_cb/2,
+ row_to_obj/1,
+ row_to_obj/2,
row_to_json/1,
row_to_json/2,
check_view_etag/3
@@ -413,11 +415,19 @@ prepend_val(#vacc{prepend=Prepend}) ->
row_to_json(Row) ->
+ ?JSON_ENCODE(row_to_obj(Row)).
+
+
+row_to_json(Kind, Row) ->
+ ?JSON_ENCODE(row_to_obj(Kind, Row)).
+
+
+row_to_obj(Row) ->
Id = couch_util:get_value(id, Row),
- row_to_json(Id, Row).
+ row_to_obj(Id, Row).
-row_to_json(error, Row) ->
+row_to_obj(error, Row) ->
% Special case for _all_docs request with KEYS to
% match prior behavior.
Key = couch_util:get_value(key, Row),
@@ -426,9 +436,8 @@ row_to_json(error, Row) ->
ReasonProp = if Reason == undefined -> []; true ->
[{reason, Reason}]
end,
- Obj = {[{key, Key}, {error, Val}] ++ ReasonProp},
- ?JSON_ENCODE(Obj);
-row_to_json(Id0, Row) ->
+ {[{key, Key}, {error, Val}] ++ ReasonProp};
+row_to_obj(Id0, Row) ->
Id = case Id0 of
undefined -> [];
Id0 -> [{id, Id0}]
@@ -439,8 +448,7 @@ row_to_json(Id0, Row) ->
undefined -> [];
Doc0 -> [{doc, Doc0}]
end,
- Obj = {Id ++ [{key, Key}, {value, Val}] ++ Doc},
- ?JSON_ENCODE(Obj).
+ {Id ++ [{key, Key}, {value, Val}] ++ Doc}.
parse_params(#httpd{}=Req, Keys) ->
@@ -523,6 +531,8 @@ parse_param(Key, Val, Args, IsDecoded) ->
Args#mrargs{end_key_docid=couch_util:to_binary(Val)};
"limit" ->
Args#mrargs{limit=parse_pos_int(Val)};
+ "page_size" ->
+ Args#mrargs{page_size=parse_pos_int(Val)};
"stale" when Val == "ok" orelse Val == <<"ok">> ->
Args#mrargs{stable=true, update=false};
"stale" when Val == "update_after" orelse Val == <<"update_after">> ->
diff --git a/src/couch_mrview/src/couch_mrview_index.erl b/src/couch_mrview/src/couch_mrview_index.erl
index 68f1d2322..8e844e80c 100644
--- a/src/couch_mrview/src/couch_mrview_index.erl
+++ b/src/couch_mrview/src/couch_mrview_index.erl
@@ -20,6 +20,7 @@
-export([index_file_exists/1]).
-export([update_local_purge_doc/2, verify_index_exists/2]).
-export([ensure_local_purge_docs/2]).
+-export([format_status/2]).
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_mrview/include/couch_mrview.hrl").
@@ -315,3 +316,14 @@ update_local_purge_doc(Db, State, PSeq) ->
BaseDoc
end,
couch_db:update_doc(Db, Doc, []).
+
+format_status(_Opt, [_PDict, State]) ->
+ Scrubbed = State#mrst{
+ lib = nil,
+ views = nil,
+ id_btree = nil,
+ doc_acc = nil,
+ doc_queue = nil,
+ write_queue = nil
+ },
+ ?record_to_keyval(mrst, Scrubbed).
diff --git a/src/couch_mrview/src/couch_mrview_util.erl b/src/couch_mrview/src/couch_mrview_util.erl
index e971720c9..b2b2354c9 100644
--- a/src/couch_mrview/src/couch_mrview_util.erl
+++ b/src/couch_mrview/src/couch_mrview_util.erl
@@ -397,7 +397,7 @@ fold_reduce({NthRed, Lang, View}, Fun, Acc, Options) ->
validate_args(Db, DDoc, Args0) ->
- {ok, State} = couch_mrview_index:init(Db, DDoc),
+ {ok, State} = couch_mrview_util:ddoc_to_mrst(fabric2_db:name(Db), DDoc),
Args1 = apply_limit(State#mrst.partitioned, Args0),
validate_args(State, Args1).
diff --git a/src/couch_peruser/src/couch_peruser.erl b/src/couch_peruser/src/couch_peruser.erl
index 886fb4f6e..4c06e8f27 100644
--- a/src/couch_peruser/src/couch_peruser.erl
+++ b/src/couch_peruser/src/couch_peruser.erl
@@ -19,7 +19,7 @@
% gen_server callbacks
-export([start_link/0, init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
+ terminate/2, code_change/3, format_status/2]).
-export([init_changes_handler/1, changes_handler/3]).
@@ -410,3 +410,14 @@ terminate(_Reason, _State) ->
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
+
+ format_status(_Opt, [_PDict, State]) ->
+ #state{
+ states = States
+ } = State,
+ Scrubbed = State#state{
+ states = {length, length(States)}
+ },
+ [{data, [{"State",
+ ?record_to_keyval(state, Scrubbed)
+ }]}].
\ No newline at end of file
diff --git a/src/couch_rate/README.md b/src/couch_rate/README.md
new file mode 100644
index 000000000..530da1a99
--- /dev/null
+++ b/src/couch_rate/README.md
@@ -0,0 +1,155 @@
+# Description
+
+The `couch_rate` application implements a generic rate limiter which can
+be used to control batch size and delay between batches. It was initially
+designed for background index builds to find an optimal batch size that
+utilizes the FDB transaction up to the configured `target` parameter. The
+application provides an API to plug in custom rate limiting logic when needed.
+
+# Default rate limit logic
+
+The `couch_rate_limiter` module is the rate limiting module used by default.
+It tracks the average number of reads and writes over a specified time
+period and uses these averages to calculate an approximate read/write
+ratio. The read/write ratio is then used to convert the estimated amount
+of writes into a batch size.
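+
+As a purely illustrative sketch (the exact formula is internal to
+`couch_rate_limiter` and may differ): if recent batches averaged 300 reads
+and 100 writes, the approximate read/write ratio is 3, so an estimate of
+50 writes for the next batch would translate into a budget of roughly
+3 * 50 = 150 documents.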
+
+# Configuration
+
+## API based usage
+
+In the simplest use case, the only mandatory keys `new/3` expects are:
+* `budget` - the initial value for the estimated batch size
+* `target` - the batch processing time, in msec, that we try to maintain
+* `window` - the time interval for the contention detector
+* `sensitivity` - the minimal interval within the `window`
+
+We choose sane default values for the rest of the parameters.
+
+* `window_size = window div sensitivity + 1`
+* `underload_threshold = round(target * 0.95)`
+* `overload_threshold = round(target * 1.05)`
+* `delay_threshold = round(target * 1.07)`
+
+Due to the use of `round` in the defaults calculation, `target` cannot be less
+than `36` msec; otherwise some of the thresholds become equal, which breaks
+the algorithm.
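+
+For example, with the `target => 2500`, `window => 60000` and
+`sensitivity => 1000` values from the `couch_view` configuration shown
+later in this document, the derived defaults are
+`window_size = 60000 div 1000 + 1 = 61`,
+`underload_threshold = round(2500 * 0.95) = 2375`,
+`overload_threshold = round(2500 * 1.05) = 2625` and
+`delay_threshold = round(2500 * 1.07) = 2675`.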
+
+If you need to specify custom parameters, the following keys are supported:
+
+* `window_size` - how many batches to consider in contention detector
+* `timer` - a zero-arity function returning the current time in msec (e.g. `fun() -> current_time_in_ms() end`); used in testing to fast forward time
+* `target` - the amount in msec which we try to maintain for batch processing time
+* `underload_threshold` - a threshold below which we would try to increase the budget
+* `overload_threshold` - a threshold above which we would start decreasing the budget
+* `delay_threshold` - a threshold above which we would start introducing delays between batches
+* `multiplicative_factor` - determines how fast we are going to decrease budget (must be in (0..1) range)
+* `regular_delay` - delay between batches when there is no overload
+* `congested_delay` - delay between batches when there is an overload
+* `initial_budget` - initial value for budget to start with
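+
+For illustration, the optional keys are passed in the same options map as the
+mandatory ones (the values below are placeholders):
+
+```
+Opts = #{budget => 100, target => 2500, window => 60000, sensitivity => 1000,
+         multiplicative_factor => 0.5, regular_delay => 50},
+```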
+
+## default.ini based usage
+
+Users of the `couch_rate` application pass a `ConfigId` parameter when calling
+`couch_rate:new` and `couch_rate:create_if_missing`. The `couch_rate`
+application uses it to construct the name of the configuration section from
+which the configuration parameters are read. The section name is constructed
+as `"couch_rate." ++ ConfigId`.
+The parameters are encoded using Erlang map syntax.
+Limitations of the map parser:
+
+* Keys must be atoms
+* Values are either integers or floats
+* We only support positive values in the map
+* The configuration object cannot use Erlang reserved words as keys:
+  `after`, `and`, `andalso`, `band`, `begin`, `bnot`, `bor`,
+  `bsl`, `bsr`, `bxor`, `case`, `catch`, `cond`, `div`, `end`,
+  `fun`, `if`, `let`, `not`, `of`, `or`, `orelse`, `receive`,
+  `rem`, `try`, `when`, `xor`
+
+The auxiliary `couch_rate_config` module implements the following API:
+
+* `couch_rate_config:from_str/1` - parses a string representation of parameters
+* `couch_rate_config:to_str/1` - converts parameters to string (used in testing)
+
+Here is an example of the configuration used in the `couch_views` application:
+
+```
+[couch_rate.views]
+limiter = couch_rate_limiter
+opts = #{budget => 100, target => 2500, window => 60000, sensitivity => 1000}
+```
+
+In the `couch_views` application it is used as follows:
+
+```
+Limiter = couch_rate:create_if_missing({DbName, DDocId}, "views"),
+```
+
+# API
+
+The application implements two API variants, both exposed by the `couch_rate`
+module:
+
+* explicit state passing
+* state store based approach
+
+The API variant is chosen based on the `StoreModule` argument passed to `new/4`.
+Currently we support the following values for `StoreModule`:
+
+* `nil` - explicit state passing is used
+* `couch_rate_ets` - ets based global state store (ets tables are owned by the application supervisor)
+* `couch_rate_pd` - process dictionary based local state store
+
+The "explicit state passing" style returns a tuple `{Result :: term(), state()}`.
+The result is the same as for state store based API.
+
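+For illustration, a sketch of a batch loop in the explicit state passing style
+(`Opts` is an options map like the ones shown earlier, and `process_batch/1` is
+a hypothetical function doing the actual reads and writes):
+
+```
+Limiter0 = couch_rate:new(my_job_id, couch_rate_limiter, nil, Opts),
+{Budget, Limiter1} = couch_rate:budget(Limiter0),
+{ok, Limiter2} = couch_rate:in(Limiter1, Budget),
+Limiter4 = case process_batch(Budget) of
+    {ok, Writes} ->
+        {ok, Limiter3} = couch_rate:success(Limiter2, Writes),
+        Limiter3;
+    error ->
+        {ok, Limiter3} = couch_rate:failure(Limiter2),
+        Limiter3
+end,
+{Delay, Limiter5} = couch_rate:delay(Limiter4),
+timer:sleep(Delay),
+%% loop again with Limiter5
+```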
+
+## State store based API of the `couch_rate` module
+
+All functions can return `{error, Reason :: term()}` in case of errors.
+This detail is omitted below.
+
+* `create_if_missing(Id :: id(), Module :: module(), Store :: module(), Options :: map()) -> limiter()` - create new rate limiter instance
+* `new(Id :: id(), Module :: module(), Store :: module(), Options :: map()) -> limiter()` - create new rate limiter instance
+* `budget(limiter()) -> Budget :: integer().` - get batch size
+* `delay(limiter()) -> Delay :: timeout().` - return delay in msec between batches
+* `wait(limiter()) -> ok` - block the caller for amount of time returned by `delay/1`
+* `in(limiter(), Reads :: integer()) -> limiter()` - notify the rate limiter of the number of reads that were actually done (could be less than `budget`)
+* `success(limiter(), Writes :: integer()) -> limiter()` - report how many writes happened
+* `failure(limiter()) -> limiter()` - called instead of `success/2` when a failure happens
+* `is_congestion(limiter()) -> boolean()` - returns `true` when congestion is detected
+* `format(limiter()) -> [{Key :: atom(), Value :: term()}]` - return a key/value list representing important aspects of the limiter state
+* `id(limiter()) -> id()` - returns the `id()` of the rate limiter
+* `module(limiter()) -> module()` - returns the callback module implementing the rate limiting logic
+* `state(limiter()) -> state()` - returns internal state of rate limiter.
+* `store(limiter()) -> module() | nil` - returns store state backend.
+
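+For illustration, the same flow with the `couch_rate_ets` state store, assuming
+the `couch_rate` application is running and a `[couch_rate.views]` section is
+configured as shown earlier (`Writes` comes from the caller's own bookkeeping):
+
+```
+Limiter = couch_rate:create_if_missing({DbName, DDocId}, "views"),
+Budget = couch_rate:budget(Limiter),
+ok = couch_rate:in(Limiter, Budget),
+%% ... process the batch, counting Writes ...
+ok = couch_rate:success(Limiter, Writes),
+couch_rate:wait(Limiter),
+```
+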
+# Testing
+
+The test suite is written in Elixir.
+
+## Running all tests
+
+```
+make couch && ERL_LIBS=`pwd`/src mix test --trace src/couch_rate/test/exunit/
+```
+
+## Running specific test suite
+
+```
+make couch && ERL_LIBS=`pwd`/src mix test --trace src/couch_rate/test/exunit/couch_rate_limiter_test.exs
+```
+
+## Running specific test using line number
+
+```
+make couch && ERL_LIBS=`pwd`/src mix test --trace src/couch_rate/test/exunit/couch_rate_limiter_test.exs:10
+```
+
+## Running traces with stats output
+
+```
+make couch && ERL_LIBS=`pwd`/src EXUNIT_DEBUG=true mix test --trace src/couch_rate/test/exunit/couch_rate_limiter_test.exs
+```
\ No newline at end of file
diff --git a/src/couch_rate/src/couch_rate.app.src b/src/couch_rate/src/couch_rate.app.src
new file mode 100644
index 000000000..ed6de81d6
--- /dev/null
+++ b/src/couch_rate/src/couch_rate.app.src
@@ -0,0 +1,24 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+{application, couch_rate, [
+    {description, "Simple rate limiter"},
+    {vsn, git},
+    {registered, [
+    ]},
+    {applications, [
+        kernel,
+        stdlib,
+        syntax_tools
+    ]},
+    {mod, {couch_rate_app, []}}
+]}.
diff --git a/src/couch_rate/src/couch_rate.erl b/src/couch_rate/src/couch_rate.erl
new file mode 100644
index 000000000..24bbcc2a5
--- /dev/null
+++ b/src/couch_rate/src/couch_rate.erl
@@ -0,0 +1,318 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_rate).
+
+-include("couch_rate.hrl").
+
+-export([
+ create_if_missing/2,
+ create_if_missing/3,
+ create_if_missing/4,
+ new/2,
+ new/3,
+ new/4,
+ from_map/4,
+ budget/1,
+ delay/1,
+ wait/1,
+ in/2,
+ success/2,
+ failure/1,
+ is_congestion/1,
+ min_latency/1,
+ format/1,
+ to_map/1,
+ id/1,
+ module/1,
+ state/1,
+ store/1
+]).
+
+-define(LIMITER, ?MODULE).
+
+-type id() :: term().
+-type state() :: term().
+-type store() :: module().
+
+-opaque limiter() :: #?LIMITER{}.
+
+-export_type([
+ id/0,
+ state/0,
+ limiter/0
+]).
+
+-spec create_if_missing(id(), string()) ->
+ couch_rate:limiter() | {error, Reason :: term()}.
+
+create_if_missing(Id, ConfigId) ->
+ ?MODULE:create_if_missing(Id, ConfigId, couch_rate_ets).
+
+-spec create_if_missing(id(), string(), nil | module()) ->
+ couch_rate:limiter() | {error, Reason :: term()}.
+
+create_if_missing(Id, ConfigId, StateStore) ->
+ {Module, Options} = get_config(ConfigId),
+ ?MODULE:create_if_missing(Id, Module, StateStore, Options).
+
+-spec create_if_missing(id(), module(), nil | module(), map()) ->
+ couch_rate:limiter() | {error, Reason :: term()}.
+
+create_if_missing(Id, Module, nil, Options) ->
+ #?LIMITER{
+ id = Id,
+ module = Module,
+ store = nil,
+ state = Module:new(Id, Options)
+ };
+
+create_if_missing(Id, Module, Store, Options) ->
+ case Store:create_if_missing(Id, Module:new(Id, Options)) of
+ {error, _} = Error ->
+ Error;
+ State ->
+ #?LIMITER{
+ id = Id,
+ module = Module,
+ store = Store,
+ state = State
+ }
+ end.
+
+
+-spec new(id(), string()) ->
+ couch_rate:limiter() | {error, Reason :: term()}.
+
+new(Id, ConfigId) ->
+ ?MODULE:new(Id, ConfigId, couch_rate_ets).
+
+-spec new(id(), string(), module()) ->
+ couch_rate:limiter() | {error, Reason :: term()}.
+
+new(Id, ConfigId, StateStore) ->
+ {Module, Options} = get_config(ConfigId),
+ ?MODULE:new(Id, Module, StateStore, Options).
+
+
+-spec new(id(), module(), nil | module(), map()) ->
+ couch_rate:limiter() | {error, Reason :: term()}.
+
+new(Id, Module, nil, Options) ->
+ #?LIMITER{
+ id = Id,
+ module = Module,
+ store = nil,
+ state = Module:new(Id, Options)
+ };
+
+new(Id, Module, Store, Options) ->
+ case Store:new(Id, Module:new(Id, Options)) of
+ {error, _} = Error ->
+ Error;
+ State ->
+ #?LIMITER{
+ id = Id,
+ module = Module,
+ store = Store,
+ state = State
+ }
+ end.
+
+
+-spec from_map(id(), module(), store(), map()) ->
+ couch_rate:limiter()
+ | {error, Reason :: term()}.
+
+from_map(Id, Module, nil, Map) ->
+ #?LIMITER{
+ id = Id,
+ module = Module,
+ store = nil,
+ state = Module:from_map(Map)
+ };
+
+from_map(Id, Module, Store, Map) ->
+ case Store:new(Id, Module:from_map(Map)) of
+ {error, _} = Error ->
+ Error;
+ State ->
+ #?LIMITER{
+ id = Id,
+ module = Module,
+ store = Store,
+ state = State
+ }
+ end.
+
+
+-spec update(limiter(), (
+ fun(
+ (id(), state()) ->
+ {Result :: term(), state()}
+ | {error, Reason :: term()}
+ )
+ )) ->
+ Result :: term()
+ | {Result :: term(), state()}
+ | {error, Reason :: term()}.
+
+update(#?LIMITER{store = nil, id = Id, state = State0} = Limiter, Fun) ->
+ case Fun(Id, State0) of
+ {error, _Reason} = Error ->
+ Error;
+ {Result, State1} ->
+ {Result, Limiter#?LIMITER{state = State1}}
+ end;
+
+update(#?LIMITER{id = Id, store = Store, state = State}, Fun) ->
+ Store:update(Id, State, Fun).
+
+
+-spec budget(limiter()) ->
+ Budget :: integer()
+ | {Budget :: integer(), limiter()}
+ | {error, term()}.
+
+budget(#?LIMITER{module = Module} = Limiter) ->
+ update(Limiter, fun(Id, StateIn) ->
+ Module:budget(Id, StateIn)
+ end).
+
+
+-spec delay(limiter()) ->
+ DelayTime :: integer()
+ | {DelayTime :: integer(), limiter()}
+ | {error, term()}.
+
+delay(#?LIMITER{module = Module} = Limiter) ->
+ update(Limiter, fun(Id, State) ->
+ Module:delay(Id, State)
+ end).
+
+
+-spec wait(limiter()) ->
+ ok
+ | {ok, limiter()}
+ | {error, term()}.
+
+wait(#?LIMITER{module = Module} = Limiter) ->
+ update(Limiter, fun(Id, State) ->
+ Module:wait(Id, State)
+ end).
+
+
+-spec in(limiter(), integer()) ->
+ ok
+ | {ok, limiter()}
+ | {error, term()}.
+
+in(#?LIMITER{module = Module} = Limiter, Reads) ->
+ update(Limiter, fun(Id, State) ->
+ Module:in(Id, State, Reads)
+ end).
+
+
+-spec success(limiter(), integer()) ->
+ ok
+ | limiter()
+ | {error, term()}.
+
+success(#?LIMITER{module = Module} = Limiter, Writes) ->
+ update(Limiter, fun(Id, State) ->
+ Module:success(Id, State, Writes)
+ end).
+
+
+-spec failure(limiter()) ->
+ ok
+ | limiter()
+ | {error, term()}.
+
+failure(#?LIMITER{module = Module} = Limiter) ->
+ update(Limiter, fun(Id, State) ->
+ Module:failure(Id, State)
+ end).
+
+
+-spec is_congestion(limiter()) -> boolean().
+
+is_congestion(#?LIMITER{store = nil, module = Module, id = Id, state = State}) ->
+ Module:is_congestion(Id, State);
+
+is_congestion(#?LIMITER{store = Store, module = Module, id = Id, state = State}) ->
+ Module:is_congestion(Id, Store:lookup(Id, State)).
+
+
+-spec format(limiter()) -> [{Key :: atom(), Value :: term()}].
+
+format(#?LIMITER{store = nil, module = Module, id = Id, state = State}) ->
+ Module:format(Id, State);
+
+format(#?LIMITER{store = Store, module = Module, id = Id, state = State}) ->
+ Module:format(Id, Store:lookup(Id, State)).
+
+
+-spec to_map(limiter()) -> map().
+
+to_map(#?LIMITER{store = nil, module = Module, id = Id, state = State}) ->
+ Module:to_map(Id, State);
+
+to_map(#?LIMITER{store = Store, module = Module, id = Id, state = State}) ->
+ Module:to_map(Id, Store:lookup(Id, State)).
+
+-spec min_latency(limiter()) -> pos_integer().
+
+min_latency(#?LIMITER{store = nil, module = Module, id = Id, state = State}) ->
+ Module:min_latency(Id, State);
+
+min_latency(#?LIMITER{store = Store, module = Module, id = Id, state = State}) ->
+ Module:to_map(Id, Store:lookup(Id, State)).
+
+
+-spec id(limiter()) -> id().
+
+id(Limiter) ->
+ Limiter#?LIMITER.id.
+
+
+-spec module(limiter()) -> module().
+
+module(Limiter) ->
+ Limiter#?LIMITER.module.
+
+
+-spec state(limiter()) -> state().
+
+state(Limiter) ->
+ Limiter#?LIMITER.state.
+
+-spec store(limiter()) -> module() | nil.
+
+store(Limiter) ->
+ Limiter#?LIMITER.store.
+
+
+get_config(ConfigId) ->
+ ConfigSection = "couch_rate." ++ ConfigId,
+ ModuleStr = config:get(ConfigSection, "limiter", "couch_rate_limiter"),
+ Module = list_to_existing_atom(ModuleStr),
+ case config:get(ConfigSection, "opts", undefined) of
+ undefined ->
+ {error, #{missing_key => "opts", in => ConfigSection}};
+ OptionsStr ->
+ Options = couch_rate_config:from_str(OptionsStr),
+ lists:map(fun(Key) ->
+ maps:is_key(Key, Options) orelse error(#{missing_key => Key, in => Options})
+ end, [budget, target, window, sensitivity]),
+ {Module, Options}
+ end.
diff --git a/src/couch_rate/src/couch_rate.hrl b/src/couch_rate/src/couch_rate.hrl
new file mode 100644
index 000000000..d19f7d8e4
--- /dev/null
+++ b/src/couch_rate/src/couch_rate.hrl
@@ -0,0 +1,19 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-record(couch_rate,
+ {
+ id :: couch_rate:id(),
+ module = couch_rate_limiter :: module(),
+ store = couch_rate_ets :: module() | nil,
+ state :: couch_rate:state()
+ }).
diff --git a/src/couch_rate/src/couch_rate_app.erl b/src/couch_rate/src/couch_rate_app.erl
new file mode 100644
index 000000000..2bb1621c3
--- /dev/null
+++ b/src/couch_rate/src/couch_rate_app.erl
@@ -0,0 +1,28 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_rate_app).
+
+-behaviour(application).
+
+-export([
+ start/2,
+ stop/1
+]).
+
+
+start(_StartType, _StartArgs) ->
+ couch_rate_sup:start_link().
+
+
+stop(_State) ->
+ ok.
diff --git a/src/couch_rate/src/couch_rate_config.erl b/src/couch_rate/src/couch_rate_config.erl
new file mode 100644
index 000000000..709fbc3d3
--- /dev/null
+++ b/src/couch_rate/src/couch_rate_config.erl
@@ -0,0 +1,66 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_rate_config).
+
+% This parser supports only maps where key is atom and value
+% is positive float or positive integer.
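+% For example (illustrative values):
+%   from_str("#{budget => 100, target => 2.5}") -> #{budget => 100, target => 2.5}
+%   to_str(#{budget => 100}) -> "#{budget => 100}"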
+
+-include_lib("syntax_tools/include/merl.hrl").
+
+-export([
+ from_str/1,
+ to_str/1
+]).
+
+from_str(String) ->
+ parse_map(merl:quote(String)).
+
+
+to_str(Map) when is_map(Map) ->
+ StringArgs = maps:fold(fun(Key, Val, Acc) ->
+ Acc ++ [atom_to_list(Key) ++ " => " ++ number_to_list(Val)]
+ end, [], Map),
+ "#{" ++ string:join(StringArgs, ", ") ++ "}".
+
+
+number_to_list(Int) when is_integer(Int) ->
+ integer_to_list(Int);
+
+number_to_list(Float) when is_float(Float) ->
+ float_to_list(Float).
+
+
+parse_map(MapAST) ->
+ erl_syntax:type(MapAST) == map_expr
+ orelse fail("Only #{field => pos_integer() | float()} syntax is supported"),
+ %% Parsing map manually, since merl does not support maps
+ lists:foldl(fun(AST, Bindings) ->
+ NameAST = erl_syntax:map_field_assoc_name(AST),
+ erl_syntax:type(NameAST) == atom
+ orelse fail("Only atoms are supported as field names"),
+ Name = erl_syntax:atom_value(NameAST),
+ ValueAST = erl_syntax:map_field_assoc_value(AST),
+ Value = case erl_syntax:type(ValueAST) of
+ integer ->
+ erl_syntax:integer_value(ValueAST);
+ float ->
+ erl_syntax:float_value(ValueAST);
+ _ ->
+ fail("Only pos_integer() or float() alowed as values")
+ end,
+ Bindings#{Name => Value}
+ end, #{}, erl_syntax:map_expr_fields(MapAST)).
+
+
+fail(Msg) ->
+    throw({error, Msg}).
\ No newline at end of file
diff --git a/src/couch_rate/src/couch_rate_ets.erl b/src/couch_rate/src/couch_rate_ets.erl
new file mode 100644
index 000000000..edd9d965c
--- /dev/null
+++ b/src/couch_rate/src/couch_rate_ets.erl
@@ -0,0 +1,119 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_rate_ets).
+
+-include("couch_rate.hrl").
+
+-export([
+ create_tables/0,
+ delete_tables/0,
+ create_if_missing/2,
+ new/2,
+ lookup/2,
+ update/3
+]).
+
+
+-define(SHARDS_N, 16).
+
+-type id() :: term().
+-type state() :: term().
+-type result() :: term().
+-type store_state() :: term().
+
+
+-spec create_if_missing(couch_rate:id(), state()) ->
+ store_state().
+
+create_if_missing(Id, State) ->
+ Tid = term_to_table(Id),
+ case ets:lookup(Tid, Id) of
+ [_ | _] -> ok;
+ _ -> ets:insert(Tid, {Id, State})
+ end,
+ ok.
+
+
+-spec new(couch_rate:id(), state()) ->
+ store_state()
+ | {error, term()}.
+
+new(Id, State) ->
+ Tid = term_to_table(Id),
+ case ets:insert_new(Tid, {Id, State}) of
+ true -> ok;
+ false -> {error, #{reason => already_exists, id => Id}}
+ end.
+
+
+-spec update(id(), store_state(), fun(
+ (id(), state()) -> {state(), result()}
+ )) ->
+ result()
+ | {error, term()}.
+
+update(Id, _StoreState, Fun) ->
+ Tid = term_to_table(Id),
+ case ets:lookup(Tid, Id) of
+ [{Id, State0}] ->
+ case Fun(Id, State0) of
+ {Result, State1} ->
+ ets:insert(Tid, {Id, State1}),
+ Result;
+ Error ->
+ Error
+ end;
+ _ ->
+ {error, #{reason => cannot_find, id => Id}}
+ end.
+
+
+-spec lookup(id(), store_state()) ->
+ state()
+ | {error, term()}.
+
+lookup(Id, _StoreState) ->
+ Tid = term_to_table(Id),
+ case ets:lookup(Tid, Id) of
+ [{Id, State}] ->
+ State;
+ _ ->
+ {error, #{reason => cannot_find, id => Id}}
+ end.
+
+
+create_tables() ->
+ Opts = [named_table, public, {read_concurrency, true}],
+ [ets:new(TableName, Opts) || TableName <- table_names()],
+ ok.
+
+delete_tables() ->
+ [ets:delete(TableName) || TableName <- table_names()],
+ ok.
+
+
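+%% The limiter state is sharded across ?SHARDS_N named ETS tables by hashing
+%% the limiter id, which spreads concurrent updates over multiple tables.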
+-spec term_to_table(any()) -> atom().
+term_to_table(Term) ->
+ PHash = erlang:phash2(Term),
+ table_name(PHash rem ?SHARDS_N).
+
+
+-dialyzer({no_return, table_names/0}).
+
+-spec table_names() -> [atom()].
+table_names() ->
+ [table_name(N) || N <- lists:seq(0, ?SHARDS_N - 1)].
+
+-spec table_name(non_neg_integer()) -> atom().
+table_name(Id) when is_integer(Id), Id >= 0 andalso Id < ?SHARDS_N ->
+    list_to_atom(atom_to_list(?MODULE) ++ "_" ++ integer_to_list(Id)).
\ No newline at end of file
diff --git a/src/couch_rate/src/couch_rate_limiter.erl b/src/couch_rate/src/couch_rate_limiter.erl
new file mode 100644
index 000000000..97a630206
--- /dev/null
+++ b/src/couch_rate/src/couch_rate_limiter.erl
@@ -0,0 +1,392 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_rate_limiter).
+
+%% This module implements an algorithm to control the consumption rate
+%% parameters such as:
+%% - batch size
+%% - delay between batches
+%% The components of the algorithm use
+%% - [ascending minima algorithm](http://web.archive.org/web/20120805114719/http://home.tiac.net/~cri/2001/slidingmin.html)
+%% - "Welford's method" of calculating average
+
+-export([
+ new/2,
+ from_map/2,
+ budget/2,
+ delay/2,
+ wait/2,
+ in/3,
+ success/3,
+ failure/2,
+ is_congestion/2,
+ min_latency/2,
+ format/2,
+ to_map/2
+]).
+
+-type msec() :: non_neg_integer().
+
+-define(STATE, ?MODULE).
+
+%% This is the number below which the math would not work due to rounding
+%% errors. In particular the default values for thresholds would become equal.
+-define(MIN_TARGET, 36).
+
+-define(record_to_keyval(Name, Record),
+ lists:zip(record_info(fields, Name),
+ tl(tuple_to_list(Record)))).
+
+-define(map_to_record(RecordName, Map),
+ element(1, lists:foldl(fun(Field, {Record, Idx}) ->
+ {setelement(Idx, Record, maps:get(Field, Map, element(Idx, Record))), Idx + 1}
+ end, {#RecordName{}, 2}, record_info(fields, RecordName)))).
+
+
+-define(record_to_map(RecordName, Record),
+ element(1, lists:foldl(fun(Field, {Map, Idx}) ->
+ {
+ maps:put(Field, element(Idx, Record), Map),
+ Idx + 1
+ }
+ end, {#{}, 2}, record_info(fields, RecordName)))).
+
+-record(?STATE, {
+ window_size = 0 :: 0 | pos_integer(),
+ timer = fun now_msec/0,
+ size = 1 :: pos_integer(),
+ epoch = 1 :: pos_integer(),
+ minimums :: queue:queue() | undefined,
+ start_ts = undefined,
+ mean_reads = 0.0,
+ mean_writes = 0.0,
+ reads = 0,
+ writes = 0,
+ target = 4500,
+ underload_threshold = 4275, %% target * 0.95
+ overload_threshold = 4725, %% target * 1.05
+ delay_threshold = 4950, %% target * 1.10
+ multiplicative_factor = 0.7,
+ regular_delay = 100 :: timeout(),
+ congested_delay = 5000 :: timeout(),
+ initial_budget = 100,
+ latency = 0,
+ has_failures = false
+}).
+
+-type state() :: #?STATE{}.
+
+
+-spec new(couch_rate:id(), Opts :: map()) -> state().
+
+new(_Id, #{sensitivity := S}) when S =< 0 ->
+ error("expected SensitivityTimeWindow > 0");
+
+new(_Id, #{target := T}) when T < ?MIN_TARGET ->
+ error("the target is too small");
+
+new(_Id, #{budget := B, target := T, window := W, sensitivity := S} = Opts) ->
+ WinSize = W div S + 1,
+ validate_arguments(?map_to_record(?STATE, maps:merge(#{
+ minimums => queue:new(),
+ window_size => WinSize,
+ initial_budget => B,
+ underload_threshold => round(T * 0.95),
+ overload_threshold => round(T * 1.05),
+ delay_threshold => round(T * 1.07)
+ }, maps:without([budget, window, sensitivity], Opts)))).
+
+
+-spec from_map(couch_rate:id(), map()) -> state().
+
+from_map(_Id, Map) ->
+ ?map_to_record(?STATE, Map).
+
+
+-spec budget(couch_rate:id(), state()) ->
+ {pos_integer(), state()}.
+
+budget(Id, #?STATE{} = State) ->
+ #?STATE{
+ reads = R,
+ writes = W,
+ mean_writes = MW,
+ mean_reads = MR,
+ multiplicative_factor = MultiplicativeFactor,
+ target = Target,
+ initial_budget = InitialBudget,
+ latency = Latency
+ } = State,
+ case pattern(Id, State) of
+ optimal ->
+ {max(1, round(MR)), State};
+ failed ->
+ %% decrease budget
+ {max(1, round(R * MultiplicativeFactor)), State};
+ overloaded ->
+ %% decrease budget
+ {max(1, round(R * MultiplicativeFactor)), State};
+ underloaded when W == 0 orelse Latency == 0 ->
+ {max(1, round(MR)), State};
+ underloaded ->
+ ReadWriteRatio = min(1, MR / max(1, MW)),
+ SingleWrite = Latency / W,
+ EstimatedWrites = floor(Target / SingleWrite),
+ {max(1, round(ReadWriteRatio * EstimatedWrites)), State};
+ init ->
+ {InitialBudget, State}
+ end.
+
+-spec delay(couch_rate:id(), state()) ->
+ {pos_integer(), state()}.
+
+delay(Id, #?STATE{} = State) ->
+ #?STATE{
+ regular_delay = RD,
+ congested_delay = CD
+ } = State,
+ case pattern(Id, State) of
+ failed ->
+ {CD, State};
+ _ ->
+ {RD, State}
+ end.
+
+
+-spec wait(couch_rate:id(), state()) ->
+ ok.
+
+wait(Id, State) ->
+ {Delay, _} = delay(Id, State),
+ timer:sleep(Delay).
+
+
+-spec in(couch_rate:id(), state(), Reads :: pos_integer()) ->
+ {ok, state()}.
+
+in(_Id, #?STATE{timer = TimerFun} = State, Reads) ->
+ {ok, State#?STATE{
+ reads = Reads,
+ start_ts = TimerFun()
+ }}.
+
+
+-spec success(couch_rate:id(), state(), Writes :: pos_integer()) ->
+ {ok, state()}.
+
+success(_Id, #?STATE{start_ts = undefined} = State, _Writes) ->
+ {ok, State};
+
+success(_Id, #?STATE{} = State, Writes) ->
+ #?STATE{
+ start_ts = TS,
+ timer = TimerFun,
+ reads = Reads,
+ mean_reads = MeanReads,
+ mean_writes = MeanWrites,
+ window_size = WinSize
+ } = State,
+ {ok, update_min(State#?STATE{
+ writes = Writes,
+ mean_writes = average(MeanWrites, WinSize, Writes),
+ mean_reads = average(MeanReads, WinSize, Reads),
+ latency = TimerFun() - TS,
+ has_failures = false
+ })}.
+
+
+-spec failure(couch_rate:id(), state()) -> {ok, state()}.
+
+failure(_Id, #?STATE{start_ts = undefined} = State) ->
+ {ok, State};
+
+failure(_Id, #?STATE{} = State) ->
+ #?STATE{
+ timer = TimerFun,
+ start_ts = TS
+ } = State,
+ {ok, update_min(State#?STATE{
+ writes = 0,
+ latency = TimerFun() - TS,
+ has_failures = true
+ })}.
+
+
+-spec is_congestion(couch_rate:id(), state()) -> boolean().
+
+is_congestion(Id, #?STATE{} = State) ->
+ case pattern(Id, State) of
+ overloaded -> true;
+ failed -> true;
+ _ -> false
+ end.
+
+
+-spec format(couch_rate:id(), state()) -> [{Key :: atom(), Value :: term()}].
+
+format(_Id, #?STATE{minimums = M} = State) ->
+ Map = ?record_to_map(?STATE, State),
+ Minimums = lists:map(fun({D, V}) ->
+ [{value, V}, {death, D}]
+ end, queue:to_list(M)),
+ maps:to_list(maps:merge(Map, #{
+ minimums => Minimums
+ })).
+
+
+-spec to_map(couch_rate:id(), state()) -> map().
+
+to_map(_Id, #?STATE{} = State) ->
+ ?record_to_map(?STATE, State).
+
+
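+%% Sliding-window minimum ("ascending minima"): `minimums` is a queue of
+%% {Death, Latency} pairs. Before a new latency sample is appended, every
+%% entry at the tail (in this module's head/tail naming) with a latency
+%% greater than or equal to the sample is evicted, so the head entry is
+%% always the minimum latency within the window; entries whose Death epoch
+%% has been reached are dropped from the head.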
+-spec update_min(state()) -> state().
+
+update_min(#?STATE{latency = ProcessingDelay} = Q0) ->
+ Q1 = remove_greater_than(Q0, ProcessingDelay),
+ Q2 = append(Q1, ProcessingDelay),
+ maybe_remove_first(Q2).
+
+
+-spec pattern(couch_rate:id(), state()) ->
+ init
+ | underloaded
+ | overloaded
+ | optimal
+ | failed.
+
+pattern(Id, #?STATE{} = State) ->
+ #?STATE{
+ underload_threshold = UnderloadThreshold,
+ overload_threshold = OverloadThreshold,
+ mean_writes = MW,
+ has_failures = HasFailures
+ } = State,
+ case min_latency(Id, State) of
+ MinRollingLatency when MinRollingLatency > OverloadThreshold ->
+ overloaded;
+ MinRollingLatency when MinRollingLatency > UnderloadThreshold ->
+ optimal;
+ MinRollingLatency when MinRollingLatency == 0 andalso MW == 0.0 ->
+ init;
+ _ when HasFailures ->
+ failed;
+ _ ->
+ underloaded
+ end.
+
+
+-spec min_latency(couch_rate:id(), state()) -> pos_integer() | 0.
+
+min_latency(_Id, #?STATE{size = 1}) ->
+ 0;
+
+min_latency(_Id, #?STATE{minimums = Minimums}) ->
+ {value, {_, Min}} = head(Minimums),
+ Min.
+
+
+validate_arguments(#?STATE{timer = TimerFun})
+ when not is_function(TimerFun, 0) ->
+ error("expected `timer` to be an arity 0 function");
+
+validate_arguments(#?STATE{window_size = WinSize})
+ when WinSize < 1 ->
+ error("expected `window_size` to be greater than 1");
+
+validate_arguments(#?STATE{initial_budget = Budget})
+ when Budget < 1 ->
+ error("expected `initial_budget` to be greater than 1");
+
+validate_arguments(#?STATE{overload_threshold = OT, target = T})
+ when OT =< T ->
+ error("expected `overload_threshold` to be greater than `target`");
+
+validate_arguments(#?STATE{underload_threshold = UT, target = T})
+ when UT >= T ->
+ error("expected `underload_threshold` to be less than `target`");
+
+validate_arguments(#?STATE{delay_threshold = DT, overload_threshold = OT})
+ when DT =< OT ->
+ error("expected `delay_threshold` to be greater than `overload_threshold`");
+
+validate_arguments(#?STATE{multiplicative_factor = MF})
+ when MF < 0 orelse MF > 1 ->
+ error("expected `multiplicative_factor` to be in the (0, 1) range");
+
+validate_arguments(#?STATE{} = State) ->
+ State.
+
+
+-spec remove_greater_than(state(), pos_integer()) -> state().
+
+remove_greater_than(#?STATE{minimums = Minimums, size = S} = State, Value) ->
+ case tail(Minimums) of
+ {value, {_, T}} when Value =< T ->
+ NewState = State#?STATE{minimums = tail_drop(Minimums), size = S - 1},
+ remove_greater_than(NewState, Value);
+ {value, _} ->
+ State;
+ empty ->
+ State#?STATE{epoch = 1}
+ end.
+
+
+-spec append(state(), pos_integer()) -> state().
+
+append(#?STATE{minimums = Minimums, epoch = E, window_size = S} = State, Value) ->
+ Death = E + S,
+ State#?STATE{
+ minimums = tail_put(Minimums, {Death, Value}),
+ epoch = E + 1,
+ size = S + 1
+ }.
+
+
+-spec maybe_remove_first(state()) -> state().
+
+maybe_remove_first(#?STATE{minimums = Minimums, epoch = E, size = S} = State) ->
+ case head(Minimums) of
+ {value, {E, _V}} ->
+ State#?STATE{minimums = head_drop(Minimums), size = S - 1};
+ _ ->
+ State
+ end.
+
+
+% Welford's method of computing a running average; see Donald Knuth,
+% The Art of Computer Programming, Vol 2, 3rd edition, page 232.
+average(Avg, WindowSize, Value) ->
+ Delta = Value - Avg,
+ Avg + Delta / WindowSize.
+
+%% The helper functions are added because queue module
+%% naming conventions are weird
+head(Q) -> queue:peek_r(Q).
+
+
+head_drop(Q) -> queue:drop_r(Q).
+
+tail(Q) -> queue:peek(Q).
+
+
+tail_put(Q, V) -> queue:in_r(V, Q).
+
+
+tail_drop(Q) -> queue:drop(Q).
+
+
+-spec now_msec() -> msec().
+now_msec() ->
+ {Mega, Sec, Micro} = os:timestamp(),
+    ((Mega * 1000000) + Sec) * 1000 + Micro div 1000.
\ No newline at end of file
diff --git a/src/couch_rate/src/couch_rate_pd.erl b/src/couch_rate/src/couch_rate_pd.erl
new file mode 100644
index 000000000..5d79f7890
--- /dev/null
+++ b/src/couch_rate/src/couch_rate_pd.erl
@@ -0,0 +1,90 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_rate_pd).
+
+-include("couch_rate.hrl").
+
+
+-export([
+ new/2,
+ create_if_missing/2,
+ lookup/2,
+ update/3
+]).
+
+-type id() :: term().
+-type state() :: term().
+-type result() :: term().
+-type store_state() :: term().
+
+-define(STATE_KEY, couch_rate_state).
+
+
+-spec create_if_missing(couch_rate:id(), state()) -> store_state().
+
+create_if_missing(Id, State) ->
+ case get({?STATE_KEY, Id}) of
+ undefined ->
+ put({?STATE_KEY, Id}, State),
+ ok;
+ _ ->
+ ok
+ end.
+
+
+-spec new(couch_rate:id(), state()) ->
+ store_state()
+ | {error, term()}.
+
+new(Id, State) ->
+ case get({?STATE_KEY, Id}) of
+ undefined ->
+ put({?STATE_KEY, Id}, State),
+ ok;
+ _ ->
+ {error, #{reason => already_exists, id => Id}}
+ end.
+
+
+-spec lookup(id(), store_state()) ->
+ state()
+ | {error, term()}.
+
+lookup(Id, _StoreState) ->
+ case get({?STATE_KEY, Id}) of
+ undefined ->
+ {error, #{reason => cannot_find, id => Id}};
+ State ->
+ State
+ end.
+
+
+-spec update(id(), store_state(), fun(
+ (id(), state()) -> {state(), result()}
+ )) ->
+ result()
+ | {error, term()}.
+
+update(Id, _StoreState, Fun) ->
+ case get({?STATE_KEY, Id}) of
+ undefined ->
+ {error, #{reason => cannot_find, id => Id}};
+ State ->
+ case Fun(Id, State) of
+ {Result, State} ->
+ put({?STATE_KEY, Id}, State),
+ Result;
+ Error ->
+ Error
+ end
+ end.
diff --git a/src/couch_rate/src/couch_rate_sup.erl b/src/couch_rate/src/couch_rate_sup.erl
new file mode 100644
index 000000000..1ce01b644
--- /dev/null
+++ b/src/couch_rate/src/couch_rate_sup.erl
@@ -0,0 +1,36 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_rate_sup).
+-behaviour(supervisor).
+-vsn(1).
+
+-export([
+ start_link/0,
+ init/1
+]).
+
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+
+init([]) ->
+ couch_rate_ets:create_tables(),
+ Flags = #{
+ strategy => one_for_one,
+ intensity => 5,
+ period => 10
+ },
+ Children = [
+ ],
+    {ok, {Flags, Children}}.
\ No newline at end of file
diff --git a/src/couch_rate/test/exunit/couch_rate_config_test.exs b/src/couch_rate/test/exunit/couch_rate_config_test.exs
new file mode 100644
index 000000000..7db30d272
--- /dev/null
+++ b/src/couch_rate/test/exunit/couch_rate_config_test.exs
@@ -0,0 +1,88 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+defmodule Couch.Rate.Config.Test do
+ use ExUnit.Case, async: true
+ use ExUnitProperties
+ import StreamData
+
+ @erlang_reserved_words MapSet.new([
+ "after",
+ "and",
+ "andalso",
+ "band",
+ "begin",
+ "bnot",
+ "bor",
+ "bsl",
+ "bsr",
+ "bxor",
+ "case",
+ "catch",
+ "cond",
+ "div",
+ "end",
+ "fun",
+ "if",
+ "let",
+ "not",
+ "of",
+ "or",
+ "orelse",
+ "receive",
+ "rem",
+ "try",
+ "when",
+ "xor"
+ ])
+
+ alias :couch_rate_config, as: RLC
+
+ test "parse valid configuration" do
+ parsed = RLC.from_str(~S(#{foo => 1, bar => 2.0}))
+    assert %{foo: 1, bar: 2.0} == parsed
+ end
+
+ property "roundtrip" do
+ check all(options <- valid_config()) do
+ parsed = RLC.from_str(RLC.to_str(options))
+ assert options == parsed
+ end
+ end
+
+ defp valid_config() do
+ map_of(
+ erlang_atom(),
+ one_of([
+ positive_integer(),
+ # we only support positive float
+ float(min: 0.0)
+ ])
+ )
+ end
+
+ defp erlang_atom() do
+ bind(string(:alphanumeric), fn str ->
+ bind(integer(?a..?z), fn char ->
+ erlang_atom(str, char)
+ end)
+ end)
+ end
+
+ defp erlang_atom(str, char) do
+ if MapSet.member?(@erlang_reserved_words, <<char, str::binary>>) do
+ String.to_atom(<<char, char, str::binary>>)
+ else
+ String.to_atom(<<char, str::binary>>)
+ end
+ end
+end
diff --git a/src/couch_rate/test/exunit/couch_rate_limiter_test.exs b/src/couch_rate/test/exunit/couch_rate_limiter_test.exs
new file mode 100644
index 000000000..ff70f793a
--- /dev/null
+++ b/src/couch_rate/test/exunit/couch_rate_limiter_test.exs
@@ -0,0 +1,350 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+defmodule Couch.Rate.Limiter.Test do
+ use ExUnit.Case, async: true
+
+ @transaction_timeout 5_000
+
+ alias :couch_rate, as: RL
+
+ describe "Stats :" do
+ @scenario %{rw_ratio: 1 / 1, target: 400, write_time: 100}
+ test "#{__ENV__.line} : #{inspect(@scenario)} (underloaded)" do
+ {rate_limiter, measurments} = simulate(@scenario, 1000)
+ stats = statistics(measurments)
+ maybe_debug(rate_limiter, measurments, stats)
+
+ assert stats.wait_time.p90 == 100,
+ "expected no artificial delays for more than 90% of batches"
+
+ budget = stats.budget
+
+ assert floor(budget.p95) in 1..7,
+ "expected budget to converge into the 1..7 range (got #{budget.p95})"
+
+ reads = stats.mean_reads
+
+ assert floor(reads.p95) in 1..7,
+ "expected mean_read to converge into the 1..7 range (got #{reads.p95})"
+
+ writes = stats.mean_writes
+      assert round(writes.p99) in 2..6,
+             "expected mean_writes to converge into the 2..6 range (got #{writes.p99})"
+
+ assert stats.latency.p95 < @transaction_timeout,
+ "expected latency for 95% batches under @transaction_timout"
+
+ found_after = initial_search_speed(measurments)
+
+ assert found_after < 5,
+ "expected to find acceptable budget in less than 5 iterations (got: #{
+ found_after
+ })"
+
+ measurments
+ |> initial_search()
+ |> Enum.reduce(101, fn row, prev_budget ->
+ assert row.budget < prev_budget,
+ "expected to reduce budget while we fail"
+
+ row.budget
+ end)
+ end
+
+ @scenario %{rw_ratio: 1 / 8, target: 3900, write_time: 100}
+ test "#{__ENV__.line} : #{inspect(@scenario)} (optimal)" do
+ {rate_limiter, measurments} = simulate(@scenario, 1000)
+ stats = statistics(measurments)
+ maybe_debug(rate_limiter, measurments, stats)
+
+ assert stats.wait_time.p90 == 100,
+ "expected no artificial delays for more than 90% of batches"
+
+ budget = stats.budget
+
+ assert floor(budget.p95) in 4..7,
+ "expected budget to converge into the 4..7 range (got #{budget.p95})"
+
+ reads = stats.mean_reads
+
+ assert floor(reads.p95) in 4..7,
+ "expected mean_read to converge into the 4..7 range (got #{reads.p95})"
+
+ writes = stats.mean_writes
+      assert round(writes.p99) in 39..41,
+             "expected mean_writes to converge into the 39..41 range (got #{writes.p99})"
+
+ assert stats.latency.p95 < @transaction_timeout,
+ "expected latency for 95% of batches under @transaction_timout"
+
+ found_after = initial_search_speed(measurments)
+
+ assert found_after < 10,
+ "expected to find acceptable budget in less than 10 iterations (got: #{
+ found_after
+ })"
+
+ measurments
+ |> initial_search()
+ |> Enum.reduce(101, fn row, prev_budget ->
+ assert row.budget < prev_budget,
+ "expected to reduce budget while we fail"
+
+ row.budget
+ end)
+ end
+
+ @scenario %{rw_ratio: 1 / 20, target: 3900, write_time: 100}
+ test "#{__ENV__.line} : #{inspect(@scenario)} (overloaded)" do
+ # This is a worst case scenario due to big variability of wait_time and
+ # big value read/write ratio
+ {rate_limiter, measurments} = simulate(@scenario, 1000)
+ stats = statistics(measurments)
+ maybe_debug(rate_limiter, measurments, stats)
+
+ assert stats.wait_time.p90 == 100,
+ "expected no artificial delays for more than 90% of batches"
+
+ budget = stats.budget
+      assert floor(budget.p95) in 1..4,
+             "expected budget to converge into the 1..4 range (got #{budget.p95})"
+      reads = stats.mean_reads
+      assert floor(reads.p95) in 1..4,
+             "expected mean_read to converge into the 1..4 range (got #{reads.p95})"
+      writes = stats.mean_writes
+      assert round(writes.p99) in 39..41,
+             "expected mean_writes to converge into the 39..41 range (got #{writes.p99})"
+
+ assert stats.latency.p90 < @transaction_timeout,
+ "expected latency for 90% of batches under @transaction_timout"
+
+ found_after = initial_search_speed(measurments)
+
+ assert found_after < 16,
+ "expected to find acceptable budget in less than 16 iterations (got: #{
+ found_after
+ })"
+
+ measurments
+ |> initial_search()
+ |> Enum.reduce(101, fn row, prev_budget ->
+ assert row.budget < prev_budget,
+ "expected to reduce budget while we fail"
+
+ row.budget
+ end)
+ end
+ end
+
+ defp simulate(scenario, iterations) do
+ :couch_rate_ets.create_tables()
+
+ limiter =
+ RL.new(:limiter_id, :couch_rate_limiter, nil, %{
+ budget: 100,
+ target: scenario.target,
+ # average over 20 last measurments
+ window: scenario.write_time * 20,
+ sensitivity: scenario.write_time,
+ timer: &timer/0
+ })
+
+ result =
+ Enum.reduce(0..iterations, {limiter, []}, fn _idx, {limiter, stats} ->
+ {budget, limiter} = step(limiter, scenario.rw_ratio, scenario.write_time)
+ {limiter, update_measurments(limiter, stats, budget)}
+ end)
+
+ :couch_rate_ets.delete_tables()
+ result
+ end
+
+ defp step(limiter, read_write_ratio, write_time) do
+ {reads, limiter} = RL.budget(limiter)
+ writes = round(reads / read_write_ratio)
+ {delay, limiter} = RL.delay(limiter)
+ sleep(delay)
+ data_before = RL.to_map(limiter)
+ {:ok, limiter} = RL.in(limiter, reads)
+ data_after = RL.to_map(limiter)
+
+ assert data_after.size <= data_after.window_size + 1,
+ "The number of elements in minimums container shouldn't grow (got: #{
+ data_after.size
+ })"
+
+ if data_before.writes == 0 and
+ data_after.writes == 0 and
+ data_before.reads != 0 do
+ assert data_before.reads > data_after.reads,
+ "expected to reduce number of reads while transaction fails"
+ end
+
+ total_write_time =
+ 0..writes
+ |> Enum.reduce_while(0, fn _, acc ->
+ write_time = :rand.normal(write_time, write_time * 0.25)
+
+ if acc < @transaction_timeout do
+ {:cont, acc + write_time}
+ else
+ {:halt, acc}
+ end
+ end)
+
+ sleep(total_write_time)
+
+ if total_write_time < @transaction_timeout do
+ {:ok, limiter} = RL.success(limiter, writes)
+ {reads, limiter}
+ else
+ {:ok, limiter} = RL.failure(limiter)
+ {reads, limiter}
+ end
+ end
+
+ defp update_measurments(limiter, stats, budget) do
+ data = RL.to_map(limiter)
+ {wait_time, _} = RL.delay(limiter)
+
+ stats ++
+ [
+ %{
+ budget: budget,
+ slack: data.target - data.latency,
+ rw_ratio: data.mean_reads / max(1, data.mean_writes),
+ latency: data.latency,
+ new_budget: budget,
+ minimum_latency: RL.min_latency(limiter),
+ wait_time: wait_time,
+ elements_in_min_queue: data.size,
+ mean_reads: data.mean_reads,
+ mean_writes: data.mean_writes,
+ total_reads: data.reads,
+ total_writes: data.writes
+ }
+ ]
+ end
+
+ defp timer() do
+ now = Process.get(:time, 1)
+ Process.put(:time, now + 1)
+ now
+ end
+
+ defp sleep(sleep_time_in_ms) do
+ now = timer()
+ Process.put(:time, now + sleep_time_in_ms - 1)
+ end
+
+ defp format_table([first | _] = rows) do
+ spec =
+ first
+ |> Map.keys()
+ |> Enum.map(fn h -> {h, String.length(to_str(h))} end)
+
+ header = first |> Map.keys() |> Enum.map(&to_str/1) |> Enum.join(" , ")
+
+ lines =
+ Enum.map(rows, fn row ->
+ fields =
+ Enum.map(spec, fn {field, size} ->
+ String.pad_trailing("#{to_str(Map.get(row, field))}", size)
+ end)
+
+ Enum.join(fields, " , ")
+ end)
+
+ Enum.join([header | lines], "\n")
+ end
+
+ defp initial_search_speed(measurments) do
+ length(initial_search(measurments))
+ end
+
+ defp initial_search(measurments) do
+ Enum.reduce_while(measurments, [], fn row, acc ->
+ if row.total_writes == 0 do
+ {:cont, acc ++ [row]}
+ else
+ {:halt, acc}
+ end
+ end)
+ end
+
+ defp statistics(measurments) do
+ data =
+ Enum.reduce(measurments, %{}, fn row, acc ->
+ Enum.reduce(row, acc, fn {key, value}, acc ->
+ Map.update(acc, key, [], fn metric ->
+ metric ++ [value]
+ end)
+ end)
+ end)
+
+ Enum.reduce(data, %{}, fn {key, values}, acc ->
+ stats = Enum.into(:bear.get_statistics(values), %{})
+ {percentile, stats} = Map.pop(stats, :percentile)
+
+ stats =
+ Enum.reduce(percentile, stats, fn {key, value}, acc ->
+ Map.put(acc, String.to_atom("p#{to_str(key)}"), value)
+ end)
+
+ Map.put(acc, key, stats)
+ end)
+ end
+
+ defp format_stats(stats) do
+ rows =
+ Enum.map(stats, fn {key, values} ->
+ values
+ |> Enum.into(%{})
+ |> Map.put(:metric, key)
+ |> Map.delete(:histogram)
+ end)
+
+ format_table(rows)
+ end
+
+ defp to_str(int) when is_integer(int) do
+ "#{int}"
+ end
+
+ defp to_str(float) when is_float(float) do
+ "#{Float.to_string(Float.round(float, 2))}"
+ end
+
+ defp to_str(atom) when is_atom(atom) do
+ Atom.to_string(atom)
+ end
+
+ defp to_str(string) when is_binary(string) do
+ string
+ end
+
+ defp to_map(rate_limiter) do
+ RL.to_map(rate_limiter)
+ end
+
+ defp maybe_debug(rate_limiter, measurments, stats) do
+ if System.fetch_env("EXUNIT_DEBUG") != :error do
+ IO.puts("")
+ IO.puts("rate_limiter: #{inspect(to_map(rate_limiter))}")
+ IO.puts("measurments: #{inspect(measurments)}")
+ IO.puts("stats: #{inspect(stats)}")
+
+ IO.puts("\n" <> format_table(measurments) <> "\n" <> format_stats(stats))
+ end
+ end
+end
diff --git a/src/couch_rate/test/exunit/test_helper.exs b/src/couch_rate/test/exunit/test_helper.exs
new file mode 100644
index 000000000..9b9d6ef94
--- /dev/null
+++ b/src/couch_rate/test/exunit/test_helper.exs
@@ -0,0 +1,14 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+ExUnit.configure(formatters: [JUnitFormatter, ExUnit.CLIFormatter])
+ExUnit.start()
diff --git a/src/couch_replicator/src/couch_replicator_auth_session.erl b/src/couch_replicator/src/couch_replicator_auth_session.erl
index 30f499a33..a59c770b4 100644
--- a/src/couch_replicator/src/couch_replicator_auth_session.erl
+++ b/src/couch_replicator/src/couch_replicator_auth_session.erl
@@ -187,7 +187,7 @@ format_status(_Opt, [_PDict, State]) ->
[
{epoch, State#state.epoch},
{user, State#state.user},
- {session_url, State#state.session_url},
+ {session_url, couch_util:url_strip_password(State#state.session_url)},
{refresh_tstamp, State#state.refresh_tstamp}
].
diff --git a/src/couch_replicator/src/couch_replicator_httpc_pool.erl b/src/couch_replicator/src/couch_replicator_httpc_pool.erl
index 90234a6a0..c63a5efa6 100644
--- a/src/couch_replicator/src/couch_replicator_httpc_pool.erl
+++ b/src/couch_replicator/src/couch_replicator_httpc_pool.erl
@@ -20,7 +20,7 @@
% gen_server API
-export([init/1, handle_call/3, handle_info/2, handle_cast/2]).
--export([code_change/3, terminate/2]).
+-export([code_change/3, terminate/2, format_status/2]).
-include_lib("couch/include/couch_db.hrl").
@@ -145,6 +145,18 @@ code_change(_OldVsn, #state{}=State, _Extra) ->
terminate(_Reason, _State) ->
ok.
+format_status(_Opt, [_PDict, State]) ->
+    #state{
+        url = Url,
+        proxy_url = ProxyURL,
+        limit = Limit
+    } = State,
+    {[
+        {url, couch_util:url_strip_password(Url)},
+        {proxy_url, couch_util:url_strip_password(ProxyURL)},
+        {limit, Limit}
+    ]}.
+
monitor_client(Callers, Worker, {ClientPid, _}) ->
[{Worker, erlang:monitor(process, ClientPid)} | Callers].
diff --git a/src/couch_replicator/src/couch_replicator_scheduler.erl b/src/couch_replicator/src/couch_replicator_scheduler.erl
index 641443a7c..db5c85da3 100644
--- a/src/couch_replicator/src/couch_replicator_scheduler.erl
+++ b/src/couch_replicator/src/couch_replicator_scheduler.erl
@@ -225,7 +225,8 @@ update_job_stats(JobId, Stats) ->
%% gen_server functions
init(_) ->
- config:enable_feature('scheduler'),
+ % Temporarily disable on FDB, as it's not fully implemented yet
+ % config:enable_feature('scheduler'),
EtsOpts = [named_table, {keypos, #job.id}, {read_concurrency, true},
{write_concurrency, true}],
?MODULE = ets:new(?MODULE, EtsOpts),
diff --git a/src/couch_stats/src/couch_stats_aggregator.erl b/src/couch_stats/src/couch_stats_aggregator.erl
index 0416636c9..8d8cdf7e5 100644
--- a/src/couch_stats/src/couch_stats_aggregator.erl
+++ b/src/couch_stats/src/couch_stats_aggregator.erl
@@ -27,7 +27,8 @@
handle_cast/2,
handle_info/2,
code_change/3,
- terminate/2
+ terminate/2,
+ format_status/2
]).
@@ -88,6 +89,20 @@ terminate(_Reason, _State) ->
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
+format_status(_Opt, [_PDict, State]) ->
+    #st{
+        descriptions = Descs,
+        stats = Stats,
+        collect_timer = CollectT,
+        reload_timer = ReloadT
+    } = State,
+    [{data, [{"State", [
+        {descriptions, {set_size, sets:size(Descs)}},
+        {stats, {length, length(Stats)}},
+        {collect_timer, CollectT},
+        {reload_timer, ReloadT}
+    ]}]}].
+
comparison_set(Metrics) ->
sets:from_list(
[{Name, proplists:get_value(type, Props)} || {Name, Props} <- Metrics]
diff --git a/src/couch_views/.gitignore b/src/couch_views/.gitignore
new file mode 100644
index 000000000..f1c455451
--- /dev/null
+++ b/src/couch_views/.gitignore
@@ -0,0 +1,19 @@
+.rebar3
+_*
+.eunit
+*.o
+*.beam
+*.plt
+*.swp
+*.swo
+.erlang.cookie
+ebin
+log
+erl_crash.dump
+.rebar
+logs
+_build
+.idea
+*.iml
+rebar3.crashdump
+*~
diff --git a/src/couch_views/README.md b/src/couch_views/README.md
new file mode 100644
index 000000000..5647913f0
--- /dev/null
+++ b/src/couch_views/README.md
@@ -0,0 +1,48 @@
+CouchDB Views
+=====
+
+This is the new application that builds and runs map/reduce views against FoundationDB.
+Currently only map indexes are supported, and queries always return the full index.
+
+Code layout:
+
+* `couch_views` - Main entry point to query a view
+* `couch_views_reader` - Reads from the index for queries
+* `couch_views_indexer` - `couch_jobs` worker that builds an index from the changes feed.
+* `couch_views_jobs` - `couch_views` interactions with `couch_jobs`. It handles adding index jobs and subscribes to jobs.
+* `couch_views_fdb` - Maps view operations to FoundationDB logic.
+* `couch_views_encoding` - Encodes view keys that are byte comparable following CouchDB view sort order.
+* `couch_views_server` - Spawns `couch_views_indexer` workers to handle index update jobs.
+
+# Configuration
+
+## Configuring rate limiter
+
+Here is an example of the configuration used in the `couch_views` application:
+
+```
+[couch_rate.views]
+limiter = couch_rate_limiter
+opts = #{budget => 100, target => 2500, window => 60000, sensitivity => 1000}
+```
+
+Supported fields in `opts`:
+
+* `budget` - the initial value for estimated batch size
+* `target` - the amount in msec which we try to maintain for batch processing time
+* `window` - time interval for contention detector
+* `sensitivity` - minimal interval within the `window`
+
+Additional fields in `opts` (use only if you really know what you are doing):
+
+* `window_size` - how many batches to consider in contention detector
+* `timer` - this is used for testing to fast forward time `fun() -> current_time_in_ms() end`
+* `target` - the amount in msec which we try to maintain for batch processing time
+* `underload_threshold` - a threshold below which we would try to increase the budget
+* `overload_threshold` - a threshold above which we would start decreasing the budget
+* `delay_threshold` - a threshold above which we would start introducing delays between batches
+* `multiplicative_factor` - determines how fast we are going to decrease budget (must be in (0..1) range)
+* `regular_delay` - delay between batches when there is no overload
+* `congested_delay` - delay between batches when there is an overload
+* `initial_budget` - initial value for budget to start with
+
diff --git a/src/couch_views/include/couch_views.hrl b/src/couch_views/include/couch_views.hrl
new file mode 100644
index 000000000..3d0110f65
--- /dev/null
+++ b/src/couch_views/include/couch_views.hrl
@@ -0,0 +1,33 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+% Index info/data subspaces
+-define(VIEW_INFO, 0).
+-define(VIEW_DATA, 1).
+
+% Index info keys
+-define(VIEW_UPDATE_SEQ, 0).
+-define(VIEW_ROW_COUNT, 1).
+-define(VIEW_KV_SIZE, 2).
+-define(VIEW_BUILD_STATUS, 3).
+-define(VIEW_CREATION_VS, 4).
+
+% Data keys
+-define(VIEW_ID_RANGE, 0).
+-define(VIEW_MAP_RANGE, 1).
+
+% jobs api
+-define(INDEX_JOB_TYPE, <<"views">>).
+
+% indexing progress
+-define(INDEX_BUILDING, <<"building">>).
+-define(INDEX_READY, <<"ready">>).
diff --git a/src/couch_views/rebar.config b/src/couch_views/rebar.config
new file mode 100644
index 000000000..362c8785e
--- /dev/null
+++ b/src/couch_views/rebar.config
@@ -0,0 +1,14 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+{cover_enabled, true}.
+{cover_print_enabled, true}.
diff --git a/src/couch_views/src/couch_views.app.src b/src/couch_views/src/couch_views.app.src
new file mode 100644
index 000000000..cb8285ac2
--- /dev/null
+++ b/src/couch_views/src/couch_views.app.src
@@ -0,0 +1,34 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+{application, couch_views, [
+ {description, "CouchDB Views on FDB"},
+ {vsn, git},
+ {mod, {couch_views_app, []}},
+ {registered, [
+ couch_views_sup,
+ couch_views_server
+ ]},
+ {applications, [
+ kernel,
+ stdlib,
+ erlfdb,
+ couch_epi,
+ couch_log,
+ config,
+ couch_stats,
+ fabric,
+ couch_jobs,
+ couch_eval,
+ couch_rate
+ ]}
+]}.
diff --git a/src/couch_views/src/couch_views.erl b/src/couch_views/src/couch_views.erl
new file mode 100644
index 000000000..d9ba0c16b
--- /dev/null
+++ b/src/couch_views/src/couch_views.erl
@@ -0,0 +1,236 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views).
+
+
+-behavior(fabric2_index).
+
+
+-export([
+ query/6,
+
+ % fabric2_index behavior
+ build_indices/2,
+ cleanup_indices/2,
+ get_info/2
+]).
+
+-include("couch_views.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+
+
+query(Db, DDoc, ViewName, Callback, Acc0, Args0) ->
+ case fabric2_db:is_users_db(Db) of
+ true ->
+ fabric2_users_db:after_doc_read(DDoc, Db);
+ false ->
+ ok
+ end,
+
+ DbName = fabric2_db:name(Db),
+ IsInteractive = couch_views_ddoc:is_interactive(DDoc),
+ {ok, Mrst} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
+
+ #mrst{
+ views = Views
+ } = Mrst,
+
+ Args1 = to_mrargs(Args0),
+ Args2 = couch_mrview_util:set_view_type(Args1, ViewName, Views),
+ Args3 = couch_mrview_util:validate_args(Args2),
+ ok = check_range(Args3),
+ case is_reduce_view(Args3) of
+ true -> throw(not_implemented);
+ false -> ok
+ end,
+
+ try
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ ok = maybe_update_view(TxDb, Mrst, IsInteractive, Args3),
+ read_view(TxDb, Mrst, ViewName, Callback, Acc0, Args3)
+ end)
+ catch throw:{build_view, WaitSeq} ->
+ couch_views_jobs:build_view(Db, Mrst, WaitSeq),
+ read_view(Db, Mrst, ViewName, Callback, Acc0, Args3)
+ end.
+
+
+build_indices(#{} = Db, DDocs) when is_list(DDocs) ->
+ DbName = fabric2_db:name(Db),
+ lists:filtermap(fun(DDoc) ->
+ try couch_views_util:ddoc_to_mrst(DbName, DDoc) of
+ {ok, #mrst{} = Mrst} ->
+ {true, couch_views_jobs:build_view_async(Db, Mrst)}
+ catch _:_ ->
+ false
+ end
+ end, DDocs).
+
+
+cleanup_indices(#{} = Db, DDocs) when is_list(DDocs) ->
+ DbName = fabric2_db:name(Db),
+ ActiveSigs = lists:filtermap(fun(DDoc) ->
+ try couch_views_util:ddoc_to_mrst(DbName, DDoc) of
+ {ok, #mrst{sig = Sig}} ->
+ {true, Sig}
+ catch _:_ ->
+ false
+ end
+ end, DDocs),
+ ExistingSigs = couch_views_fdb:list_signatures(Db),
+ StaleSigs = ExistingSigs -- ActiveSigs,
+ lists:foreach(fun(Sig) ->
+ couch_views_jobs:remove(Db, Sig),
+ couch_views_fdb:clear_index(Db, Sig)
+ end, StaleSigs).
+
+
+get_info(Db, DDoc) ->
+ DbName = fabric2_db:name(Db),
+ {ok, Mrst} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
+ Sig = fabric2_util:to_hex(Mrst#mrst.sig),
+ {UpdateSeq, DataSize, Status} = fabric2_fdb:transactional(Db, fun(TxDb) ->
+ Seq = couch_views_fdb:get_update_seq(TxDb, Mrst),
+ DataSize = get_total_view_size(TxDb, Mrst),
+ JobStatus = case couch_views_jobs:job_state(TxDb, Mrst) of
+ {ok, pending} -> true;
+ {ok, running} -> true;
+ {ok, finished} -> false;
+ {error, not_found} -> false
+ end,
+ {Seq, DataSize, JobStatus}
+ end),
+ UpdateOptions = get_update_options(Mrst),
+ {ok, [
+ {language, Mrst#mrst.language},
+ {signature, Sig},
+ {sizes, {[
+ {active, DataSize}
+ ]}},
+ {update_seq, UpdateSeq},
+ {updater_running, Status},
+ {update_options, UpdateOptions}
+ ]}.
+
+
+get_total_view_size(TxDb, Mrst) ->
+ ViewIds = [View#mrview.id_num || View <- Mrst#mrst.views],
+ lists:foldl(fun (ViewId, Total) ->
+ Total + couch_views_fdb:get_kv_size(TxDb, Mrst, ViewId)
+ end, 0, ViewIds).
+
+
+read_view(Db, Mrst, ViewName, Callback, Acc0, Args) ->
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ try
+ couch_views_reader:read(TxDb, Mrst, ViewName, Callback, Acc0, Args)
+ after
+ UpdateAfter = Args#mrargs.update == lazy,
+ if UpdateAfter == false -> ok; true ->
+ couch_views_jobs:build_view_async(TxDb, Mrst)
+ end
+ end
+ end).
+
+
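+% maybe_update_view/4 throws {build_view, WaitSeq} when the index still
+% needs building; query/6 above catches that, builds the view via
+% couch_views_jobs:build_view/3 and then retries the read.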
+maybe_update_view(_Db, _Mrst, _, #mrargs{update = false}) ->
+ ok;
+
+maybe_update_view(_Db, _Mrst, _, #mrargs{update = lazy}) ->
+ ok;
+
+maybe_update_view(TxDb, Mrst, true, _Args) ->
+ BuildState = couch_views_fdb:get_build_status(TxDb, Mrst),
+ if BuildState == ?INDEX_READY -> ok; true ->
+ VS = couch_views_fdb:get_creation_vs(TxDb, Mrst),
+ throw({build_view, fabric2_fdb:vs_to_seq(VS)})
+ end;
+
+maybe_update_view(TxDb, Mrst, false, _Args) ->
+ DbSeq = fabric2_db:get_update_seq(TxDb),
+ ViewSeq = couch_views_fdb:get_update_seq(TxDb, Mrst),
+ case DbSeq == ViewSeq of
+ true -> ok;
+ false -> throw({build_view, DbSeq})
+ end.
+
+
+is_reduce_view(#mrargs{view_type = ViewType}) ->
+ ViewType =:= red;
+is_reduce_view({Reduce, _, _}) ->
+ Reduce =:= red.
+
+
+to_mrargs(#mrargs{} = Args) ->
+ Args;
+
+to_mrargs(#{} = Args) ->
+ Fields = record_info(fields, mrargs),
+ Indexes = lists:seq(2, record_info(size, mrargs)),
+ LU = lists:zip(Fields, Indexes),
+
+ maps:fold(fun(Key, Value, Acc) ->
+ Index = fabric2_util:get_value(couch_util:to_existing_atom(Key), LU),
+ setelement(Index, Acc, Value)
+ end, #mrargs{}, Args).
+
+
+check_range(#mrargs{start_key = undefined}) ->
+ ok;
+
+check_range(#mrargs{end_key = undefined}) ->
+ ok;
+
+check_range(#mrargs{start_key = K, end_key = K}) ->
+ ok;
+
+check_range(Args) ->
+ #mrargs{
+ direction = Dir,
+ start_key = SK,
+ start_key_docid = SKD,
+ end_key = EK,
+ end_key_docid = EKD
+ } = Args,
+
+ case {Dir, view_cmp(SK, SKD, EK, EKD)} of
+ {fwd, false} ->
+ throw(check_range_error(<<"true">>));
+ {rev, true} ->
+ throw(check_range_error(<<"false">>));
+ _ ->
+ ok
+ end.
+
+
+check_range_error(Descending) ->
+ {query_parse_error,
+ <<"No rows can match your key range, reverse your ",
+ "start_key and end_key or set descending=",
+ Descending/binary>>}.
+
+
+view_cmp(SK, SKD, EK, EKD) ->
+ BinSK = couch_views_encoding:encode(SK, key),
+ BinEK = couch_views_encoding:encode(EK, key),
+ PackedSK = erlfdb_tuple:pack({BinSK, SKD}),
+ PackedEK = erlfdb_tuple:pack({BinEK, EKD}),
+ PackedSK =< PackedEK.
+
+
+get_update_options(#mrst{design_opts = Opts}) ->
+ IncDesign = couch_util:get_value(<<"include_design">>, Opts, false),
+ LocalSeq = couch_util:get_value(<<"local_seq">>, Opts, false),
+ UpdateOptions = if IncDesign -> [include_design]; true -> [] end
+ ++ if LocalSeq -> [local_seq]; true -> [] end,
+ [atom_to_binary(O, latin1) || O <- UpdateOptions].
diff --git a/src/couch_views/src/couch_views_app.erl b/src/couch_views/src/couch_views_app.erl
new file mode 100644
index 000000000..7337d0580
--- /dev/null
+++ b/src/couch_views/src/couch_views_app.erl
@@ -0,0 +1,31 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+-module(couch_views_app).
+
+
+-behaviour(application).
+
+
+-export([
+ start/2,
+ stop/1
+]).
+
+
+start(_StartType, _StartArgs) ->
+ couch_views_sup:start_link().
+
+
+stop(_State) ->
+ ok.
diff --git a/src/couch_views/src/couch_views_ddoc.erl b/src/couch_views/src/couch_views_ddoc.erl
new file mode 100644
index 000000000..fae4a3433
--- /dev/null
+++ b/src/couch_views/src/couch_views_ddoc.erl
@@ -0,0 +1,42 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+-module(couch_views_ddoc).
+
+
+-export([
+ get_interactive_list/1,
+ get_mango_list/1,
+ is_interactive/1
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+
+
+% TODO: build a ddoc cache that checks the md_version
+get_interactive_list(Db) ->
+ DDocs = fabric2_db:get_design_docs(Db),
+ lists:filter(fun is_interactive/1, DDocs).
+
+
+get_mango_list(Db) ->
+ DDocs = fabric2_db:get_design_docs(Db),
+ lists:filter(fun (DDoc) ->
+ {Props} = couch_doc:to_json_obj(DDoc, []),
+ fabric2_util:get_value(<<"language">>, Props) == <<"query">>
+ end, DDocs).
+
+
+is_interactive(#doc{} = DDoc) ->
+ {Props} = couch_doc:to_json_obj(DDoc, []),
+ {Opts} = fabric2_util:get_value(<<"options">>, Props, {[]}),
+ fabric2_util:get_value(<<"interactive">>, Opts, false).
diff --git a/src/couch_views/src/couch_views_encoding.erl b/src/couch_views/src/couch_views_encoding.erl
new file mode 100644
index 000000000..2f69db306
--- /dev/null
+++ b/src/couch_views/src/couch_views_encoding.erl
@@ -0,0 +1,117 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_encoding).
+
+
+-export([
+ max/0,
+ encode/1,
+ encode/2,
+ decode/1
+]).
+
+
+-define(NULL, 0).
+-define(FALSE, 1).
+-define(TRUE, 2).
+-define(NUMBER, 3).
+-define(STRING, 4).
+-define(LIST, 5).
+-define(OBJECT, 6).
+-define(MAX, 255).
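+
+% These type tags sort in CouchDB view collation order:
+% null < false < true < numbers < strings < arrays < objects,
+% and max_encoding_value (?MAX) compares greater than any of them.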
+
+
+max() ->
+ max_encoding_value.
+
+
+encode(X) ->
+ encode(X, value).
+
+
+encode(X, Type) when Type == key; Type == value ->
+ erlfdb_tuple:pack(encode_int(X, Type)).
+
+
+decode(Encoded) ->
+ Val = erlfdb_tuple:unpack(Encoded),
+ decode_int(Val).
+
+
+encode_int(null, _Type) ->
+ {?NULL};
+
+encode_int(false, _Type) ->
+ {?FALSE};
+
+encode_int(true, _Type) ->
+ {?TRUE};
+
+encode_int(max_encoding_value, _Type) ->
+ {?MAX};
+
+encode_int(Num, key) when is_number(Num) ->
+ {?NUMBER, float(Num)};
+
+encode_int(Num, value) when is_number(Num) ->
+ {?NUMBER, Num};
+
+encode_int(Bin, key) when is_binary(Bin) ->
+ {?STRING, couch_util:get_sort_key(Bin)};
+
+encode_int(Bin, value) when is_binary(Bin) ->
+ {?STRING, Bin};
+
+encode_int(List, Type) when is_list(List) ->
+ Encoded = lists:map(fun(Item) ->
+ encode_int(Item, Type)
+ end, List),
+ {?LIST, list_to_tuple(Encoded)};
+
+encode_int({Props}, Type) when is_list(Props) ->
+ Encoded = lists:map(fun({K, V}) ->
+ EK = encode_int(K, Type),
+ EV = encode_int(V, Type),
+ {EK, EV}
+ end, Props),
+ {?OBJECT, list_to_tuple(Encoded)}.
+
+
+decode_int({?NULL}) ->
+ null;
+
+decode_int({?FALSE}) ->
+ false;
+
+decode_int({?TRUE}) ->
+ true;
+
+decode_int({?MAX}) ->
+ max_encoding_value;
+
+decode_int({?STRING, Bin}) ->
+ Bin;
+
+decode_int({?NUMBER, Num}) ->
+ Num;
+
+decode_int({?LIST, List}) ->
+ lists:map(fun decode_int/1, tuple_to_list(List));
+
+decode_int({?OBJECT, Object}) ->
+ Props = lists:map(fun({EK, EV}) ->
+ K = decode_int(EK),
+ V = decode_int(EV),
+ {K, V}
+ end, tuple_to_list(Object)),
+ {Props}.
diff --git a/src/couch_views/src/couch_views_epi.erl b/src/couch_views/src/couch_views_epi.erl
new file mode 100644
index 000000000..127b09f13
--- /dev/null
+++ b/src/couch_views/src/couch_views_epi.erl
@@ -0,0 +1,60 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+-module(couch_views_epi).
+
+
+-behaviour(couch_epi_plugin).
+
+
+-export([
+ app/0,
+ providers/0,
+ services/0,
+ data_subscriptions/0,
+ data_providers/0,
+ processes/0,
+ notify/3
+]).
+
+
+app() ->
+ couch_views.
+
+
+providers() ->
+ [
+ {fabric2_db, couch_views_fabric2_plugin}
+ ].
+
+
+services() ->
+ [
+ {couch_views, couch_views_plugin}
+ ].
+
+
+data_subscriptions() ->
+ [].
+
+
+data_providers() ->
+ [].
+
+
+processes() ->
+ [].
+
+
+notify(_Key, _Old, _New) ->
+ ok.
diff --git a/src/couch_views/src/couch_views_fabric2_plugin.erl b/src/couch_views/src/couch_views_fabric2_plugin.erl
new file mode 100644
index 000000000..cae0e1f75
--- /dev/null
+++ b/src/couch_views/src/couch_views_fabric2_plugin.erl
@@ -0,0 +1,24 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+-module(couch_views_fabric2_plugin).
+
+
+-export([
+ after_doc_write/6
+]).
+
+
+after_doc_write(Db, Doc, NewWinner, OldWinner, NewRevId, Seq)->
+ couch_views_updater:index(Db, Doc, NewWinner, OldWinner, NewRevId, Seq),
+ [Db, Doc, NewWinner, OldWinner, NewRevId, Seq].
diff --git a/src/couch_views/src/couch_views_fdb.erl b/src/couch_views/src/couch_views_fdb.erl
new file mode 100644
index 000000000..c95722230
--- /dev/null
+++ b/src/couch_views/src/couch_views_fdb.erl
@@ -0,0 +1,475 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_fdb).
+
+-export([
+ new_interactive_index/3,
+ new_creation_vs/3,
+ get_creation_vs/2,
+ get_build_status/2,
+ set_build_status/3,
+
+ get_update_seq/2,
+ set_update_seq/3,
+
+ get_row_count/3,
+ get_kv_size/3,
+
+ fold_map_idx/6,
+
+ write_doc/4,
+
+ list_signatures/1,
+ clear_index/2
+]).
+
+-ifdef(TEST).
+-compile(export_all).
+-compile(nowarn_export_all).
+-endif.
+
+-define(LIST_VALUE, 0).
+-define(JSON_VALUE, 1).
+-define(VALUE, 2).
+
+
+-include("couch_views.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("fabric/include/fabric2.hrl").
+
+
+new_interactive_index(Db, Mrst, VS) ->
+ couch_views_fdb:new_creation_vs(Db, Mrst, VS),
+ couch_views_fdb:set_build_status(Db, Mrst, ?INDEX_BUILDING).
+
+
+% Interactive View Creation Versionstamp
+% (<db>, ?DB_VIEWS, ?VIEW_INFO, ?VIEW_CREATION_VS, Sig) = VS
+
+new_creation_vs(TxDb, #mrst{} = Mrst, VS) ->
+ #{
+ tx := Tx
+ } = TxDb,
+ Key = creation_vs_key(TxDb, Mrst#mrst.sig),
+ Value = erlfdb_tuple:pack_vs({VS}),
+ ok = erlfdb:set_versionstamped_value(Tx, Key, Value).
+
+
+get_creation_vs(TxDb, #mrst{} = Mrst) ->
+ get_creation_vs(TxDb, Mrst#mrst.sig);
+
+get_creation_vs(TxDb, Sig) ->
+ #{
+ tx := Tx
+ } = TxDb,
+ Key = creation_vs_key(TxDb, Sig),
+ case erlfdb:wait(erlfdb:get(Tx, Key)) of
+ not_found ->
+ not_found;
+ EK ->
+ {VS} = erlfdb_tuple:unpack(EK),
+ VS
+ end.
+
+
+% Interactive View Build Status
+% (<db>, ?DB_VIEWS, ?VIEW_INFO, ?VIEW_BUILD_STATUS, Sig) = INDEX_BUILDING | INDEX_READY
+
+get_build_status(TxDb, #mrst{sig = Sig}) ->
+ #{
+ tx := Tx
+ } = TxDb,
+ Key = build_status_key(TxDb, Sig),
+ erlfdb:wait(erlfdb:get(Tx, Key)).
+
+
+set_build_status(TxDb, #mrst{sig = Sig}, State) ->
+ #{
+ tx := Tx
+ } = TxDb,
+
+ Key = build_status_key(TxDb, Sig),
+ ok = erlfdb:set(Tx, Key, State).
+
+
+% View Build Sequence Access
+% (<db>, ?DB_VIEWS, ?VIEW_INFO, ?VIEW_UPDATE_SEQ, Sig) = Sequence
+
+
+get_update_seq(TxDb, #mrst{sig = Sig}) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = TxDb,
+
+ case erlfdb:wait(erlfdb:get(Tx, seq_key(DbPrefix, Sig))) of
+ not_found -> <<>>;
+ UpdateSeq -> UpdateSeq
+ end.
+
+
+set_update_seq(TxDb, Sig, Seq) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = TxDb,
+ ok = erlfdb:set(Tx, seq_key(DbPrefix, Sig), Seq).
+
+
+get_row_count(TxDb, #mrst{sig = Sig}, ViewId) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = TxDb,
+
+ case erlfdb:wait(erlfdb:get(Tx, row_count_key(DbPrefix, Sig, ViewId))) of
+ not_found -> 0; % Can this happen?
+ CountBin -> ?bin2uint(CountBin)
+ end.
+
+
+get_kv_size(TxDb, #mrst{sig = Sig}, ViewId) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = TxDb,
+
+ case erlfdb:wait(erlfdb:get(Tx, kv_size_key(DbPrefix, Sig, ViewId))) of
+ not_found -> 0; % Can this happen?
+ SizeBin -> ?bin2uint(SizeBin)
+ end.
+
+
+fold_map_idx(TxDb, Sig, ViewId, Options, Callback, Acc0) ->
+ #{
+ db_prefix := DbPrefix
+ } = TxDb,
+
+ MapIdxPrefix = map_idx_prefix(DbPrefix, Sig, ViewId),
+ FoldAcc = #{
+ prefix => MapIdxPrefix,
+ callback => Callback,
+ acc => Acc0
+ },
+ Fun = aegis:wrap_fold_fun(TxDb, fun fold_fwd/2),
+
+ #{
+ acc := Acc1
+ } = fabric2_fdb:fold_range(TxDb, MapIdxPrefix, Fun, FoldAcc, Options),
+
+ Acc1.
+
+
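+% The id index stores, per document and view, the encoded
+% [TotalKeys, TotalSize, UniqueKeys] triple (see update_id_idx/6 and
+% get_view_keys/3). write_doc/4 reads it back to clear the document's old
+% map rows and to apply row count and kv size deltas before writing new rows.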
+write_doc(TxDb, Sig, _ViewIds, #{deleted := true} = Doc) ->
+ #{
+ id := DocId
+ } = Doc,
+
+ ExistingViewKeys = get_view_keys(TxDb, Sig, DocId),
+
+ clear_id_idx(TxDb, Sig, DocId),
+ lists:foreach(fun({ViewId, TotalKeys, TotalSize, UniqueKeys}) ->
+ clear_map_idx(TxDb, Sig, ViewId, DocId, UniqueKeys),
+ update_row_count(TxDb, Sig, ViewId, -TotalKeys),
+ update_kv_size(TxDb, Sig, ViewId, -TotalSize)
+ end, ExistingViewKeys);
+
+write_doc(TxDb, Sig, ViewIds, Doc) ->
+ #{
+ id := DocId,
+ results := Results,
+ kv_sizes := KVSizes
+ } = Doc,
+
+ ExistingViewKeys = get_view_keys(TxDb, Sig, DocId),
+
+ clear_id_idx(TxDb, Sig, DocId),
+
+ lists:foreach(fun({ViewId, NewRows, KVSize}) ->
+ update_id_idx(TxDb, Sig, ViewId, DocId, NewRows, KVSize),
+
+ ExistingKeys = case lists:keyfind(ViewId, 1, ExistingViewKeys) of
+ {ViewId, TotalRows, TotalSize, EKeys} ->
+ RowChange = length(NewRows) - TotalRows,
+ update_row_count(TxDb, Sig, ViewId, RowChange),
+ update_kv_size(TxDb, Sig, ViewId, KVSize - TotalSize),
+ EKeys;
+ false ->
+ RowChange = length(NewRows),
+ update_row_count(TxDb, Sig, ViewId, RowChange),
+ update_kv_size(TxDb, Sig, ViewId, KVSize),
+ []
+ end,
+ update_map_idx(TxDb, Sig, ViewId, DocId, ExistingKeys, NewRows)
+ end, lists:zip3(ViewIds, Results, KVSizes)).
+
+
+list_signatures(Db) ->
+ #{
+ db_prefix := DbPrefix
+ } = Db,
+ ViewSeqRange = {?DB_VIEWS, ?VIEW_INFO, ?VIEW_UPDATE_SEQ},
+ RangePrefix = erlfdb_tuple:pack(ViewSeqRange, DbPrefix),
+ fabric2_fdb:fold_range(Db, RangePrefix, fun({Key, _Val}, Acc) ->
+ {Sig} = erlfdb_tuple:unpack(Key, RangePrefix),
+ [Sig | Acc]
+ end, [], []).
+
+
+clear_index(Db, Signature) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = Db,
+
+ % Clear index info keys
+ Keys = [
+ {?DB_VIEWS, ?VIEW_INFO, ?VIEW_UPDATE_SEQ, Signature},
+ {?DB_VIEWS, ?VIEW_INFO, ?VIEW_ROW_COUNT, Signature},
+ {?DB_VIEWS, ?VIEW_INFO, ?VIEW_KV_SIZE, Signature}
+ ],
+ lists:foreach(fun(Key) ->
+ FDBKey = erlfdb_tuple:pack(Key, DbPrefix),
+ erlfdb:clear(Tx, FDBKey)
+ end, Keys),
+
+ % Clear index data
+ RangeTuple = {?DB_VIEWS, ?VIEW_DATA, Signature},
+ RangePrefix = erlfdb_tuple:pack(RangeTuple, DbPrefix),
+ erlfdb:clear_range_startswith(Tx, RangePrefix).
+
+
+% For each row in a map view we store the key/value
+% in FoundationDB:
+%
+% `(EncodedSortKey, (EncodedKey, EncodedValue))`
+%
+% The difference between `EncodedSortKey` and `EncodedKey` is
+% the use of `couch_util:get_sort_key/1` which turns UTF-8
+% strings into binaries that are byte comparable. Given a sort
+% key binary we cannot recover the input, so to return unmodified
+% user data we are forced to store the original.
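+%
+% Roughly, a row emitted as {Key, Value} for document DocId ends up as:
+%
+% FDBKey = erlfdb_tuple:pack({{EncodedSortKey, DocId}, DupeId}, MapIdxPrefix)
+% FDBValue = erlfdb_tuple:pack({EncodedKey, EncodedValue})
+%
+% (see update_map_idx/6 and process_rows/1 below).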
+
+fold_fwd({RowKey, PackedKeyValue}, Acc) ->
+ #{
+ prefix := Prefix,
+ callback := UserCallback,
+ acc := UserAcc0
+ } = Acc,
+
+ {{_SortKey, DocId}, _DupeId} =
+ erlfdb_tuple:unpack(RowKey, Prefix),
+
+ {EncodedOriginalKey, EncodedValue} = erlfdb_tuple:unpack(PackedKeyValue),
+ Value = couch_views_encoding:decode(EncodedValue),
+ Key = couch_views_encoding:decode(EncodedOriginalKey),
+
+ UserAcc1 = UserCallback(DocId, Key, Value, UserAcc0),
+
+ Acc#{
+ acc := UserAcc1
+ }.
+
+
+clear_id_idx(TxDb, Sig, DocId) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = TxDb,
+
+ {Start, End} = id_idx_range(DbPrefix, Sig, DocId),
+ ok = erlfdb:clear_range(Tx, Start, End).
+
+
+clear_map_idx(TxDb, Sig, ViewId, DocId, ViewKeys) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = TxDb,
+
+ lists:foreach(fun(ViewKey) ->
+ {Start, End} = map_idx_range(DbPrefix, Sig, ViewId, ViewKey, DocId),
+ ok = erlfdb:clear_range(Tx, Start, End)
+ end, ViewKeys).
+
+
+update_id_idx(TxDb, Sig, ViewId, DocId, [], _KVSize) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = TxDb,
+ Key = id_idx_key(DbPrefix, Sig, DocId, ViewId),
+ ok = erlfdb:clear(Tx, Key);
+
+update_id_idx(TxDb, Sig, ViewId, DocId, NewRows, KVSize) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = TxDb,
+
+ Unique = lists:usort([K || {K, _V} <- NewRows]),
+
+ Key = id_idx_key(DbPrefix, Sig, DocId, ViewId),
+ Val = couch_views_encoding:encode([length(NewRows), KVSize, Unique]),
+ ok = erlfdb:set(Tx, Key, aegis:encrypt(TxDb, Key, Val)).
+
+
+update_map_idx(TxDb, Sig, ViewId, DocId, ExistingKeys, NewRows) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = TxDb,
+
+ lists:foreach(fun(RemKey) ->
+ {Start, End} = map_idx_range(DbPrefix, Sig, ViewId, RemKey, DocId),
+ ok = erlfdb:clear_range(Tx, Start, End)
+ end, ExistingKeys),
+
+ KVsToAdd = process_rows(NewRows),
+ MapIdxPrefix = map_idx_prefix(DbPrefix, Sig, ViewId),
+
+ lists:foreach(fun({DupeId, Key1, Key2, EV}) ->
+ KK = map_idx_key(MapIdxPrefix, {Key1, DocId}, DupeId),
+ Val = erlfdb_tuple:pack({Key2, EV}),
+ ok = erlfdb:set(Tx, KK, aegis:encrypt(TxDb, KK, Val))
+ end, KVsToAdd).
+
+
+get_view_keys(TxDb, Sig, DocId) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = TxDb,
+ {Start, End} = id_idx_range(DbPrefix, Sig, DocId),
+ lists:map(fun({K, V}) ->
+ {?DB_VIEWS, ?VIEW_DATA, Sig, ?VIEW_ID_RANGE, DocId, ViewId} =
+ erlfdb_tuple:unpack(K, DbPrefix),
+ [TotalKeys, TotalSize, UniqueKeys] = couch_views_encoding:decode(V),
+ {ViewId, TotalKeys, TotalSize, UniqueKeys}
+ end, aegis:decrypt(TxDb, erlfdb:get_range(Tx, Start, End, []))).
+
+
+update_row_count(TxDb, Sig, ViewId, Increment) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = TxDb,
+ Key = row_count_key(DbPrefix, Sig, ViewId),
+ erlfdb:add(Tx, Key, Increment).
+
+
+update_kv_size(TxDb, Sig, ViewId, Increment) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = TxDb,
+
+ % Track a view-specific size for calls to
+ % GET /dbname/_design/doc/_info
+ IdxKey = kv_size_key(DbPrefix, Sig, ViewId),
+ erlfdb:add(Tx, IdxKey, Increment),
+
+ % Track a database level rollup for calls to
+ % GET /dbname
+ DbKey = db_kv_size_key(DbPrefix),
+ erlfdb:add(Tx, DbKey, Increment).
+
+
+seq_key(DbPrefix, Sig) ->
+ Key = {?DB_VIEWS, ?VIEW_INFO, ?VIEW_UPDATE_SEQ, Sig},
+ erlfdb_tuple:pack(Key, DbPrefix).
+
+
+row_count_key(DbPrefix, Sig, ViewId) ->
+ Key = {?DB_VIEWS, ?VIEW_INFO, ?VIEW_ROW_COUNT, Sig, ViewId},
+ erlfdb_tuple:pack(Key, DbPrefix).
+
+
+kv_size_key(DbPrefix, Sig, ViewId) ->
+ Key = {?DB_VIEWS, ?VIEW_INFO, ?VIEW_KV_SIZE, Sig, ViewId},
+ erlfdb_tuple:pack(Key, DbPrefix).
+
+
+db_kv_size_key(DbPrefix) ->
+ Key = {?DB_STATS, <<"sizes">>, <<"views">>},
+ erlfdb_tuple:pack(Key, DbPrefix).
+
+
+id_idx_key(DbPrefix, Sig, DocId, ViewId) ->
+ Key = {?DB_VIEWS, ?VIEW_DATA, Sig, ?VIEW_ID_RANGE, DocId, ViewId},
+ erlfdb_tuple:pack(Key, DbPrefix).
+
+
+id_idx_range(DbPrefix, Sig, DocId) ->
+ Key = {?DB_VIEWS, ?VIEW_DATA, Sig, ?VIEW_ID_RANGE, DocId},
+ erlfdb_tuple:range(Key, DbPrefix).
+
+
+map_idx_prefix(DbPrefix, Sig, ViewId) ->
+ Key = {?DB_VIEWS, ?VIEW_DATA, Sig, ?VIEW_MAP_RANGE, ViewId},
+ erlfdb_tuple:pack(Key, DbPrefix).
+
+
+map_idx_key(MapIdxPrefix, MapKey, DupeId) ->
+ Key = {MapKey, DupeId},
+ erlfdb_tuple:pack(Key, MapIdxPrefix).
+
+
+map_idx_range(DbPrefix, Sig, ViewId, MapKey, DocId) ->
+ Encoded = couch_views_encoding:encode(MapKey, key),
+ Key = {
+ ?DB_VIEWS,
+ ?VIEW_DATA,
+ Sig,
+ ?VIEW_MAP_RANGE,
+ ViewId,
+ {Encoded, DocId}
+ },
+ erlfdb_tuple:range(Key, DbPrefix).
+
+
+creation_vs_key(Db, Sig) ->
+ #{
+ db_prefix := DbPrefix
+ } = Db,
+ Key = {?DB_VIEWS, ?VIEW_INFO, ?VIEW_CREATION_VS, Sig},
+ erlfdb_tuple:pack(Key, DbPrefix).
+
+
+build_status_key(Db, Sig) ->
+ #{
+ db_prefix := DbPrefix
+ } = Db,
+ Key = {?DB_VIEWS, ?VIEW_INFO, ?VIEW_BUILD_STATUS, Sig},
+ erlfdb_tuple:pack(Key, DbPrefix).
+
+
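+% Encode each emitted {Key, Value}, group rows that share the same encoded
+% key and label each duplicate with an incrementing DupeId (0, 1, ...) so
+% that multiple emits of the same key from one document get distinct FDB keys.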
+process_rows(Rows) ->
+ Encoded = lists:map(fun({K, V}) ->
+ EK1 = couch_views_encoding:encode(K, key),
+ EK2 = couch_views_encoding:encode(K, value),
+ EV = couch_views_encoding:encode(V, value),
+ {EK1, EK2, EV}
+ end, Rows),
+
+ Grouped = lists:foldl(fun({K1, K2, V}, Acc) ->
+ dict:append(K1, {K2, V}, Acc)
+ end, dict:new(), Encoded),
+
+ dict:fold(fun(K1, Vals, DAcc) ->
+ Vals1 = lists:keysort(2, Vals),
+ {_, Labeled} = lists:foldl(fun({K2, V}, {Count, Acc}) ->
+ {Count + 1, [{Count, K1, K2, V} | Acc]}
+ end, {0, []}, Vals1),
+ Labeled ++ DAcc
+ end, [], Grouped).
diff --git a/src/couch_views/src/couch_views_http.erl b/src/couch_views/src/couch_views_http.erl
new file mode 100644
index 000000000..e21acfb9f
--- /dev/null
+++ b/src/couch_views/src/couch_views_http.erl
@@ -0,0 +1,359 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_http).
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+
+-export([
+ parse_body_and_query/2,
+ parse_body_and_query/3,
+ parse_params/2,
+ parse_params/4,
+ row_to_obj/1,
+ row_to_obj/2,
+ view_cb/2,
+ paginated/5,
+ paginated/6
+]).
+
+-define(BOOKMARK_VSN, 1).
+
+parse_body_and_query(#httpd{method='POST'} = Req, Keys) ->
+ Props = chttpd:json_body_obj(Req),
+ parse_body_and_query(Req, Props, Keys);
+
+parse_body_and_query(Req, Keys) ->
+ parse_params(chttpd:qs(Req), Keys, #mrargs{keys=Keys, group=undefined,
+ group_level=undefined}, [keep_group_level]).
+
+parse_body_and_query(Req, {Props}, Keys) ->
+ Args = #mrargs{keys=Keys, group=undefined, group_level=undefined},
+ BodyArgs = parse_params(Props, Keys, Args, [decoded]),
+ parse_params(chttpd:qs(Req), Keys, BodyArgs, [keep_group_level]).
+
+parse_params(#httpd{}=Req, Keys) ->
+ parse_params(chttpd:qs(Req), Keys);
+parse_params(Props, Keys) ->
+ Args = #mrargs{},
+ parse_params(Props, Keys, Args).
+
+
+parse_params(Props, Keys, Args) ->
+ parse_params(Props, Keys, Args, []).
+
+
+parse_params([{"bookmark", Bookmark}], _Keys, #mrargs{}, _Options) ->
+ bookmark_decode(Bookmark);
+
+parse_params(Props, Keys, #mrargs{}=Args, Options) ->
+ case couch_util:get_value("bookmark", Props, nil) of
+ nil ->
+ ok;
+ _ ->
+ throw({bad_request, "Cannot use `bookmark` with other options"})
+ end,
+ couch_mrview_http:parse_params(Props, Keys, Args, Options).
+
+
+row_to_obj(Row) ->
+ Id = couch_util:get_value(id, Row),
+ row_to_obj(Id, Row).
+
+
+row_to_obj(Id, Row) ->
+ couch_mrview_http:row_to_obj(Id, Row).
+
+
+view_cb(Msg, #vacc{paginated = false}=Acc) ->
+ couch_mrview_http:view_cb(Msg, Acc);
+view_cb(Msg, #vacc{paginated = true}=Acc) ->
+ paginated_cb(Msg, Acc).
+
+
+paginated_cb({row, Row}, #vacc{buffer=Buf}=Acc) ->
+ {ok, Acc#vacc{buffer = [row_to_obj(Row) | Buf]}};
+
+paginated_cb({error, Reason}, #vacc{}=_Acc) ->
+ throw({error, Reason});
+
+paginated_cb(complete, #vacc{buffer=Buf}=Acc) ->
+ {ok, Acc#vacc{buffer=lists:reverse(Buf)}};
+
+paginated_cb({meta, Meta}, #vacc{}=VAcc) ->
+ MetaMap = lists:foldl(fun(MetaData, Acc) ->
+ case MetaData of
+ {_Key, undefined} ->
+ Acc;
+ {total, _Value} ->
+ %% We set total_rows elsewhere
+ Acc;
+ {Key, Value} ->
+ maps:put(list_to_binary(atom_to_list(Key)), Value, Acc)
+ end
+ end, #{}, Meta),
+ {ok, VAcc#vacc{meta=MetaMap}}.
+
+
+paginated(Req, EtagTerm, #mrargs{page_size = PageSize} = Args, KeyFun, Fun) ->
+ Etag = couch_httpd:make_etag(EtagTerm),
+ chttpd:etag_respond(Req, Etag, fun() ->
+ hd(do_paginated(PageSize, [Args], KeyFun, Fun))
+ end).
+
+
+paginated(Req, EtagTerm, PageSize, QueriesArgs, KeyFun, Fun) when is_list(QueriesArgs) ->
+ Etag = couch_httpd:make_etag(EtagTerm),
+ chttpd:etag_respond(Req, Etag, fun() ->
+ Results = do_paginated(PageSize, QueriesArgs, KeyFun, Fun),
+ #{results => Results}
+ end).
+
+
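+% Each page fetches one extra row (see set_limit/1) so check_completion/3
+% can tell whether another page exists; the extra row, when present, becomes
+% the `next` bookmark rather than part of the returned rows.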
+do_paginated(PageSize, QueriesArgs, KeyFun, Fun) when is_list(QueriesArgs) ->
+ {_N, Results} = lists:foldl(fun(Args0, {Limit, Acc}) ->
+ case Limit > 0 of
+ true ->
+ {OriginalLimit, Args} = set_limit(Args0#mrargs{page_size = Limit}),
+ {Meta, Items} = Fun(Args),
+ Result0 = maybe_add_next_bookmark(
+ OriginalLimit, PageSize, Args, Meta, Items, KeyFun),
+ Result = maybe_add_previous_bookmark(Args, Result0, KeyFun),
+ #{total_rows := Total} = Result,
+ {Limit - Total, [Result | Acc]};
+ false ->
+ Bookmark = bookmark_encode(Args0),
+ Result = #{
+ rows => [],
+ next => Bookmark,
+ total_rows => 0
+ },
+ {Limit, [Result | Acc]}
+ end
+ end, {PageSize, []}, QueriesArgs),
+ lists:reverse(Results).
+
+
+maybe_add_next_bookmark(OriginalLimit, PageSize, Args0, Response, Items, KeyFun) ->
+ #mrargs{
+ page_size = RequestedLimit,
+ extra = Extra0
+ } = Args0,
+ case check_completion(OriginalLimit, RequestedLimit, Items) of
+ {Rows, nil} ->
+ maps:merge(Response, #{
+ rows => Rows,
+ total_rows => length(Rows)
+ });
+ {Rows, Next} ->
+ {FirstId, FirstKey} = first_key(KeyFun, Rows),
+ {NextId, NextKey} = KeyFun(Next),
+ Extra1 = lists:keystore(fid, 1, Extra0, {fid, FirstId}),
+ Extra2 = lists:keystore(fk, 1, Extra1, {fk, FirstKey}),
+ Args = Args0#mrargs{
+ page_size = PageSize,
+ start_key = NextKey,
+ start_key_docid = NextId,
+ extra = Extra2
+ },
+ Bookmark = bookmark_encode(Args),
+ maps:merge(Response, #{
+ rows => Rows,
+ next => Bookmark,
+ total_rows => length(Rows)
+ })
+ end.
+
+
+maybe_add_previous_bookmark(#mrargs{extra = Extra} = Args, #{rows := Rows} = Result, KeyFun) ->
+ StartKey = couch_util:get_value(fk, Extra),
+ StartId = couch_util:get_value(fid, Extra),
+ case {{StartId, StartKey}, first_key(KeyFun, Rows)} of
+ {{undefined, undefined}, {_, _}} ->
+ Result;
+ {{_, _}, {undefined, undefined}} ->
+ Result;
+ {{StartId, _}, {StartId, _}} ->
+ Result;
+ {{undefined, StartKey}, {undefined, StartKey}} ->
+ Result;
+ {{StartId, StartKey}, {EndId, EndKey}} ->
+ Bookmark = bookmark_encode(
+ Args#mrargs{
+ start_key = StartKey,
+ start_key_docid = StartId,
+ end_key = EndKey,
+ end_key_docid = EndId,
+ inclusive_end = false
+ }
+ ),
+ maps:put(previous, Bookmark, Result)
+ end.
+
+
+first_key(_KeyFun, []) ->
+ {undefined, undefined};
+
+first_key(KeyFun, [First | _]) ->
+ KeyFun(First).
+
+
+set_limit(#mrargs{page_size = PageSize, limit = Limit} = Args)
+ when is_integer(PageSize) andalso Limit > PageSize ->
+ {Limit, Args#mrargs{limit = PageSize + 1}};
+
+set_limit(#mrargs{page_size = PageSize, limit = Limit} = Args)
+ when is_integer(PageSize) ->
+ {Limit, Args#mrargs{limit = Limit + 1}}.
+
+
+check_completion(OriginalLimit, RequestedLimit, Items)
+ when is_integer(OriginalLimit) andalso OriginalLimit =< RequestedLimit ->
+ {Rows, _} = split(OriginalLimit, Items),
+ {Rows, nil};
+
+check_completion(_OriginalLimit, RequestedLimit, Items) ->
+ split(RequestedLimit, Items).
+
+
+split(Limit, Items) when length(Items) > Limit ->
+ case lists:split(Limit, Items) of
+ {Head, [NextItem | _]} ->
+ {Head, NextItem};
+ {Head, []} ->
+ {Head, nil}
+ end;
+
+split(_Limit, Items) ->
+ {Items, nil}.
+
+
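+% A bookmark is essentially a compact #mrargs{}: a version byte, a bitmask
+% of which record fields differ from the defaults, and those field values as
+% a compressed external term, all base64url encoded. bookmark_decode/1
+% reverses the process to rebuild the #mrargs{} record.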
+bookmark_encode(Args0) ->
+ Defaults = #mrargs{},
+ {RevTerms, Mask, _} = lists:foldl(fun(Value, {Acc, Mask, Idx}) ->
+ case element(Idx, Defaults) of
+ Value ->
+ {Acc, Mask, Idx + 1};
+ _Default when Idx == #mrargs.bookmark ->
+ {Acc, Mask, Idx + 1};
+ _Default ->
+ % It's `(Idx - 1)` because the initial `1`
+ % value already accounts for one bit.
+ {[Value | Acc], (1 bsl (Idx - 1)) bor Mask, Idx + 1}
+ end
+ end, {[], 0, 1}, tuple_to_list(Args0)),
+ Terms = lists:reverse(RevTerms),
+ TermBin = term_to_binary(Terms, [compressed, {minor_version, 2}]),
+ MaskBin = binary:encode_unsigned(Mask),
+ RawBookmark = <<?BOOKMARK_VSN, MaskBin/binary, TermBin/binary>>,
+ couch_util:encodeBase64Url(RawBookmark).
+
+
+bookmark_decode(Bookmark) ->
+ try
+ RawBin = couch_util:decodeBase64Url(Bookmark),
+ <<?BOOKMARK_VSN, MaskBin:4/binary, TermBin/binary>> = RawBin,
+ Mask = binary:decode_unsigned(MaskBin),
+ Index = mask_to_index(Mask, 1, []),
+ Terms = binary_to_term(TermBin, [safe]),
+ lists:foldl(fun({Idx, Value}, Acc) ->
+ setelement(Idx, Acc, Value)
+ end, #mrargs{}, lists:zip(Index, Terms))
+ catch _:_ ->
+ throw({bad_request, <<"Invalid bookmark">>})
+ end.
+
+
+mask_to_index(0, _Pos, Acc) ->
+ lists:reverse(Acc);
+mask_to_index(Mask, Pos, Acc) when is_integer(Mask), Mask > 0 ->
+ NewAcc = case Mask band 1 of
+ 0 -> Acc;
+ 1 -> [Pos | Acc]
+ end,
+ mask_to_index(Mask bsr 1, Pos + 1, NewAcc).
+
+
+-ifdef(TEST).
+
+-include_lib("eunit/include/eunit.hrl").
+
+bookmark_encode_decode_test() ->
+ ?assertEqual(
+ #mrargs{page_size = 5},
+ bookmark_decode(bookmark_encode(#mrargs{page_size = 5}))
+ ),
+
+ Randomized = lists:foldl(fun(Idx, Acc) ->
+ if Idx == #mrargs.bookmark -> Acc; true ->
+ setelement(Idx, Acc, couch_uuids:random())
+ end
+ end, #mrargs{}, lists:seq(1, record_info(size, mrargs))),
+
+ ?assertEqual(
+ Randomized,
+ bookmark_decode(bookmark_encode(Randomized))
+ ).
+
+
+check_completion_test() ->
+ ?assertEqual(
+ {[], nil},
+ check_completion(100, 1, [])
+ ),
+ ?assertEqual(
+ {[1], nil},
+ check_completion(100, 1, [1])
+ ),
+ ?assertEqual(
+ {[1], 2},
+ check_completion(100, 1, [1, 2])
+ ),
+ ?assertEqual(
+ {[1], 2},
+ check_completion(100, 1, [1, 2, 3])
+ ),
+ ?assertEqual(
+ {[1, 2], nil},
+ check_completion(100, 3, [1, 2])
+ ),
+ ?assertEqual(
+ {[1, 2, 3], nil},
+ check_completion(100, 3, [1, 2, 3])
+ ),
+ ?assertEqual(
+ {[1, 2, 3], 4},
+ check_completion(100, 3, [1, 2, 3, 4])
+ ),
+ ?assertEqual(
+ {[1, 2, 3], 4},
+ check_completion(100, 3, [1, 2, 3, 4, 5])
+ ),
+ ?assertEqual(
+ {[1], nil},
+ check_completion(1, 1, [1])
+ ),
+ ?assertEqual(
+ {[1, 2], nil},
+ check_completion(2, 3, [1, 2])
+ ),
+ ?assertEqual(
+ {[1, 2], nil},
+ check_completion(2, 3, [1, 2, 3])
+ ),
+ ?assertEqual(
+ {[1, 2], nil},
+ check_completion(2, 3, [1, 2, 3, 4, 5])
+ ),
+ ok.
+-endif.
\ No newline at end of file
diff --git a/src/couch_views/src/couch_views_indexer.erl b/src/couch_views/src/couch_views_indexer.erl
new file mode 100644
index 000000000..9c8be6fca
--- /dev/null
+++ b/src/couch_views/src/couch_views_indexer.erl
@@ -0,0 +1,575 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_indexer).
+
+-export([
+ spawn_link/0
+]).
+
+
+-export([
+ init/0,
+ map_docs/2,
+ write_docs/4
+]).
+
+-ifdef(TEST).
+-compile(export_all).
+-compile(nowarn_export_all).
+-endif.
+
+-include("couch_views.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("fabric/include/fabric2.hrl").
+
+% TODO:
+% * Handle transaction timeouts and other errors
+
+-define(KEY_SIZE_LIMIT, 8000).
+-define(VALUE_SIZE_LIMIT, 64000).
+
+spawn_link() ->
+ proc_lib:spawn_link(?MODULE, init, []).
+
+
+init() ->
+ Opts = #{no_schedule => true},
+ {ok, Job, Data0} = couch_jobs:accept(?INDEX_JOB_TYPE, Opts),
+
+ couch_views_server:accepted(self()),
+
+ Data = upgrade_data(Data0),
+ #{
+ <<"db_name">> := DbName,
+ <<"db_uuid">> := DbUUID,
+ <<"ddoc_id">> := DDocId,
+ <<"sig">> := JobSig,
+ <<"retries">> := Retries
+ } = Data,
+
+ {ok, Db} = try
+ fabric2_db:open(DbName, [?ADMIN_CTX, {uuid, DbUUID}])
+ catch error:database_does_not_exist ->
+ fail_job(Job, Data, db_deleted, "Database was deleted")
+ end,
+
+ {ok, DDoc} = case fabric2_db:open_doc(Db, DDocId) of
+ {ok, DDoc0} ->
+ {ok, DDoc0};
+ {not_found, _} ->
+ fail_job(Job, Data, ddoc_deleted, "Design document was deleted")
+ end,
+
+ {ok, Mrst} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
+ HexSig = fabric2_util:to_hex(Mrst#mrst.sig),
+
+ if HexSig == JobSig -> ok; true ->
+ fail_job(Job, Data, sig_changed, "Design document was modified")
+ end,
+
+ Limiter = couch_rate:create_if_missing({DbName, DDocId}, "views"),
+
+ State = #{
+ tx_db => undefined,
+ db_uuid => DbUUID,
+ db_seq => undefined,
+ view_seq => undefined,
+ last_seq => undefined,
+ view_vs => undefined,
+ job => Job,
+ job_data => Data,
+ count => 0,
+ changes_done => 0,
+ limiter => Limiter,
+ doc_acc => [],
+ design_opts => Mrst#mrst.design_opts
+ },
+
+ try
+ update(Db, Mrst, State)
+ catch
+ exit:normal ->
+ ok;
+ error:database_does_not_exist ->
+ fail_job(Job, Data, db_deleted, "Database was deleted");
+ Error:Reason ->
+ couch_rate:failure(Limiter),
+ NewRetry = Retries + 1,
+ RetryLimit = retry_limit(),
+
+ case should_retry(NewRetry, RetryLimit, Reason) of
+ true ->
+ DataErr = Data#{<<"retries">> := NewRetry},
+ % Set the last_seq to 0 so that it doesn't trigger a
+ % successful view build for anyone listening on
+ % couch_views_jobs:wait_for_job.
+ % Note: this won't cause the view to rebuild from 0 again.
+ StateErr = State#{job_data := DataErr, last_seq := <<"0">>},
+ report_progress(StateErr, update);
+ false ->
+ fail_job(Job, Data, Error, Reason)
+ end
+ end.
+
+
+upgrade_data(Data) ->
+ Defaults = [
+ {<<"retries">>, 0},
+ {<<"db_uuid">>, undefined}
+ ],
+ Data1 = lists:foldl(fun({Key, Default}, Acc) ->
+ case maps:is_key(Key, Acc) of
+ true -> Acc;
+ false -> maps:put(Key, Default, Acc)
+ end
+ end, Data, Defaults),
+ % initialize active task
+ fabric2_active_tasks:update_active_task_info(Data1, #{}).
+
+
+% Transaction limit exceeded, don't retry
+should_retry(_, _, {erlfdb_error, 2101}) ->
+ false;
+
+should_retry(Retries, RetryLimit, _) when Retries < RetryLimit ->
+ true;
+
+should_retry(_, _, _) ->
+ false.
+
+
+add_error(error, {erlfdb_error, Code}, Data) ->
+ CodeBin = couch_util:to_binary(Code),
+ CodeString = erlfdb:get_error_string(Code),
+ Data#{
+ error => foundationdb_error,
+ reason => list_to_binary([CodeBin, <<"-">>, CodeString])
+ };
+
+add_error(Error, Reason, Data) ->
+ Data#{
+ error => couch_util:to_binary(Error),
+ reason => couch_util:to_binary(Reason)
+ }.
+
+
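+% Main indexing loop: ask couch_rate for a budget, process up to that many
+% changes in a single transaction (do_update/3), and repeat until a batch
+% comes back smaller than its limit, at which point the job is finished.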
+update(#{} = Db, Mrst0, State0) ->
+ Limiter = maps:get(limiter, State0),
+ case couch_rate:budget(Limiter) of
+ 0 ->
+ couch_rate:wait(Limiter),
+ update(Db, Mrst0, State0);
+ Limit ->
+ {Mrst1, State1} = do_update(Db, Mrst0, State0#{limit => Limit, limiter => Limiter}),
+ case State1 of
+ finished ->
+ couch_eval:release_map_context(Mrst1#mrst.qserver);
+ _ ->
+ couch_rate:wait(Limiter),
+ update(Db, Mrst1, State1)
+ end
+ end.
+
+
+do_update(Db, Mrst0, State0) ->
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ State1 = get_update_start_state(TxDb, Mrst0, State0),
+
+ {ok, State2} = fold_changes(State1),
+
+ #{
+ count := Count,
+ doc_acc := DocAcc,
+ last_seq := LastSeq,
+ limit := Limit,
+ limiter := Limiter,
+ view_vs := ViewVS,
+ changes_done := ChangesDone0,
+ design_opts := DesignOpts
+ } = State2,
+ DocAcc1 = fetch_docs(TxDb, DesignOpts, DocAcc),
+ couch_rate:in(Limiter, Count),
+
+ {Mrst1, MappedDocs} = map_docs(Mrst0, DocAcc1),
+ WrittenDocs = write_docs(TxDb, Mrst1, MappedDocs, State2),
+
+ ChangesDone = ChangesDone0 + WrittenDocs,
+
+ couch_rate:success(Limiter, WrittenDocs),
+
+ case Count < Limit of
+ true ->
+ maybe_set_build_status(TxDb, Mrst1, ViewVS,
+ ?INDEX_READY),
+ report_progress(State2#{changes_done := ChangesDone},
+ finished),
+ {Mrst1, finished};
+ false ->
+ State3 = report_progress(State2, update),
+ {Mrst1, State3#{
+ tx_db := undefined,
+ count := 0,
+ doc_acc := [],
+ changes_done := ChangesDone,
+ view_seq := LastSeq
+ }}
+ end
+ end).
+
+
+maybe_set_build_status(_TxDb, _Mrst1, not_found, _State) ->
+ ok;
+
+maybe_set_build_status(TxDb, Mrst1, _ViewVS, State) ->
+ couch_views_fdb:set_build_status(TxDb, Mrst1, State).
+
+
+% In the first iteration of update we need
+% to populate our db and view sequences
+get_update_start_state(TxDb, Mrst, #{db_seq := undefined} = State) ->
+ ViewVS = couch_views_fdb:get_creation_vs(TxDb, Mrst),
+ ViewSeq = couch_views_fdb:get_update_seq(TxDb, Mrst),
+
+ State#{
+ tx_db := TxDb,
+ db_seq := fabric2_db:get_update_seq(TxDb),
+ view_vs := ViewVS,
+ view_seq := ViewSeq,
+ last_seq := ViewSeq
+ };
+
+get_update_start_state(TxDb, _Idx, State) ->
+ State#{
+ tx_db := TxDb
+ }.
+
+
+fold_changes(State) ->
+ #{
+ view_seq := SinceSeq,
+ limit := Limit,
+ tx_db := TxDb
+ } = State,
+
+ Fun = fun process_changes/2,
+ Opts = [{limit, Limit}, {restart_tx, false}],
+ fabric2_db:fold_changes(TxDb, SinceSeq, Fun, State, Opts).
+
+
+process_changes(Change, Acc) ->
+ #{
+ doc_acc := DocAcc,
+ count := Count,
+ design_opts := DesignOpts,
+ view_vs := ViewVS
+ } = Acc,
+
+ #{
+ id := Id,
+ sequence := LastSeq
+ } = Change,
+
+ IncludeDesign = lists:keymember(<<"include_design">>, 1, DesignOpts),
+
+ Acc1 = case {Id, IncludeDesign} of
+ {<<?DESIGN_DOC_PREFIX, _/binary>>, false} ->
+ maps:merge(Acc, #{
+ count => Count + 1,
+ last_seq => LastSeq
+ });
+ _ ->
+ Acc#{
+ doc_acc := DocAcc ++ [Change],
+ count := Count + 1,
+ last_seq := LastSeq
+ }
+ end,
+
+ DocVS = fabric2_fdb:seq_to_vs(LastSeq),
+
+ Go = maybe_stop_at_vs(ViewVS, DocVS),
+ {Go, Acc1}.
+
+
+maybe_stop_at_vs({versionstamp, _} = ViewVS, DocVS) when DocVS >= ViewVS ->
+ stop;
+
+maybe_stop_at_vs(_, _) ->
+ ok.
+
+
+map_docs(Mrst, []) ->
+ {Mrst, []};
+
+map_docs(Mrst, Docs) ->
+ % Run all the non-deleted docs through the view engine
+ Mrst1 = start_query_server(Mrst),
+ QServer = Mrst1#mrst.qserver,
+
+ {Deleted0, NotDeleted0} = lists:partition(fun(Doc) ->
+ #{deleted := Deleted} = Doc,
+ Deleted
+ end, Docs),
+
+ Deleted1 = lists:map(fun(Doc) ->
+ Doc#{results => []}
+ end, Deleted0),
+
+ DocsToMap = lists:map(fun(Doc) ->
+ #{doc := DocRec} = Doc,
+ DocRec
+ end, NotDeleted0),
+
+ {ok, AllResults} = couch_eval:map_docs(QServer, DocsToMap),
+
+ % The expanded function head here is making an assertion
+ % that the results match the given doc
+ NotDeleted1 = lists:zipwith(fun(#{id := DocId} = Doc, {DocId, Results}) ->
+ Doc#{results => Results}
+ end, NotDeleted0, AllResults),
+
+ % I'm being a bit careful here re-sorting the docs
+ % in the order of the changes feed. Theoretically this is
+ % unnecessary since we're inside a single transaction.
+ % However, I'm concerned that if we ever split this up
+ % into multiple transactions this detail might
+ % be important but forgotten.
+ MappedDocs = lists:sort(fun(A, B) ->
+ #{sequence := ASeq} = A,
+ #{sequence := BSeq} = B,
+ ASeq =< BSeq
+ end, Deleted1 ++ NotDeleted1),
+
+ {Mrst1, MappedDocs}.
+
+
+write_docs(TxDb, Mrst, Docs, State) ->
+ #mrst{
+ views = Views,
+ sig = Sig
+ } = Mrst,
+
+ #{
+ last_seq := LastSeq
+ } = State,
+
+ ViewIds = [View#mrview.id_num || View <- Views],
+ KeyLimit = key_size_limit(),
+ ValLimit = value_size_limit(),
+
+ DocsNumber = lists:foldl(fun(Doc0, N) ->
+ Doc1 = calculate_kv_sizes(Mrst, Doc0, KeyLimit, ValLimit),
+ couch_views_fdb:write_doc(TxDb, Sig, ViewIds, Doc1),
+ N + 1
+ end, 0, Docs),
+
+ if LastSeq == false -> ok; true ->
+ couch_views_fdb:set_update_seq(TxDb, Sig, LastSeq)
+ end,
+ DocsNumber.
+
+
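+% Fetch doc bodies for the non-deleted changes in two pipelined passes:
+% first start winning-rev futures for every change, then, once those
+% resolve, start the corresponding doc body futures and wait for them all,
+% so the FDB reads overlap instead of running one document at a time.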
+fetch_docs(Db, DesignOpts, Changes) ->
+ {Deleted, NotDeleted} = lists:partition(fun(Doc) ->
+ #{deleted := Deleted} = Doc,
+ Deleted
+ end, Changes),
+
+ RevState = lists:foldl(fun(Change, Acc) ->
+ #{id := Id} = Change,
+ RevFuture = fabric2_fdb:get_winning_revs_future(Db, Id, 1),
+ Acc#{
+ RevFuture => {Id, Change}
+ }
+ end, #{}, NotDeleted),
+
+ RevFutures = maps:keys(RevState),
+ BodyState = lists:foldl(fun(RevFuture, Acc) ->
+ {Id, Change} = maps:get(RevFuture, RevState),
+ Revs = fabric2_fdb:get_revs_wait(Db, RevFuture),
+
+ % I'm assuming that in this changes transaction the winning
+ % doc body exists since it is listed in the changes feed as not deleted.
+ #{winner := true} = RevInfo = lists:last(Revs),
+ BodyFuture = fabric2_fdb:get_doc_body_future(Db, Id, RevInfo),
+ Acc#{
+ BodyFuture => {Id, RevInfo, Change}
+ }
+ end, #{}, erlfdb:wait_for_all(RevFutures)),
+
+ AddLocalSeq = fabric2_util:get_value(<<"local_seq">>, DesignOpts, false),
+
+ BodyFutures = maps:keys(BodyState),
+ ChangesWithDocs = lists:map(fun (BodyFuture) ->
+ {Id, RevInfo, Change} = maps:get(BodyFuture, BodyState),
+ Doc = fabric2_fdb:get_doc_body_wait(Db, Id, RevInfo, BodyFuture),
+
+ Doc1 = case maps:get(branch_count, RevInfo, 1) of
+ 1 when AddLocalSeq ->
+ {ok, DocWithLocalSeq} = fabric2_db:apply_open_doc_opts(Doc,
+ [RevInfo], [local_seq]),
+ DocWithLocalSeq;
+ 1 ->
+ Doc;
+ _ ->
+ RevConflicts = fabric2_fdb:get_all_revs(Db, Id),
+ DocOpts = if not AddLocalSeq -> []; true -> [local_seq] end,
+
+ {ok, DocWithConflicts} = fabric2_db:apply_open_doc_opts(Doc,
+ RevConflicts, [conflicts | DocOpts]),
+ DocWithConflicts
+ end,
+ Change#{doc => Doc1}
+ end, erlfdb:wait_for_all(BodyFutures)),
+
+ % This combines the deleted changes with the changes that contain docs.
+ % Important to note that this is now unsorted, which is fine for now,
+ % but later could be an issue if we split this across transactions.
+ Deleted ++ ChangesWithDocs.
+
+
+start_query_server(#mrst{qserver = nil} = Mrst) ->
+ #mrst{
+ db_name = DbName,
+ idx_name = DDocId,
+ language = Language,
+ sig = Sig,
+ lib = Lib,
+ views = Views
+ } = Mrst,
+ {ok, QServer} = couch_eval:acquire_map_context(
+ DbName,
+ DDocId,
+ Language,
+ Sig,
+ Lib,
+ [View#mrview.def || View <- Views]
+ ),
+ Mrst#mrst{qserver = QServer};
+
+start_query_server(#mrst{} = Mrst) ->
+ Mrst.
+
+
+calculate_kv_sizes(Mrst, Doc, KeyLimit, ValLimit) ->
+ #mrst{
+ db_name = DbName,
+ idx_name = IdxName
+ } = Mrst,
+ #{
+ results := Results
+ } = Doc,
+ try
+ KVSizes = lists:map(fun(ViewRows) ->
+ lists:foldl(fun({K, V}, Acc) ->
+ KeySize = erlang:external_size(K),
+ ValSize = erlang:external_size(V),
+
+ if KeySize =< KeyLimit -> ok; true ->
+ throw({size_error, key})
+ end,
+
+ if ValSize =< ValLimit -> ok; true ->
+ throw({size_error, value})
+ end,
+
+ Acc + KeySize + ValSize
+ end, 0, ViewRows)
+ end, Results),
+ Doc#{kv_sizes => KVSizes}
+ catch throw:{size_error, Type} ->
+ #{id := DocId} = Doc,
+ Fmt = "View ~s size error for docid `~s`, excluded from indexing "
+ "in db `~s` for design doc `~s`",
+ couch_log:error(Fmt, [Type, DocId, DbName, IdxName]),
+ Doc#{deleted := true, results := [], kv_sizes => []}
+ end.
+
+
+report_progress(State, UpdateType) ->
+ #{
+ tx_db := TxDb,
+ job := Job1,
+ job_data := JobData,
+ last_seq := LastSeq,
+ db_seq := DBSeq,
+ changes_done := ChangesDone
+ } = State,
+
+ #{
+ <<"db_name">> := DbName,
+ <<"db_uuid">> := DbUUID,
+ <<"ddoc_id">> := DDocId,
+ <<"sig">> := Sig,
+ <<"retries">> := Retries
+ } = JobData,
+
+ ActiveTasks = fabric2_active_tasks:get_active_task_info(JobData),
+ TotalDone = case maps:get(<<"changes_done">>, ActiveTasks, 0) of
+ 0 -> ChangesDone;
+ N -> N + ChangesDone
+ end,
+
+ NewActiveTasks = couch_views_util:active_tasks_info(TotalDone,
+ DbName, DDocId, LastSeq, DBSeq),
+
+ % Reconstruct from scratch to remove any
+ % possible existing error state.
+ NewData0 = #{
+ <<"db_name">> => DbName,
+ <<"db_uuid">> => DbUUID,
+ <<"ddoc_id">> => DDocId,
+ <<"sig">> => Sig,
+ <<"view_seq">> => LastSeq,
+ <<"retries">> => Retries
+ },
+ NewData = fabric2_active_tasks:update_active_task_info(NewData0,
+ NewActiveTasks),
+
+ case UpdateType of
+ update ->
+ case couch_jobs:update(TxDb, Job1, NewData) of
+ {ok, Job2} ->
+ State#{job := Job2};
+ {error, halt} ->
+ couch_log:error("~s job halted :: ~w", [?MODULE, Job1]),
+ exit(normal)
+ end;
+ finished ->
+ case couch_jobs:finish(TxDb, Job1, NewData) of
+ ok ->
+ State;
+ {error, halt} ->
+ couch_log:error("~s job halted :: ~w", [?MODULE, Job1]),
+ exit(normal)
+ end
+ end.
+
+
+fail_job(Job, Data, Error, Reason) ->
+ NewData = add_error(Error, Reason, Data),
+ couch_jobs:finish(undefined, Job, NewData),
+ exit(normal).
+
+
+retry_limit() ->
+ config:get_integer("couch_views", "retry_limit", 3).
+
+
+key_size_limit() ->
+ config:get_integer("couch_views", "key_size_limit", ?KEY_SIZE_LIMIT).
+
+
+value_size_limit() ->
+ config:get_integer("couch_views", "value_size_limit", ?VALUE_SIZE_LIMIT). \ No newline at end of file
diff --git a/src/couch_views/src/couch_views_jobs.erl b/src/couch_views/src/couch_views_jobs.erl
new file mode 100644
index 000000000..4b0aa2660
--- /dev/null
+++ b/src/couch_views/src/couch_views_jobs.erl
@@ -0,0 +1,163 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_jobs).
+
+-export([
+ set_timeout/0,
+ build_view/3,
+ build_view_async/2,
+ remove/2,
+ job_state/2
+]).
+
+-ifdef(TEST).
+-compile(export_all).
+-compile(nowarn_export_all).
+-endif.
+
+
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+-include("couch_views.hrl").
+
+
+set_timeout() ->
+ couch_jobs:set_type_timeout(?INDEX_JOB_TYPE, 26).
+
+
+build_view(TxDb, Mrst, UpdateSeq) ->
+ {ok, JobId} = build_view_async(TxDb, Mrst),
+ case wait_for_job(JobId, Mrst#mrst.idx_name, UpdateSeq) of
+ ok -> ok;
+ retry -> build_view(TxDb, Mrst, UpdateSeq)
+ end.
+
+
+build_view_async(TxDb0, Mrst) ->
+ JobId = job_id(TxDb0, Mrst),
+ JobData = job_data(TxDb0, Mrst),
+ DbUUID = fabric2_db:get_uuid(TxDb0),
+ TxDb1 = ensure_correct_tx(TxDb0),
+ couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(TxDb1), fun(JTx) ->
+ case couch_jobs:get_job_data(JTx, ?INDEX_JOB_TYPE, JobId) of
+ {error, not_found} ->
+ ok;
+ {ok, #{} = OldJobData} ->
+ case maps:get(<<"db_uuid">>, OldJobData, undefined) of
+ DbUUID -> ok;
+ _ -> couch_jobs:remove(JTx, ?INDEX_JOB_TYPE, JobId)
+ end
+ end,
+ ok = couch_jobs:add(JTx, ?INDEX_JOB_TYPE, JobId, JobData)
+ end),
+ {ok, JobId}.
+
+
+remove(TxDb, Sig) ->
+ DbName = fabric2_db:name(TxDb),
+ JobId = job_id(DbName, Sig),
+ couch_jobs:remove(TxDb, ?INDEX_JOB_TYPE, JobId).
+
+
+job_state(#{} = TxDb, #mrst{} = Mrst) ->
+ JobId = job_id(TxDb, Mrst),
+ couch_jobs:get_job_state(TxDb, ?INDEX_JOB_TYPE, JobId).
+
+
+ensure_correct_tx(#{tx := undefined} = TxDb) ->
+ TxDb;
+
+ensure_correct_tx(#{tx := Tx} = TxDb) ->
+ case erlfdb:is_read_only(Tx) of
+ true -> TxDb#{tx := undefined};
+ false -> TxDb
+ end.
+
+
+wait_for_job(JobId, DDocId, UpdateSeq) ->
+ case couch_jobs:subscribe(?INDEX_JOB_TYPE, JobId) of
+ {ok, Subscription, _State, _Data} ->
+ wait_for_job(JobId, Subscription, DDocId, UpdateSeq);
+ {ok, finished, Data} ->
+ case Data of
+ #{<<"view_seq">> := ViewSeq} when ViewSeq >= UpdateSeq ->
+ ok;
+ _ ->
+ retry
+ end
+ end.
+
+
+wait_for_job(JobId, Subscription, DDocId, UpdateSeq) ->
+ case wait(Subscription) of
+ {not_found, not_found} ->
+ erlang:error(index_not_found);
+ {error, Error} ->
+ erlang:error(Error);
+ {finished, #{<<"error">> := <<"ddoc_deleted">>} = Data} ->
+ case maps:get(<<"ddoc_id">>, Data) of
+ DDocId ->
+ couch_jobs:remove(undefined, ?INDEX_JOB_TYPE, JobId),
+ erlang:error({ddoc_deleted, maps:get(<<"reason">>, Data)});
+ _OtherDocId ->
+ % A different design doc with the same signature
+ % was deleted. Resubmit this job which will overwrite
+ % the ddoc_id in the job.
+ retry
+ end;
+ {finished, #{<<"error">> := Error, <<"reason">> := Reason}} ->
+ couch_jobs:remove(undefined, ?INDEX_JOB_TYPE, JobId),
+ erlang:error({binary_to_existing_atom(Error, latin1), Reason});
+ {finished, #{<<"view_seq">> := ViewSeq}} when ViewSeq >= UpdateSeq ->
+ ok;
+ {finished, _} ->
+ wait_for_job(JobId, DDocId, UpdateSeq);
+ {_State, #{<<"view_seq">> := ViewSeq}} when ViewSeq >= UpdateSeq ->
+ couch_jobs:unsubscribe(Subscription),
+ ok;
+ {_, _} ->
+ wait_for_job(JobId, Subscription, DDocId, UpdateSeq)
+ end.
+
+
+job_id(#{name := DbName}, #mrst{sig = Sig}) ->
+ job_id(DbName, Sig);
+
+job_id(DbName, Sig) ->
+ HexSig = fabric2_util:to_hex(Sig),
+ % Put signature first in order to be able to use the no_schedule
+ % couch_jobs:accept/2 option
+ <<HexSig/binary, "-", DbName/binary>>.
+
+
+job_data(Db, Mrst) ->
+ #mrst{
+ idx_name = DDocId,
+ sig = Sig
+ } = Mrst,
+
+ #{
+ db_name => fabric2_db:name(Db),
+ db_uuid => fabric2_db:get_uuid(Db),
+ ddoc_id => DDocId,
+ sig => fabric2_util:to_hex(Sig),
+ retries => 0
+ }.
+
+
+wait(Subscription) ->
+ case couch_jobs:wait(Subscription, infinity) of
+ {?INDEX_JOB_TYPE, _JobId, JobState, JobData} ->
+ {JobState, JobData};
+ timeout ->
+ {error, timeout}
+ end.
diff --git a/src/couch_views/src/couch_views_plugin.erl b/src/couch_views/src/couch_views_plugin.erl
new file mode 100644
index 000000000..f8169179a
--- /dev/null
+++ b/src/couch_views/src/couch_views_plugin.erl
@@ -0,0 +1,40 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+-module(couch_views_plugin).
+
+
+-export([
+ after_interactive_write/4
+]).
+
+
+-define(SERVICE_ID, couch_views).
+
+
+after_interactive_write(Db, Mrst, Result, DocNumber) ->
+ with_pipe(after_interactive_write, [Db, Mrst, Result, DocNumber]),
+ ok.
+
+
+%% ------------------------------------------------------------------
+%% Internal Function Definitions
+%% ------------------------------------------------------------------
+
+with_pipe(Func, Args) ->
+ do_apply(Func, Args, [pipe]).
+
+
+do_apply(Func, Args, Opts) ->
+ Handle = couch_epi:get_handle(?SERVICE_ID),
+ couch_epi:apply(Handle, ?SERVICE_ID, Func, Args, Opts).
\ No newline at end of file
diff --git a/src/couch_views/src/couch_views_reader.erl b/src/couch_views/src/couch_views_reader.erl
new file mode 100644
index 000000000..61a78d7f8
--- /dev/null
+++ b/src/couch_views/src/couch_views_reader.erl
@@ -0,0 +1,216 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_reader).
+
+-export([
+ read/6
+]).
+
+
+-include("couch_views.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("fabric/include/fabric2.hrl").
+
+
+read(Db, Mrst, ViewName, UserCallback, UserAcc0, Args) ->
+ #mrst{
+ language = Lang,
+ sig = Sig,
+ views = Views
+ } = Mrst,
+
+ ViewId = get_view_id(Lang, Args, ViewName, Views),
+ Fun = fun handle_row/4,
+
+ try
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ Meta = get_meta(TxDb, Mrst, ViewId, Args),
+ UserAcc1 = maybe_stop(UserCallback(Meta, UserAcc0)),
+
+ Acc0 = #{
+ db => TxDb,
+ skip => Args#mrargs.skip,
+ mrargs => undefined,
+ callback => UserCallback,
+ acc => UserAcc1
+ },
+
+ Acc1 = lists:foldl(fun(KeyArgs, KeyAcc0) ->
+ Opts = mrargs_to_fdb_options(KeyArgs),
+ KeyAcc1 = KeyAcc0#{
+ mrargs := KeyArgs
+ },
+ couch_views_fdb:fold_map_idx(
+ TxDb,
+ Sig,
+ ViewId,
+ Opts,
+ Fun,
+ KeyAcc1
+ )
+ end, Acc0, expand_keys_args(Args)),
+
+ #{
+ acc := UserAcc2
+ } = Acc1,
+ {ok, maybe_stop(UserCallback(complete, UserAcc2))}
+ end)
+ catch throw:{done, Out} ->
+ {ok, Out}
+ end.
+
+
+get_meta(TxDb, Mrst, ViewId, #mrargs{update_seq = true}) ->
+ TotalRows = couch_views_fdb:get_row_count(TxDb, Mrst, ViewId),
+ ViewSeq = couch_views_fdb:get_update_seq(TxDb, Mrst),
+ {meta, [{update_seq, ViewSeq}, {total, TotalRows}, {offset, null}]};
+
+get_meta(TxDb, Mrst, ViewId, #mrargs{}) ->
+ TotalRows = couch_views_fdb:get_row_count(TxDb, Mrst, ViewId),
+ {meta, [{total, TotalRows}, {offset, null}]}.
+
+
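+% The FDB range read requests `limit + skip` rows (see mrargs_to_fdb_options/1
+% below), so `skip` is applied here by simply dropping the first Skip rows.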
+handle_row(_DocId, _Key, _Value, #{skip := Skip} = Acc) when Skip > 0 ->
+ Acc#{skip := Skip - 1};
+
+handle_row(DocId, Key, Value, Acc) ->
+ #{
+ db := TxDb,
+ mrargs := Args,
+ callback := UserCallback,
+ acc := UserAcc0
+ } = Acc,
+
+ BaseRow = [
+ {id, DocId},
+ {key, Key},
+ {value, Value}
+ ],
+
+ Row = BaseRow ++ if not Args#mrargs.include_docs -> []; true ->
+ DocOpts0 = Args#mrargs.doc_options,
+ DocOpts1 = DocOpts0 ++ case Args#mrargs.conflicts of
+ true -> [conflicts];
+ _ -> []
+ end,
+
+ {TargetDocId, Rev} = get_doc_id(DocId, Value),
+ DocObj = load_doc(TxDb, TargetDocId, Rev, DocOpts1),
+ [{doc, DocObj}]
+ end,
+
+ UserAcc1 = maybe_stop(UserCallback({row, Row}, UserAcc0)),
+ Acc#{acc := UserAcc1}.
+
+
+get_view_id(Lang, Args, ViewName, Views) ->
+ case couch_mrview_util:extract_view(Lang, Args, ViewName, Views) of
+ {map, View, _Args} -> View#mrview.id_num;
+ {red, {_Idx, _Lang, View}} -> View#mrview.id_num
+ end.
+
+
+expand_keys_args(#mrargs{keys = undefined} = Args) ->
+ [Args];
+
+expand_keys_args(#mrargs{keys = Keys} = Args) ->
+ lists:map(fun(Key) ->
+ Args#mrargs{
+ start_key = Key,
+ end_key = Key
+ }
+ end, Keys).
+
+
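+% Translate an #mrargs{} record into the fold options understood by
+% couch_views_fdb:fold_map_idx/6. Start and end keys are encoded with
+% couch_views_encoding so they collate the same way the index rows were
+% written.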
+mrargs_to_fdb_options(Args) ->
+ #mrargs{
+ start_key = StartKey0,
+ start_key_docid = StartKeyDocId,
+ end_key = EndKey0,
+ end_key_docid = EndKeyDocId,
+ direction = Direction,
+ limit = Limit,
+ skip = Skip,
+ inclusive_end = InclusiveEnd
+ } = Args,
+
+ StartKey1 = if StartKey0 == undefined -> undefined; true ->
+ couch_views_encoding:encode(StartKey0, key)
+ end,
+
+ StartKeyOpts = case {StartKey1, StartKeyDocId} of
+ {undefined, _} ->
+ [];
+ {StartKey1, StartKeyDocId} ->
+ [{start_key, {StartKey1, StartKeyDocId}}]
+ end,
+
+ EndKey1 = if EndKey0 == undefined -> undefined; true ->
+ couch_views_encoding:encode(EndKey0, key)
+ end,
+
+ EndKeyOpts = case {EndKey1, EndKeyDocId, Direction} of
+ {undefined, _, _} ->
+ [];
+ {EndKey1, <<>>, rev} when not InclusiveEnd ->
+ % When we iterate in reverse with
+ % inclusive_end=false we have to set the
+ % EndKeyDocId to <<255>> so that we don't
+ % include matching rows.
+ [{end_key_gt, {EndKey1, <<255>>}}];
+ {EndKey1, <<255>>, _} when not InclusiveEnd ->
+ % When inclusive_end=false we need to
+ % elide the default end_key_docid so as
+ % to not sort past the docids with the
+ % given end key.
+ [{end_key_gt, {EndKey1}}];
+ {EndKey1, EndKeyDocId, _} when not InclusiveEnd ->
+ [{end_key_gt, {EndKey1, EndKeyDocId}}];
+ {EndKey1, EndKeyDocId, _} when InclusiveEnd ->
+ [{end_key, {EndKey1, EndKeyDocId}}]
+ end,
+
+ [
+ {dir, Direction},
+ {limit, Limit + Skip},
+ {streaming_mode, want_all},
+ {restart_tx, true}
+ ] ++ StartKeyOpts ++ EndKeyOpts.
+
+
+maybe_stop({ok, Acc}) -> Acc;
+maybe_stop({stop, Acc}) -> throw({done, Acc}).
+
+
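+% When the emitted value is an object carrying `_id` (and optionally `_rev`)
+% properties, include_docs loads that document instead of the row's own doc;
+% otherwise the row's document id is used at its latest revision.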
+get_doc_id(Id, {Props}) ->
+ DocId = couch_util:get_value(<<"_id">>, Props, Id),
+ Rev = couch_util:get_value(<<"_rev">>, Props, null),
+ {DocId, Rev};
+
+get_doc_id(Id, _Value) ->
+ {Id, null}.
+
+
+load_doc(TxDb, Id, null, DocOpts) ->
+ case fabric2_db:open_doc(TxDb, Id, DocOpts) of
+ {ok, Doc} -> couch_doc:to_json_obj(Doc, DocOpts);
+ {not_found, _} -> null
+ end;
+
+load_doc(TxDb, Id, Rev, DocOpts) ->
+ Rev1 = couch_doc:parse_rev(Rev),
+ case fabric2_db:open_doc_revs(TxDb, Id, [Rev1], DocOpts) of
+ {ok, [{ok, Doc}]} -> couch_doc:to_json_obj(Doc, DocOpts);
+ {ok, [_Else]} -> null
+ end.
diff --git a/src/couch_views/src/couch_views_server.erl b/src/couch_views/src/couch_views_server.erl
new file mode 100644
index 000000000..71a4abb8d
--- /dev/null
+++ b/src/couch_views/src/couch_views_server.erl
@@ -0,0 +1,176 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_server).
+
+
+-behaviour(gen_server).
+
+
+-export([
+ start_link/0
+]).
+
+-export([
+ accepted/1
+]).
+
+-export([
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ code_change/3,
+ format_status/2
+]).
+
+-define(MAX_ACCEPTORS, 5).
+-define(MAX_WORKERS, 100).
+
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
+accepted(Worker) when is_pid(Worker) ->
+ gen_server:call(?MODULE, {accepted, Worker}, infinity).
+
+
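+% trap_exit is enabled so that acceptor and worker exits arrive as
+% {'EXIT', Pid, Reason} messages in handle_info/2 instead of killing this
+% server.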
+init(_) ->
+ process_flag(trap_exit, true),
+ couch_views_jobs:set_timeout(),
+ St = #{
+ acceptors => #{},
+ workers => #{},
+ max_acceptors => max_acceptors(),
+ max_workers => max_workers()
+ },
+ {ok, spawn_acceptors(St)}.
+
+
+terminate(_, _St) ->
+ ok.
+
+
+handle_call({accepted, Pid}, _From, St) ->
+ #{
+ acceptors := Acceptors,
+ workers := Workers
+ } = St,
+ case maps:is_key(Pid, Acceptors) of
+ true ->
+ St1 = St#{
+ acceptors := maps:remove(Pid, Acceptors),
+ workers := Workers#{Pid => true}
+ },
+ {reply, ok, spawn_acceptors(St1)};
+ false ->
+ LogMsg = "~p : unknown acceptor processs ~p",
+ couch_log:error(LogMsg, [?MODULE, Pid]),
+ {stop, {unknown_acceptor_pid, Pid}, St}
+ end;
+
+handle_call(Msg, _From, St) ->
+ {stop, {bad_call, Msg}, {bad_call, Msg}, St}.
+
+
+handle_cast(Msg, St) ->
+ {stop, {bad_cast, Msg}, St}.
+
+
+handle_info({'EXIT', Pid, Reason}, St) ->
+ #{
+ acceptors := Acceptors,
+ workers := Workers
+ } = St,
+
+ % In Erlang 21+ we could check map keys directly in the function head
+ case {maps:is_key(Pid, Acceptors), maps:is_key(Pid, Workers)} of
+ {true, false} -> handle_acceptor_exit(St, Pid, Reason);
+ {false, true} -> handle_worker_exit(St, Pid, Reason);
+ {false, false} -> handle_unknown_exit(St, Pid, Reason)
+ end;
+
+handle_info(Msg, St) ->
+ {stop, {bad_info, Msg}, St}.
+
+
+code_change(_OldVsn, St, _Extra) ->
+ {ok, St}.
+
+
+format_status(_Opt, [_PDict, State]) ->
+ #{
+ workers := Workers,
+ acceptors := Acceptors
+ } = State,
+ Scrubbed = State#{
+ workers => {map_size, maps:size(Workers)},
+ acceptors => {map_size, maps:size(Acceptors)}
+ },
+ [{data, [{"State",
+ Scrubbed
+ }]}].
+
+
+% Worker process exit handlers
+
+handle_acceptor_exit(#{acceptors := Acceptors} = St, Pid, Reason) ->
+ St1 = St#{acceptors := maps:remove(Pid, Acceptors)},
+ LogMsg = "~p : acceptor process ~p exited with ~p",
+ couch_log:error(LogMsg, [?MODULE, Pid, Reason]),
+ {noreply, spawn_acceptors(St1)}.
+
+
+handle_worker_exit(#{workers := Workers} = St, Pid, normal) ->
+ St1 = St#{workers := maps:remove(Pid, Workers)},
+ {noreply, spawn_acceptors(St1)};
+
+handle_worker_exit(#{workers := Workers} = St, Pid, Reason) ->
+ St1 = St#{workers := maps:remove(Pid, Workers)},
+ LogMsg = "~p : indexer process ~p exited with ~p",
+ couch_log:error(LogMsg, [?MODULE, Pid, Reason]),
+ {noreply, spawn_acceptors(St1)}.
+
+
+handle_unknown_exit(St, Pid, Reason) ->
+ LogMsg = "~p : unknown process ~p exited with ~p",
+ couch_log:error(LogMsg, [?MODULE, Pid, Reason]),
+ {stop, {unknown_pid_exit, Pid}, St}.
+
+
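+% Keep spawning acceptors as long as we are below max_acceptors and the
+% combined number of acceptors and workers stays below max_workers.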
+spawn_acceptors(St) ->
+ #{
+ workers := Workers,
+ acceptors := Acceptors,
+ max_acceptors := MaxAcceptors,
+ max_workers := MaxWorkers
+ } = St,
+ ACnt = maps:size(Acceptors),
+ WCnt = maps:size(Workers),
+ case ACnt < MaxAcceptors andalso (ACnt + WCnt) < MaxWorkers of
+ true ->
+ Pid = couch_views_indexer:spawn_link(),
+ NewSt = St#{acceptors := Acceptors#{Pid => true}},
+ spawn_acceptors(NewSt);
+ false ->
+ St
+ end.
+
+
+max_acceptors() ->
+ config:get_integer("couch_views", "max_acceptors", ?MAX_ACCEPTORS).
+
+
+max_workers() ->
+ config:get_integer("couch_views", "max_workers", ?MAX_WORKERS).
diff --git a/src/couch_views/src/couch_views_sup.erl b/src/couch_views/src/couch_views_sup.erl
new file mode 100644
index 000000000..94531893d
--- /dev/null
+++ b/src/couch_views/src/couch_views_sup.erl
@@ -0,0 +1,66 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+-module(couch_views_sup).
+
+
+-behaviour(supervisor).
+
+
+-export([
+ start_link/0
+]).
+
+
+-export([
+ init/1
+]).
+
+
+start_link() ->
+ ok = register_views_index(),
+ Arg = case fabric2_node_types:is_type(view_indexing) of
+ true -> normal;
+ false -> builds_disabled
+ end,
+ supervisor:start_link({local, ?MODULE}, ?MODULE, Arg).
+
+
+init(normal) ->
+ Children = [
+ #{
+ id => couch_views_server,
+ start => {couch_views_server, start_link, []}
+ }
+ ] ++ couch_epi:register_service(couch_views_epi, []),
+ {ok, {flags(), Children}};
+
+init(builds_disabled) ->
+ couch_log:notice("~p : view_indexing disabled", [?MODULE]),
+ couch_views_jobs:set_timeout(),
+ {ok, {flags(), []}}.
+
+
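+% The fabric2_index callback is only registered on api_frontend nodes; node
+% types that do not serve the HTTP API skip the registration.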
+register_views_index() ->
+ case fabric2_node_types:is_type(api_frontend) of
+ true -> fabric2_index:register_index(couch_views);
+ false -> ok
+ end.
+
+
+flags() ->
+ #{
+ strategy => one_for_one,
+ intensity => 1,
+ period => 5
+ }.
diff --git a/src/couch_views/src/couch_views_updater.erl b/src/couch_views/src/couch_views_updater.erl
new file mode 100644
index 000000000..ba9fadb51
--- /dev/null
+++ b/src/couch_views/src/couch_views_updater.erl
@@ -0,0 +1,110 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+-module(couch_views_updater).
+
+-export([
+ index/6
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+
+% If the doc revision does not match the NewRevId passed here we can ignore
+% the document since it is a conflict document and does not need
+% to be indexed.
+index(Db, #doc{id = Id, revs = Revs} = Doc, _NewWinner, _OldWinner, NewRevId,
+ Seq) ->
+ try
+ {Depth, [FirstRev | _]} = Revs,
+ DocRev = {Depth, FirstRev},
+ if DocRev /= NewRevId -> ok; true ->
+ index_int(Db, Doc, Seq)
+ end
+ catch
+ error:{erlfdb_error, ErrCode} when is_integer(ErrCode) ->
+ Stack = erlang:get_stacktrace(),
+ DbName = fabric2_db:name(Db),
+ couch_log:error("Mango index erlfdb error Db ~s Doc ~p ~p",
+ [DbName, Id, ErrCode]),
+ erlang:raise(error, {erlfdb_error, ErrCode}, Stack);
+ Error:Reason ->
+ DbName = fabric2_db:name(Db),
+ couch_log:error("Mango index error for Db ~s Doc ~p ~p ~p",
+ [DbName, Id, Error, Reason])
+ end.
+
+
+% Check if the design doc is an interactive index and, if so, kick off a
+% background worker to build the new index up to the creation_vs
+index_int(Db, #doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>,
+ deleted = false} = DDoc, Seq) ->
+ DbName = fabric2_db:name(Db),
+
+ case couch_views_ddoc:is_interactive(DDoc) of
+ true ->
+ {ok, Mrst} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
+ case couch_views_fdb:get_creation_vs(Db, Mrst) of
+ not_found ->
+ couch_views_fdb:new_interactive_index(Db, Mrst, Seq),
+ {ok, _} = couch_views_jobs:build_view_async(Db, Mrst);
+ _ ->
+ ok
+ end;
+ false ->
+ ok
+ end,
+ write_doc(Db, DDoc);
+
+index_int(Db, #doc{} = Doc, _Seq) ->
+ write_doc(Db, Doc).
+
+
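+% Map the updated document with every interactive design doc and write the
+% resulting view rows. The view update_seq is deliberately left unchanged
+% (last_seq => false) since interactive updates do not advance it.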
+write_doc(Db, #doc{deleted = Deleted} = Doc) ->
+ DbName = fabric2_db:name(Db),
+ DDocs = couch_views_ddoc:get_interactive_list(Db),
+
+ Result0 = [#{
+ id => Doc#doc.id,
+ results => [],
+ deleted => Deleted,
+ doc => Doc
+ }],
+
+ %% Interactive updates do not update the view's update_seq
+ State = #{
+ last_seq => false
+ },
+
+ lists:foreach(fun(DDoc) ->
+ {ok, Mrst} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
+
+ case should_index_doc(Doc, Mrst) of
+ true ->
+ {Mrst1, Result1} = couch_views_indexer:map_docs(Mrst, Result0),
+ DocNumber = couch_views_indexer:write_docs(Db, Mrst1,
+ Result1, State),
+ couch_views_plugin:after_interactive_write(Db, Mrst1,
+ Result1, DocNumber),
+ couch_eval:release_map_context(Mrst1#mrst.qserver);
+ false ->
+ ok
+ end
+ end, DDocs).
+
+
+should_index_doc(<<?DESIGN_DOC_PREFIX, _/binary>>, Mrst) ->
+ lists:keymember(<<"include_design">>, 1, Mrst#mrst.design_opts);
+
+should_index_doc(_, _) ->
+ true.
diff --git a/src/couch_views/src/couch_views_util.erl b/src/couch_views/src/couch_views_util.erl
new file mode 100644
index 000000000..6298acf33
--- /dev/null
+++ b/src/couch_views/src/couch_views_util.erl
@@ -0,0 +1,305 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_util).
+
+
+-export([
+ ddoc_to_mrst/2,
+ validate_args/1,
+ validate_args/2,
+ is_paginated/1,
+ active_tasks_info/5
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+-include("couch_views.hrl").
+
+
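+% Build an #mrst{} record from a design doc body. Views with identical map
+% source and options are grouped together so they share a single index, and
+% the signature is an md5 hash over the views, language, options and lib.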
+ddoc_to_mrst(DbName, #doc{id=Id, body={Fields}}) ->
+ MakeDict = fun({Name, {MRFuns}}, DictBySrcAcc) ->
+ case couch_util:get_value(<<"map">>, MRFuns) of
+ MapSrc when MapSrc /= undefined ->
+ RedSrc = couch_util:get_value(<<"reduce">>, MRFuns, null),
+ {ViewOpts} = couch_util:get_value(<<"options">>, MRFuns, {[]}),
+ View = case dict:find({MapSrc, ViewOpts}, DictBySrcAcc) of
+ {ok, View0} -> View0;
+ error -> #mrview{def=MapSrc, options=ViewOpts}
+ end,
+ {MapNames, RedSrcs} = case RedSrc of
+ null ->
+ MNames = [Name | View#mrview.map_names],
+ {MNames, View#mrview.reduce_funs};
+ _ ->
+ RedFuns = [{Name, RedSrc} | View#mrview.reduce_funs],
+ {View#mrview.map_names, RedFuns}
+ end,
+ View2 = View#mrview{map_names=MapNames, reduce_funs=RedSrcs},
+ dict:store({MapSrc, ViewOpts}, View2, DictBySrcAcc);
+ undefined ->
+ DictBySrcAcc
+ end;
+ ({Name, Else}, DictBySrcAcc) ->
+ couch_log:error("design_doc_to_view_group ~s views ~p",
+ [Name, Else]),
+ DictBySrcAcc
+ end,
+ {DesignOpts} = proplists:get_value(<<"options">>, Fields, {[]}),
+ Partitioned = proplists:get_value(<<"partitioned">>, DesignOpts, false),
+
+ {RawViews} = couch_util:get_value(<<"views">>, Fields, {[]}),
+ BySrc = lists:foldl(MakeDict, dict:new(), RawViews),
+
+ NumViews = fun({_, View}, N) ->
+ {View#mrview{id_num = N}, N+1}
+ end,
+ {Views, _} = lists:mapfoldl(NumViews, 0, lists:sort(dict:to_list(BySrc))),
+
+ Language = couch_util:get_value(<<"language">>, Fields, <<"javascript">>),
+ Lib = couch_util:get_value(<<"lib">>, RawViews, {[]}),
+
+ IdxState = #mrst{
+ db_name=DbName,
+ idx_name=Id,
+ lib=Lib,
+ views=Views,
+ language=Language,
+ design_opts=DesignOpts,
+ partitioned=Partitioned
+ },
+ SigInfo = {Views, Language, DesignOpts, couch_index_util:sort_lib(Lib)},
+ {ok, IdxState#mrst{sig=couch_hash:md5_hash(term_to_binary(SigInfo))}}.
+
+
+validate_args(Args) ->
+ validate_args(Args, []).
+
+
+% This is mostly a copy of couch_mrview_util:validate_args/1 but it doesn't
+% update start / end keys and also throws a not_implemented error for reduce
+%
+validate_args(#mrargs{} = Args, Opts) ->
+ GroupLevel = determine_group_level(Args),
+ Reduce = Args#mrargs.reduce,
+
+ case Reduce == undefined orelse is_boolean(Reduce) of
+ true -> ok;
+ _ -> mrverror(<<"Invalid `reduce` value.">>)
+ end,
+
+ case {Args#mrargs.view_type, Reduce} of
+ {map, true} -> mrverror(<<"Reduce is invalid for map-only views.">>);
+ _ -> ok
+ end,
+
+ case {Args#mrargs.view_type, GroupLevel, Args#mrargs.keys} of
+ {red, exact, _} -> ok;
+ {red, _, KeyList} when is_list(KeyList) ->
+ Msg = <<"Multi-key fetchs for reduce views must use `group=true`">>,
+ mrverror(Msg);
+ _ -> ok
+ end,
+
+ case Args#mrargs.keys of
+ Keys when is_list(Keys) -> ok;
+ undefined -> ok;
+ _ -> mrverror(<<"`keys` must be an array of strings.">>)
+ end,
+
+ case {Args#mrargs.keys, Args#mrargs.start_key,
+ Args#mrargs.end_key} of
+ {undefined, _, _} -> ok;
+ {[], _, _} -> ok;
+ {[_|_], undefined, undefined} -> ok;
+ _ -> mrverror(<<"`keys` is incompatible with `key`"
+ ", `start_key` and `end_key`">>)
+ end,
+
+ case Args#mrargs.start_key_docid of
+ undefined -> ok;
+ SKDocId0 when is_binary(SKDocId0) -> ok;
+ _ -> mrverror(<<"`start_key_docid` must be a string.">>)
+ end,
+
+ case Args#mrargs.end_key_docid of
+ undefined -> ok;
+ EKDocId0 when is_binary(EKDocId0) -> ok;
+ _ -> mrverror(<<"`end_key_docid` must be a string.">>)
+ end,
+
+ case Args#mrargs.direction of
+ fwd -> ok;
+ rev -> ok;
+ _ -> mrverror(<<"Invalid direction.">>)
+ end,
+
+ case {Args#mrargs.limit >= 0, Args#mrargs.limit == undefined} of
+ {true, _} -> ok;
+ {_, true} -> ok;
+ _ -> mrverror(<<"`limit` must be a positive integer.">>)
+ end,
+
+ case Args#mrargs.skip < 0 of
+ true -> mrverror(<<"`skip` must be >= 0">>);
+ _ -> ok
+ end,
+
+ case {Args#mrargs.view_type, GroupLevel} of
+ {red, exact} -> ok;
+ {_, 0} -> ok;
+ {red, Int} when is_integer(Int), Int >= 0 -> ok;
+ {red, _} -> mrverror(<<"`group_level` must be >= 0">>);
+ {map, _} -> mrverror(<<"Invalid use of grouping on a map view.">>)
+ end,
+
+ case Args#mrargs.stable of
+ true -> ok;
+ false -> ok;
+ _ -> mrverror(<<"Invalid value for `stable`.">>)
+ end,
+
+ case Args#mrargs.update of
+ true -> ok;
+ false -> ok;
+ lazy -> ok;
+ _ -> mrverror(<<"Invalid value for `update`.">>)
+ end,
+
+ case is_boolean(Args#mrargs.inclusive_end) of
+ true -> ok;
+ _ -> mrverror(<<"Invalid value for `inclusive_end`.">>)
+ end,
+
+ case {Args#mrargs.view_type, Args#mrargs.include_docs} of
+ {red, true} -> mrverror(<<"`include_docs` is invalid for reduce">>);
+ {_, ID} when is_boolean(ID) -> ok;
+ _ -> mrverror(<<"Invalid value for `include_docs`">>)
+ end,
+
+ case {Args#mrargs.view_type, Args#mrargs.conflicts} of
+ {_, undefined} -> ok;
+ {map, V} when is_boolean(V) -> ok;
+ {red, undefined} -> ok;
+ {map, _} -> mrverror(<<"Invalid value for `conflicts`.">>);
+ {red, _} -> mrverror(<<"`conflicts` is invalid for reduce views.">>)
+ end,
+
+ case is_boolean(Args#mrargs.sorted) of
+ true -> ok;
+ _ -> mrverror(<<"Invalid value for `sorted`.">>)
+ end,
+
+ MaxPageSize = couch_util:get_value(page_size, Opts, 0),
+ case {Args#mrargs.page_size, MaxPageSize} of
+ {_, 0} -> ok;
+ {Value, _} -> validate_limit(<<"page_size">>, Value, 1, MaxPageSize)
+ end,
+
+ case {Args#mrargs.skip, MaxPageSize} of
+ {_, 0} -> ok;
+ {Skip, _} -> validate_limit(<<"skip">>, Skip, 0, MaxPageSize)
+ end,
+
+ case {is_list(Args#mrargs.keys), is_integer(Args#mrargs.page_size)} of
+ {true, true} ->
+ mrverror(<<"`page_size` is incompatible with `keys`">>);
+ _ ->
+ ok
+ end,
+
+ case {Reduce, Args#mrargs.view_type} of
+ {false, _} -> ok;
+ {_, red} -> throw(not_implemented);
+ _ -> ok
+ end,
+
+ Args#mrargs{group_level=GroupLevel}.
+
+validate_limit(Name, Value, _Min, _Max) when not is_integer(Value) ->
+ mrverror(<<"`", Name/binary, "` should be an integer">>);
+
+validate_limit(Name, Value, Min, Max) when Value > Max ->
+ range_error_msg(Name, Min, Max);
+
+validate_limit(Name, Value, Min, Max) when Value < Min ->
+ range_error_msg(Name, Min, Max);
+
+validate_limit(_Name, _Value, _Min, _Max) ->
+ ok.
+
+range_error_msg(Name, Min, Max) ->
+ MinBin = list_to_binary(integer_to_list(Min)),
+ MaxBin = list_to_binary(integer_to_list(Max)),
+ mrverror(<<
+ "`",
+ Name/binary,
+ "` should be an integer in range [",
+ MinBin/binary,
+ " .. ",
+ MaxBin/binary,
+ "]"
+ >>).
+
+
+determine_group_level(#mrargs{group=undefined, group_level=undefined}) ->
+ 0;
+
+determine_group_level(#mrargs{group=false, group_level=undefined}) ->
+ 0;
+
+determine_group_level(#mrargs{group=false, group_level=Level}) when Level > 0 ->
+ mrverror(<<"Can't specify group=false and group_level>0 at the same time">>);
+
+determine_group_level(#mrargs{group=true, group_level=undefined}) ->
+ exact;
+
+determine_group_level(#mrargs{group_level=GroupLevel}) ->
+ GroupLevel.
+
+
+mrverror(Mesg) ->
+ throw({query_parse_error, Mesg}).
+
+
+is_paginated(#mrargs{page_size = PageSize}) when is_integer(PageSize) ->
+ true;
+
+is_paginated(_) ->
+ false.
+
+
+active_tasks_info(ChangesDone, DbName, DDocId, LastSeq, DBSeq) ->
+ #{
+ <<"type">> => <<"indexer">>,
+ <<"database">> => DbName,
+ <<"changes_done">> => ChangesDone,
+ <<"design_document">> => DDocId,
+ <<"current_version_stamp">> => convert_seq_to_stamp(LastSeq),
+ <<"db_version_stamp">> => convert_seq_to_stamp(DBSeq),
+ <<"node">> => erlang:atom_to_binary(node(), utf8),
+ <<"pid">> => list_to_binary(pid_to_list(self()))
+ }.
+
+
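+% Render a db sequence as a "stamp-batch-docnumber" string derived from its
+% FDB versionstamp; "0-0-0" is used when there is no sequence yet.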
+convert_seq_to_stamp(<<"0">>) ->
+ <<"0-0-0">>;
+
+convert_seq_to_stamp(undefined) ->
+ <<"0-0-0">>;
+
+convert_seq_to_stamp(Seq) ->
+ {_, Stamp, Batch, DocNumber} = fabric2_fdb:seq_to_vs(Seq),
+ VS = integer_to_list(Stamp) ++ "-" ++ integer_to_list(Batch) ++ "-"
+ ++ integer_to_list(DocNumber),
+ list_to_binary(VS).
diff --git a/src/couch_views/test/couch_views_active_tasks_test.erl b/src/couch_views/test/couch_views_active_tasks_test.erl
new file mode 100644
index 000000000..c782ffcbd
--- /dev/null
+++ b/src/couch_views/test/couch_views_active_tasks_test.erl
@@ -0,0 +1,168 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_active_tasks_test).
+
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
+-include_lib("fabric/test/fabric2_test.hrl").
+
+
+-define(MAP_FUN1, <<"map_fun1">>).
+-define(MAP_FUN2, <<"map_fun2">>).
+-define(INDEX_FOO, <<"_design/foo">>).
+-define(INDEX_BAR, <<"_design/bar">>).
+-define(TOTAL_DOCS, 1000).
+
+
+setup() ->
+ Ctx = test_util:start_couch([
+ fabric,
+ couch_jobs,
+ couch_js,
+ couch_views
+ ]),
+ Ctx.
+
+
+cleanup(Ctx) ->
+ test_util:stop_couch(Ctx).
+
+
+foreach_setup() ->
+ {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+
+ DDoc = create_ddoc(?INDEX_FOO, ?MAP_FUN1),
+ Docs = make_docs(?TOTAL_DOCS),
+ fabric2_db:update_docs(Db, [DDoc | Docs]),
+
+ {Db, DDoc}.
+
+
+foreach_teardown({Db, _}) ->
+ meck:unload(),
+ ok = fabric2_db:delete(fabric2_db:name(Db), []).
+
+
+active_tasks_test_() ->
+ {
+ "Active Tasks test",
+ {
+ setup,
+ fun setup/0,
+ fun cleanup/1,
+ {
+ foreach,
+ fun foreach_setup/0,
+ fun foreach_teardown/1,
+ [
+ ?TDEF_FE(verify_basic_active_tasks),
+ ?TDEF_FE(verify_muliple_active_tasks)
+ ]
+ }
+ }
+ }.
+
+
+verify_basic_active_tasks({Db, DDoc}) ->
+ pause_indexer_for_changes(self()),
+ couch_views:build_indices(Db, [DDoc]),
+ {IndexerPid, {changes_done, ChangesDone}} = wait_to_reach_changes(10000),
+ [ActiveTask] = fabric2_active_tasks:get_active_tasks(),
+ ChangesDone1 = maps:get(<<"changes_done">>, ActiveTask),
+ Type = maps:get(<<"type">>, ActiveTask),
+ DbName = maps:get(<<"database">>, ActiveTask),
+ DDocId = maps:get(<<"design_document">>, ActiveTask),
+ Node = maps:get(<<"node">>, ActiveTask),
+ PidBin = maps:get(<<"pid">>, ActiveTask),
+ Pid = erlang:list_to_pid(binary_to_list(PidBin)),
+ ?assertEqual(<<"indexer">>, Type),
+ ?assertEqual(fabric2_db:name(Db), DbName),
+ ?assertEqual(?INDEX_FOO, DDocId),
+ ?assertEqual(atom_to_binary(node(), utf8), Node),
+ ?assert(is_pid(Pid)),
+ ?assert(is_process_alive(Pid)),
+ ?assertEqual(IndexerPid, Pid),
+ IndexerPid ! continue,
+ % we assume the indexer has run for a bit so it has to be > 0
+ ?assert(ChangesDone1 > 0),
+ ?assert(ChangesDone1 =< ChangesDone),
+ ?assertEqual(ChangesDone, ?TOTAL_DOCS).
+
+
+verify_muliple_active_tasks({Db, DDoc}) ->
+ DDoc2 = create_ddoc(?INDEX_BAR, ?MAP_FUN2),
+ fabric2_db:update_doc(Db, DDoc2, []),
+ pause_indexer_for_changes(self()),
+ couch_views:build_indices(Db, [DDoc, DDoc2]),
+
+ {IndexerPid, {changes_done, ChangesDone}} = wait_to_reach_changes(10000),
+ {IndexerPid2, {changes_done, ChangesDone2}} = wait_to_reach_changes(10000),
+
+ ActiveTasks = fabric2_active_tasks:get_active_tasks(),
+
+ ?assertEqual(length(ActiveTasks), 2),
+
+ IndexerPid ! continue,
+ IndexerPid2 ! continue,
+
+ ?assertEqual(ChangesDone, ?TOTAL_DOCS),
+ ?assertEqual(ChangesDone2, ?TOTAL_DOCS).
+
+
+create_ddoc(DDocId, IndexName) ->
+ couch_doc:from_json_obj({[
+ {<<"_id">>, DDocId},
+ {<<"views">>, {[
+ {IndexName, {[
+ {<<"map">>, <<"function(doc) {emit(doc.val, doc.val);}">>}
+ ]}}
+ ]}}
+ ]}).
+
+
+doc(Id, Val) ->
+ couch_doc:from_json_obj({[
+ {<<"_id">>, list_to_binary(integer_to_list(Id))},
+ {<<"val">>, Val}
+ ]}).
+
+
+make_docs(Count) ->
+ [doc(I, Count) || I <- lists:seq(1, Count)].
+
+
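+% Mock active_tasks_info/5 so the indexer blocks once it has processed all
+% ?TOTAL_DOCS changes, notifies the test process, and waits for `continue`
+% before carrying on.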
+pause_indexer_for_changes(ParentPid) ->
+ meck:new(couch_views_util, [passthrough]),
+ meck:expect(couch_views_util, active_tasks_info, fun(ChangesDone,
+ DbName, DDocId, LastSeq, DBSeq) ->
+ case ChangesDone of
+ ?TOTAL_DOCS ->
+ ParentPid ! {self(), {changes_done, ChangesDone}},
+ receive continue -> ok end;
+ _ ->
+ ok
+ end,
+ meck:passthrough([ChangesDone, DbName, DDocId, LastSeq,
+ DBSeq])
+ end).
+
+
+wait_to_reach_changes(Timeout) ->
+ receive
+ {Pid, {changes_done, ChangesDone}} when is_pid(Pid) ->
+ {Pid, {changes_done, ChangesDone}}
+ after Timeout ->
+ error(timeout_in_pause_indexer_for_changes)
+ end.
diff --git a/src/couch_views/test/couch_views_cleanup_test.erl b/src/couch_views/test/couch_views_cleanup_test.erl
new file mode 100644
index 000000000..e4dcdceea
--- /dev/null
+++ b/src/couch_views/test/couch_views_cleanup_test.erl
@@ -0,0 +1,411 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_cleanup_test).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("fabric/include/fabric2.hrl").
+-include_lib("fabric/test/fabric2_test.hrl").
+
+
+clean_old_indices_test_() ->
+ {
+ "Test cleanup of stale indices",
+ {
+ setup,
+ fun setup_all/0,
+ fun cleanup_all/1,
+ {
+ foreach,
+ fun setup/0,
+ fun cleanup/1,
+ [
+ ?TDEF_FE(empty_db),
+ ?TDEF_FE(db_with_no_ddocs),
+ ?TDEF_FE(db_with_ddoc),
+ ?TDEF_FE(db_with_many_ddocs),
+ ?TDEF_FE(after_ddoc_deletion),
+ ?TDEF_FE(all_ddocs_deleted),
+ ?TDEF_FE(after_ddoc_recreated),
+ ?TDEF_FE(refcounted_sigs),
+ ?TDEF_FE(removes_old_jobs),
+ ?TDEF_FE(after_job_accepted_initial_build),
+ ?TDEF_FE(after_job_accepted_rebuild),
+ ?TDEF_FE(during_index_initial_build),
+ ?TDEF_FE(during_index_rebuild)
+ ]
+ }
+ }
+ }.
+
+
+setup_all() ->
+ test_util:start_couch([
+ fabric,
+ couch_jobs,
+ couch_js,
+ couch_views
+ ]).
+
+
+cleanup_all(Ctx) ->
+ test_util:stop_couch(Ctx).
+
+
+setup() ->
+ Opts = [{user_ctx, ?ADMIN_USER}],
+ {ok, Db} = fabric2_db:create(?tempdb(), Opts),
+ Db.
+
+
+cleanup(Db) ->
+ meck:unload(),
+ ok = fabric2_db:delete(fabric2_db:name(Db), []).
+
+
+empty_db(Db) ->
+ ?assertEqual(ok, fabric2_index:cleanup(Db)).
+
+
+db_with_no_ddocs(Db) ->
+ create_docs(Db, 10),
+ ?assertEqual(ok, fabric2_index:cleanup(Db)).
+
+
+db_with_ddoc(Db) ->
+ create_docs(Db, 10),
+ DDoc = create_ddoc(Db, <<"foo">>),
+ ?assertEqual(10, length(run_query(Db, DDoc))),
+ ?assertEqual(ok, fabric2_index:cleanup(Db)),
+ ?assertEqual(10, length(run_query(Db, DDoc))).
+
+
+db_with_many_ddocs(Db) ->
+ create_docs(Db, 10),
+ DDocs = create_ddocs(Db, 5),
+ lists:foreach(fun(DDoc) ->
+ ?assertEqual(10, length(run_query(Db, DDoc)))
+ end, DDocs),
+ ?assertEqual(ok, fabric2_index:cleanup(Db)).
+
+
+after_ddoc_deletion(Db) ->
+ create_docs(Db, 10),
+ DDocs = create_ddocs(Db, 2),
+ lists:foreach(fun(DDoc) ->
+ ?assertEqual(10, length(run_query(Db, DDoc)))
+ end, DDocs),
+ [ToDel | RestDDocs] = DDocs,
+ delete_doc(Db, ToDel),
+ % Not yet cleaned up
+ ?assertEqual(true, view_has_data(Db, ToDel)),
+ ?assertEqual(ok, fabric2_index:cleanup(Db)),
+ ?assertError({ddoc_deleted, _}, run_query(Db, ToDel)),
+ lists:foreach(fun(DDoc) ->
+ ?assertEqual(10, length(run_query(Db, DDoc)))
+ end, RestDDocs).
+
+
+all_ddocs_deleted(Db) ->
+ create_docs(Db, 10),
+ DDocs = create_ddocs(Db, 5),
+ lists:foreach(fun(DDoc) ->
+ ?assertEqual(10, length(run_query(Db, DDoc)))
+ end, DDocs),
+ lists:foreach(fun(DDoc) ->
+ delete_doc(Db, DDoc)
+ end, DDocs),
+ % Not yet cleaned up
+ lists:foreach(fun(DDoc) ->
+ ?assertEqual(true, view_has_data(Db, DDoc))
+ end, DDocs),
+ ?assertEqual(ok, fabric2_index:cleanup(Db)),
+ lists:foreach(fun(DDoc) ->
+ ?assertError({ddoc_deleted, _}, run_query(Db, DDoc))
+ end, DDocs).
+
+
+after_ddoc_recreated(Db) ->
+ create_docs(Db, 10),
+ DDocs = create_ddocs(Db, 3),
+ lists:foreach(fun(DDoc) ->
+ ?assertEqual(10, length(run_query(Db, DDoc)))
+ end, DDocs),
+ [ToDel | RestDDocs] = DDocs,
+ Deleted = delete_doc(Db, ToDel),
+ % Not yet cleaned up
+ ?assertEqual(true, view_has_data(Db, ToDel)),
+ ?assertEqual(ok, fabric2_index:cleanup(Db)),
+ ?assertError({ddoc_deleted, _}, run_query(Db, ToDel)),
+ lists:foreach(fun(DDoc) ->
+ ?assertEqual(10, length(run_query(Db, DDoc)))
+ end, RestDDocs),
+ recreate_doc(Db, Deleted),
+ lists:foreach(fun(DDoc) ->
+ ?assertEqual(10, length(run_query(Db, DDoc)))
+ end, DDocs),
+ ?assertEqual(ok, fabric2_index:cleanup(Db)),
+ lists:foreach(fun(DDoc) ->
+ ?assertEqual(10, length(run_query(Db, DDoc)))
+ end, DDocs).
+
+
+refcounted_sigs(Db) ->
+ create_docs(Db, 10),
+ DDoc1 = create_ddoc(Db, <<"1">>),
+ DDoc2 = create_doc(Db, <<"_design/2">>, DDoc1#doc.body),
+ ?assertEqual(10, length(run_query(Db, DDoc1))),
+ ?assertEqual(10, length(run_query(Db, DDoc2))),
+
+ ?assertEqual(true, view_has_data(Db, DDoc1)),
+ ?assertEqual(true, view_has_data(Db, DDoc2)),
+
+ delete_doc(Db, DDoc1),
+ ok = fabric2_index:cleanup(Db),
+
+ ?assertEqual(true, view_has_data(Db, DDoc1)),
+ ?assertEqual(true, view_has_data(Db, DDoc2)),
+
+ delete_doc(Db, DDoc2),
+ ok = fabric2_index:cleanup(Db),
+
+ ?assertEqual(false, view_has_data(Db, DDoc1)),
+ ?assertEqual(false, view_has_data(Db, DDoc2)).
+
+
+removes_old_jobs(Db) ->
+ create_docs(Db, 10),
+ DDoc = create_ddoc(Db, <<"foo">>),
+
+ ?assertEqual(10, length(run_query(Db, DDoc))),
+ ?assertEqual(true, view_has_data(Db, DDoc)),
+ ?assertEqual(true, job_exists(Db, DDoc)),
+
+ delete_doc(Db, DDoc),
+ ?assertEqual(ok, fabric2_index:cleanup(Db)),
+
+ ?assertEqual(false, view_has_data(Db, DDoc)),
+ ?assertEqual(false, job_exists(Db, DDoc)).
+
+
+after_job_accepted_initial_build(Db) ->
+ cleanup_during_initial_build(Db, fun meck_intercept_job_accept/2).
+
+
+after_job_accepted_rebuild(Db) ->
+ cleanup_during_rebuild(Db, fun meck_intercept_job_accept/2).
+
+
+during_index_initial_build(Db) ->
+ cleanup_during_initial_build(Db, fun meck_intercept_job_update/2).
+
+
+during_index_rebuild(Db) ->
+ cleanup_during_rebuild(Db, fun meck_intercept_job_update/2).
+
+
+cleanup_during_initial_build(Db, InterruptFun) ->
+ InterruptFun(fabric2_db:name(Db), self()),
+
+ create_docs(Db, 10),
+ DDoc = create_ddoc(Db, <<"foo">>),
+
+ {_, Ref1} = spawn_monitor(fun() -> run_query(Db, DDoc) end),
+
+ receive {JobPid, triggered} -> ok end,
+ delete_doc(Db, DDoc),
+ ok = fabric2_index:cleanup(Db),
+ JobPid ! continue,
+
+ receive {'DOWN', Ref1, _, _, _} -> ok end,
+
+ ok = fabric2_index:cleanup(Db),
+ ?assertError({ddoc_deleted, _}, run_query(Db, DDoc)),
+
+ ?assertEqual(false, view_has_data(Db, DDoc)),
+ ?assertEqual(false, job_exists(Db, DDoc)).
+
+
+cleanup_during_rebuild(Db, InterruptFun) ->
+ create_docs(Db, 10),
+ DDoc = create_ddoc(Db, <<"foo">>),
+ ?assertEqual(10, length(run_query(Db, DDoc))),
+
+ InterruptFun(fabric2_db:name(Db), self()),
+
+ create_docs(Db, 10, 10),
+
+ {_, Ref1} = spawn_monitor(fun() -> run_query(Db, DDoc) end),
+
+ receive {JobPid, triggered} -> ok end,
+ delete_doc(Db, DDoc),
+ ok = fabric2_index:cleanup(Db),
+ JobPid ! continue,
+
+ receive {'DOWN', Ref1, _, _, _} -> ok end,
+
+ ok = fabric2_index:cleanup(Db),
+ ?assertError({ddoc_deleted, _}, run_query(Db, DDoc)),
+
+ ?assertEqual(false, view_has_data(Db, DDoc)),
+ ?assertEqual(false, job_exists(Db, DDoc)).
+
+
+
+run_query(Db, DDocId) when is_binary(DDocId) ->
+ {ok, DDoc} = fabric2_db:open_doc(Db, <<"_design/", DDocId/binary>>),
+ run_query(Db, DDoc);
+
+run_query(Db, DDoc) ->
+ Fun = fun default_cb/2,
+ {ok, Result} = couch_views:query(Db, DDoc, <<"bar">>, Fun, [], #{}),
+ Result.
+
+
+default_cb(complete, Acc) ->
+ {ok, lists:reverse(Acc)};
+default_cb({final, Info}, []) ->
+ {ok, [Info]};
+default_cb({final, _}, Acc) ->
+ {ok, Acc};
+default_cb({meta, _}, Acc) ->
+ {ok, Acc};
+default_cb(ok, ddoc_updated) ->
+ {ok, ddoc_updated};
+default_cb(Row, Acc) ->
+ {ok, [Row | Acc]}.
+
+
+view_has_data(Db, DDoc) ->
+ DbName = fabric2_db:name(Db),
+ {ok, #mrst{sig = Sig}} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = TxDb,
+ SigKeyTuple = {?DB_VIEWS, ?VIEW_INFO, ?VIEW_UPDATE_SEQ, Sig},
+ SigKey = erlfdb_tuple:pack(SigKeyTuple, DbPrefix),
+ SigVal = erlfdb:wait(erlfdb:get(Tx, SigKey)),
+
+ RangeKeyTuple = {?DB_VIEWS, ?VIEW_DATA, Sig},
+ RangeKey = erlfdb_tuple:pack(RangeKeyTuple, DbPrefix),
+ Range = erlfdb:wait(erlfdb:get_range_startswith(Tx, RangeKey)),
+
+ SigVal /= not_found andalso Range /= []
+ end).
+
+
+meck_intercept_job_accept(TgtDbName, ParentPid) ->
+ meck:new(fabric2_db, [passthrough]),
+ meck:expect(fabric2_db, open, fun
+ (DbName, Opts) when DbName == TgtDbName ->
+ Result = meck:passthrough([DbName, Opts]),
+ ParentPid ! {self(), triggered},
+ receive continue -> ok end,
+ meck:unload(),
+ Result;
+ (DbName, Opts) ->
+ meck:passthrough([DbName, Opts])
+ end).
+
+
+meck_intercept_job_update(_DbName, ParentPid) ->
+ meck:new(couch_jobs, [passthrough]),
+ meck:expect(couch_jobs, finish, fun(Tx, Job, Data) ->
+ ParentPid ! {self(), triggered},
+ receive continue -> ok end,
+ Result = meck:passthrough([Tx, Job, Data]),
+ meck:unload(),
+ Result
+ end).
+
+
+create_ddoc(Db, Id) ->
+ MapFunFmt = "function(doc) {var f = \"~s\"; emit(doc.val, f)}",
+ MapFun = io_lib:format(MapFunFmt, [Id]),
+ Body = {[
+ {<<"views">>, {[
+ {<<"bar">>, {[{<<"map">>, iolist_to_binary(MapFun)}]}}
+ ]}}
+ ]},
+ create_doc(Db, <<"_design/", Id/binary>>, Body).
+
+
+recreate_doc(Db, #doc{deleted = true} = Doc) ->
+ #doc{
+ id = DDocId,
+ body = Body
+ } = Doc,
+ create_doc(Db, DDocId, Body).
+
+
+create_ddocs(Db, Count) when is_integer(Count), Count > 1 ->
+ lists:map(fun(Seq) ->
+ Id = io_lib:format("~6..0b", [Seq]),
+ create_ddoc(Db, iolist_to_binary(Id))
+ end, lists:seq(1, Count)).
+
+
+create_doc(Db, Id) ->
+ create_doc(Db, Id, {[{<<"value">>, Id}]}).
+
+
+create_doc(Db, Id, Body) ->
+ Doc = #doc{
+ id = Id,
+ body = Body
+ },
+ {ok, {Pos, Rev}} = fabric2_db:update_doc(Db, Doc),
+ Doc#doc{revs = {Pos, [Rev]}}.
+
+
+create_docs(Db, Count) ->
+ create_docs(Db, Count, 0).
+
+
+create_docs(Db, Count, Offset) ->
+ lists:map(fun(Seq) ->
+ Id = io_lib:format("~6..0b", [Seq]),
+ create_doc(Db, iolist_to_binary(Id))
+ end, lists:seq(Offset + 1, Offset + Count)).
+
+
+delete_doc(Db, DDoc) ->
+ #doc{
+ revs = {_, [_ | _] = Revs}
+ } = DDoc,
+ {ok, {NewPos, Rev}} = fabric2_db:update_doc(Db, DDoc#doc{deleted = true}),
+ DDoc#doc{
+ revs = {NewPos, [Rev | Revs]},
+ deleted = true
+ }.
+
+
+job_exists(Db, DDoc) ->
+ JobId = job_id(Db, DDoc),
+ case couch_jobs:get_job_data(Db, ?INDEX_JOB_TYPE, JobId) of
+ {ok, _} -> true;
+ {error, not_found} -> false
+ end.
+
+
+job_id(Db, DDoc) ->
+ DbName = fabric2_db:name(Db),
+ {ok, #mrst{sig = Sig}} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
+ HexSig = fabric2_util:to_hex(Sig),
+ <<HexSig/binary, "-", DbName/binary>>.
diff --git a/src/couch_views/test/couch_views_encoding_test.erl b/src/couch_views/test/couch_views_encoding_test.erl
new file mode 100644
index 000000000..d15f616cb
--- /dev/null
+++ b/src/couch_views/test/couch_views_encoding_test.erl
@@ -0,0 +1,117 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_encoding_test).
+
+-include_lib("eunit/include/eunit.hrl").
+
+val_encoding_test() ->
+ Values = [
+ null,
+ true,
+ 1.0,
+ <<"a">>,
+ {[{<<"a">>, 1.0}, {<<"b">>, <<"hello">>}]}
+ ],
+ lists:foreach(fun (Val) ->
+ EncVal = couch_views_encoding:encode(Val),
+ ?assertEqual(Val, couch_views_encoding:decode(EncVal))
+ end, Values).
+
+
+setup() ->
+ % Load the ICU driver for couch_util:get_sort_key/1
+ {ok, CfgPid} = gen_server:start_link(config, [], []),
+ {ok, DrvPid} = gen_server:start_link(couch_drv, [], []),
+ {CfgPid, DrvPid}.
+
+
+teardown({CfgPid, DrvPid}) ->
+ unlink(CfgPid),
+ unlink(DrvPid),
+ exit(CfgPid, kill),
+ exit(DrvPid, kill).
+
+
+correct_ordering_test_() ->
+ {
+ setup,
+ fun setup/0,
+ fun teardown/1,
+ [
+ t_correct_ordering()
+ ]
+ }.
+
+
+t_correct_ordering() ->
+ ?_test(begin
+ Ordered = [
+ % Special values sort before all other types
+ null,
+ false,
+ true,
+
+ % Then numbers
+ 1,
+ 2,
+ 3.0,
+ 4,
+
+ % Then text, case sensitive
+ <<"a">>,
+ <<"A">>,
+ <<"aa">>,
+ <<"b">>,
+ <<"B">>,
+ <<"ba">>,
+ <<"bb">>,
+
+ % Then arrays, compared element by element until different.
+ % Longer arrays sort after their prefixes
+ [<<"a">>],
+ [<<"b">>],
+ [<<"b">>, <<"c">>],
+ [<<"b">>, <<"c">>, <<"a">>],
+ [<<"b">>, <<"d">>],
+ [<<"b">>, <<"d">>, <<"e">>],
+
+ % Then objects, compared key/value pair by pair until different.
+ % Larger objects sort after their subset objects
+ {[{<<"a">>, 1}]},
+ {[{<<"a">>, 2}]},
+ {[{<<"b">>, 1}]},
+ {[{<<"b">>, 2}]},
+
+ % Member order does matter for collation
+ {[{<<"b">>, 2}, {<<"a">>, 1}]},
+ {[{<<"b">>, 2}, {<<"c">>, 2}]}
+ ],
+
+ Encoded = lists:map(fun(Elem) ->
+ K = couch_views_encoding:encode(Elem, key),
+ V = couch_views_encoding:encode(Elem, value),
+ {K, V}
+ end, Ordered),
+ Shuffled = shuffle(Encoded),
+ Reordered = lists:sort(Shuffled),
+
+ lists:foreach(fun({Original, {_K, ViewEncoded}}) ->
+ ?assertEqual(Original, couch_views_encoding:decode(ViewEncoded))
+ end, lists:zip(Ordered, Reordered))
+ end).
+
+
+shuffle(List) when is_list(List) ->
+ Tagged = [{rand:uniform(), Item} || Item <- List],
+ {_, Randomized} = lists:unzip(lists:sort(Tagged)),
+ Randomized.
diff --git a/src/couch_views/test/couch_views_indexer_test.erl b/src/couch_views/test/couch_views_indexer_test.erl
new file mode 100644
index 000000000..cb8378f01
--- /dev/null
+++ b/src/couch_views/test/couch_views_indexer_test.erl
@@ -0,0 +1,754 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_indexer_test).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
+-include_lib("fabric/test/fabric2_test.hrl").
+
+
+-define(MAP_FUN1, <<"map_fun1">>).
+-define(MAP_FUN2, <<"map_fun2">>).
+
+
+indexer_test_() ->
+ {
+ "Test view indexing",
+ {
+ setup,
+ fun setup/0,
+ fun cleanup/1,
+ {
+ foreach,
+ fun foreach_setup/0,
+ fun foreach_teardown/1,
+ [
+ ?TDEF_FE(indexed_empty_db),
+ ?TDEF_FE(indexed_single_doc),
+ ?TDEF_FE(updated_docs_are_reindexed),
+ ?TDEF_FE(updated_docs_without_changes_are_reindexed),
+ ?TDEF_FE(deleted_docs_not_indexed),
+ ?TDEF_FE(deleted_docs_are_unindexed),
+ ?TDEF_FE(multipe_docs_with_same_key),
+ ?TDEF_FE(multipe_keys_from_same_doc),
+ ?TDEF_FE(multipe_identical_keys_from_same_doc),
+ ?TDEF_FE(fewer_multipe_identical_keys_from_same_doc),
+ ?TDEF_FE(multiple_design_docs),
+ ?TDEF_FE(handle_size_key_limits),
+ ?TDEF_FE(handle_size_value_limits),
+ ?TDEF_FE(index_autoupdater_callback),
+ ?TDEF_FE(handle_db_recreated_when_running),
+ ?TDEF_FE(handle_db_recreated_after_finished),
+ ?TDEF_FE(index_budget_is_changing),
+ ?TDEF_FE(index_can_recover_from_crash, 60)
+ ]
+ }
+ }
+ }.
+
+
+setup() ->
+ Ctx = test_util:start_couch([
+ fabric,
+ couch_jobs,
+ couch_js,
+ couch_views
+ ]),
+ Ctx.
+
+
+cleanup(Ctx) ->
+ test_util:stop_couch(Ctx).
+
+
+foreach_setup() ->
+ {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+ Db.
+
+
+foreach_teardown(Db) ->
+ meck:unload(),
+ config:delete("couch_views", "change_limit"),
+ ok = fabric2_db:delete(fabric2_db:name(Db), []).
+
+
+indexed_empty_db(Db) ->
+ DDoc = create_ddoc(),
+ {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+ ?assertEqual({ok, []}, run_query(Db, DDoc, ?MAP_FUN1)).
+
+
+indexed_single_doc(Db) ->
+ DDoc = create_ddoc(),
+ Doc1 = doc(0),
+
+ {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+ {ok, _} = fabric2_db:update_doc(Db, Doc1, []),
+
+ {ok, Out} = run_query(Db, DDoc, ?MAP_FUN1),
+
+ ?assertEqual([row(<<"0">>, 0, 0)], Out).
+
+
+updated_docs_are_reindexed(Db) ->
+ DDoc = create_ddoc(),
+ Doc1 = doc(0),
+
+ {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+ {ok, {Pos, Rev}} = fabric2_db:update_doc(Db, Doc1, []),
+
+ {ok, Out1} = run_query(Db, DDoc, ?MAP_FUN1),
+
+ ?assertEqual([row(<<"0">>, 0, 0)], Out1),
+
+ Doc2 = Doc1#doc{
+ revs = {Pos, [Rev]},
+ body = {[{<<"val">>, 1}]}
+ },
+ {ok, _} = fabric2_db:update_doc(Db, Doc2, []),
+
+ {ok, Out2} = run_query(Db, DDoc, ?MAP_FUN1),
+
+ ?assertEqual([row(<<"0">>, 1, 1)], Out2),
+
+ % Check that our id index is updated properly
+ % as well.
+ DbName = fabric2_db:name(Db),
+ {ok, Mrst} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
+ Sig = Mrst#mrst.sig,
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ ?assertMatch(
+ [{0, 1, _, [1]}],
+ couch_views_fdb:get_view_keys(TxDb, Sig, <<"0">>)
+ )
+ end).
+
+
+updated_docs_without_changes_are_reindexed(Db) ->
+ DDoc = create_ddoc(),
+ Doc1 = doc(0),
+
+ {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+ {ok, {Pos, Rev}} = fabric2_db:update_doc(Db, Doc1, []),
+
+ {ok, Out1} = run_query(Db, DDoc, ?MAP_FUN1),
+
+ ?assertEqual([row(<<"0">>, 0, 0)], Out1),
+
+ Doc2 = Doc1#doc{
+ revs = {Pos, [Rev]},
+ body = {[{<<"val">>, 0}]}
+ },
+ {ok, _} = fabric2_db:update_doc(Db, Doc2, []),
+
+ {ok, Out2} = run_query(Db, DDoc, ?MAP_FUN1),
+
+ ?assertEqual([row(<<"0">>, 0, 0)], Out2),
+
+ % Check fdb directly to make sure we've also
+ % removed the id idx keys properly.
+ DbName = fabric2_db:name(Db),
+ {ok, Mrst} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
+ Sig = Mrst#mrst.sig,
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ ?assertMatch(
+ [{0, 1, _, [0]}],
+ couch_views_fdb:get_view_keys(TxDb, Sig, <<"0">>)
+ )
+ end).
+
+
+deleted_docs_not_indexed(Db) ->
+ DDoc = create_ddoc(),
+ Doc1 = doc(0),
+
+ {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+ {ok, {Pos, Rev}} = fabric2_db:update_doc(Db, Doc1, []),
+ Doc2 = Doc1#doc{
+ revs = {Pos, [Rev]},
+ deleted = true,
+ body = {[{<<"val">>, 1}]}
+ },
+ {ok, _} = fabric2_db:update_doc(Db, Doc2, []),
+
+ ?assertEqual({ok, []}, run_query(Db, DDoc, ?MAP_FUN1)).
+
+
+deleted_docs_are_unindexed(Db) ->
+ DDoc = create_ddoc(),
+ Doc1 = doc(0),
+
+ {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+ {ok, {Pos, Rev}} = fabric2_db:update_doc(Db, Doc1, []),
+
+ {ok, Out1} = run_query(Db, DDoc, ?MAP_FUN1),
+ ?assertEqual([row(<<"0">>, 0, 0)], Out1),
+
+ Doc2 = Doc1#doc{
+ revs = {Pos, [Rev]},
+ deleted = true,
+ body = {[{<<"val">>, 1}]}
+ },
+ {ok, _} = fabric2_db:update_doc(Db, Doc2, []),
+
+ ?assertEqual({ok, []}, run_query(Db, DDoc, ?MAP_FUN1)),
+
+ % Check fdb directly to make sure we've also
+ % removed the id idx keys properly.
+ DbName = fabric2_db:name(Db),
+ {ok, Mrst} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
+ Sig = Mrst#mrst.sig,
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ ?assertEqual([], couch_views_fdb:get_view_keys(TxDb, Sig, <<"0">>))
+ end).
+
+
+multipe_docs_with_same_key(Db) ->
+ DDoc = create_ddoc(),
+ Doc1 = doc(0, 1),
+ Doc2 = doc(1, 1),
+
+ {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+ {ok, _} = fabric2_db:update_docs(Db, [Doc1, Doc2], []),
+
+ {ok, Out} = run_query(Db, DDoc, ?MAP_FUN1),
+
+ ?assertEqual([
+ row(<<"0">>, 1, 1),
+ row(<<"1">>, 1, 1)
+ ], Out).
+
+
+multipe_keys_from_same_doc(Db) ->
+ DDoc = create_ddoc(multi_emit_different),
+ Doc = doc(0, 1),
+
+ {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+ {ok, _} = fabric2_db:update_doc(Db, Doc, []),
+
+ {ok, Out} = run_query(Db, DDoc, ?MAP_FUN1),
+
+ ?assertEqual([
+ row(<<"0">>, 1, 1),
+ row(<<"0">>, <<"0">>, <<"0">>)
+ ], Out).
+
+
+multipe_identical_keys_from_same_doc(Db) ->
+ DDoc = create_ddoc(multi_emit_same),
+ Doc = doc(0, 1),
+
+ {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+ {ok, _} = fabric2_db:update_doc(Db, Doc, []),
+
+ {ok, Out} = run_query(Db, DDoc, ?MAP_FUN1),
+
+ ?assertEqual([
+ row(<<"0">>, 1, 1),
+ row(<<"0">>, 1, 2)
+ ], Out).
+
+
+fewer_multipe_identical_keys_from_same_doc(Db) ->
+ DDoc = create_ddoc(multi_emit_same),
+ Doc0 = #doc{
+ id = <<"0">>,
+ body = {[{<<"val">>, 1}, {<<"extra">>, 3}]}
+ },
+
+ {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+ {ok, {Pos, Rev}} = fabric2_db:update_doc(Db, Doc0, []),
+
+ {ok, Out1} = run_query(Db, DDoc, ?MAP_FUN1),
+
+ ?assertEqual([
+ row(<<"0">>, 1, 1),
+ row(<<"0">>, 1, 2),
+ row(<<"0">>, 1, 3)
+ ], Out1),
+
+ Doc1 = #doc{
+ id = <<"0">>,
+ revs = {Pos, [Rev]},
+ body = {[{<<"val">>, 1}]}
+ },
+ {ok, _} = fabric2_db:update_doc(Db, Doc1, []),
+
+ {ok, Out2} = run_query(Db, DDoc, ?MAP_FUN1),
+
+ ?assertEqual([
+ row(<<"0">>, 1, 1),
+ row(<<"0">>, 1, 2)
+ ], Out2).
+
+
+handle_size_key_limits(Db) ->
+ ok = meck:new(config, [passthrough]),
+ ok = meck:expect(config, get_integer, fun(Section, Key, Default) ->
+ case Section == "couch_views" andalso Key == "key_size_limit" of
+ true -> 15;
+ _ -> Default
+ end
+ end),
+
+ DDoc = create_ddoc(multi_emit_key_limit),
+ Docs = [doc(1, 2)] ++ [doc(2, 1)],
+
+ {ok, _} = fabric2_db:update_docs(Db, [DDoc | Docs], []),
+
+ {ok, Out} = run_query(Db, DDoc, ?MAP_FUN1),
+
+ ?assertEqual([row(<<"1">>, 2, 2)], Out),
+
+ {ok, Doc} = fabric2_db:open_doc(Db, <<"2">>),
+ Doc2 = Doc#doc {
+ body = {[{<<"val">>, 2}]}
+ },
+ {ok, _} = fabric2_db:update_doc(Db, Doc2),
+
+ {ok, Out1} = run_query(Db, DDoc, ?MAP_FUN1),
+
+ ?assertEqual([
+ row(<<"1">>, 2, 2),
+ row(<<"2">>, 2, 2)
+ ], Out1).
+
+
+handle_size_value_limits(Db) ->
+ ok = meck:new(config, [passthrough]),
+ ok = meck:expect(config, get_integer, fun(Section, _, Default) ->
+ case Section of
+ "couch_views" -> 15;
+ _ -> Default
+ end
+ end),
+
+ DDoc = create_ddoc(multi_emit_key_limit),
+ Docs = [doc(1, 2)] ++ [doc(2, 3)],
+
+ {ok, _} = fabric2_db:update_docs(Db, [DDoc | Docs], []),
+
+ {ok, Out} = run_query(Db, DDoc, ?MAP_FUN2),
+
+ ?assertEqual([
+ row(<<"1">>, 2, 2),
+ row(<<"2">>, 3, 3),
+ row(<<"1">>, 22, 2),
+ row(<<"2">>, 23, 3)
+ ], Out),
+
+ {ok, Doc} = fabric2_db:open_doc(Db, <<"1">>),
+ Doc2 = Doc#doc{
+ body = {[{<<"val">>, 1}]}
+ },
+ {ok, _} = fabric2_db:update_doc(Db, Doc2),
+
+ {ok, Out1} = run_query(Db, DDoc, ?MAP_FUN2),
+
+ ?assertEqual([
+ row(<<"2">>, 3, 3),
+ row(<<"2">>, 23, 3)
+ ], Out1).
+
+
+index_autoupdater_callback(Db) ->
+ DDoc = create_ddoc(),
+ Doc1 = doc(0),
+ {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+ {ok, _} = fabric2_db:update_doc(Db, Doc1, []),
+
+ DbSeq = fabric2_db:get_update_seq(Db),
+
+ Result = couch_views:build_indices(Db, [DDoc]),
+ ?assertMatch([{ok, <<_/binary>>}], Result),
+ [{ok, JobId}] = Result,
+
+ ?assertEqual(ok, couch_views_jobs:wait_for_job(JobId, DDoc#doc.id, DbSeq)).
+
+index_budget_is_changing(Db) ->
+ ok = meck:new(couch_rate, [passthrough]),
+ ok = meck:expect(couch_rate, budget, fun(State) ->
+ meck:passthrough([State])
+ end),
+
+ LimiterOpts = #{
+ budget => 100,
+ sensitivity => 500,
+ target => 500,
+ timer => fun timer/0,
+ window => 2000
+ },
+
+ ok = meck:expect(couch_rate, create_if_missing, fun(Id, Module, Store, _Options) ->
+ meck:passthrough([Id, Module, Store, LimiterOpts])
+ end),
+
+ ok = meck:expect(couch_rate, wait, fun(State) ->
+ Delay = couch_rate:delay(State),
+ put(time, timer() + Delay - 1)
+ end),
+
+ DDoc = create_ddoc(),
+ Docs = lists:map(fun doc/1, lists:seq(1, 200)),
+
+ {ok, _} = fabric2_db:update_docs(Db, [DDoc | Docs], []),
+
+ {ok, _Out} = couch_views:query(
+ Db,
+ DDoc,
+ <<"map_fun2">>,
+ fun fold_fun/2,
+ [],
+ #mrargs{}
+ ),
+ ?assert(length(lists:usort(budget_history())) > 1).
+
+
+timer() ->
+ get(time) == undefined andalso put(time, 1),
+ Now = get(time),
+ put(time, Now + 1),
+ Now.
+
+
+budget_history() ->
+ [Result || {_Pid, {couch_rate, budget, _}, Result} <- meck:history(couch_rate)].
+
+
+multiple_design_docs(Db) ->
+ Cleanup = fun() ->
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ DDocs = fabric2_db:get_design_docs(Db),
+ ok = couch_views:cleanup_indices(TxDb, DDocs)
+ end)
+ end,
+
+ % This is how we check that no index updates took place
+ meck:new(couch_views_fdb, [passthrough]),
+ meck:expect(couch_views_fdb, write_doc, fun(TxDb, Sig, ViewIds, Doc) ->
+ meck:passthrough([TxDb, Sig, ViewIds, Doc])
+ end),
+
+ DDoc1 = create_ddoc(simple, <<"_design/bar1">>),
+ DDoc2 = create_ddoc(simple, <<"_design/bar2">>),
+
+ {ok, _} = fabric2_db:update_doc(Db, doc(0), []),
+ {ok, {Pos1, Rev1}} = fabric2_db:update_doc(Db, DDoc1, []),
+ ?assertEqual({ok, [row(<<"0">>, 0, 0)]}, run_query(Db, DDoc1, ?MAP_FUN1)),
+
+ % Because run_query/3 can return, and unsubscribe from the job,
+ % before the indexing job actually finishes, ensure we wait for the
+ % job to finish so we get a deterministic setup every time.
+ JobId = get_job_id(Db, DDoc1),
+ ?assertEqual(ok, wait_job_finished(JobId, 5000)),
+
+ % Add the second ddoc with same view as first one.
+ {ok, {Pos2, Rev2}} = fabric2_db:update_doc(Db, DDoc2, []),
+
+ DDoc1Del = DDoc1#doc{revs = {Pos1, [Rev1]}, deleted = true},
+ {ok, _} = fabric2_db:update_doc(Db, DDoc1Del, []),
+
+ Cleanup(),
+
+ meck:reset(couch_views_fdb),
+ ?assertEqual({ok, [row(<<"0">>, 0, 0)]}, run_query(Db, DDoc2, ?MAP_FUN1)),
+ ?assertEqual(ok, wait_job_finished(JobId, 5000)),
+ ?assertEqual(0, meck:num_calls(couch_views_fdb, write_doc, 4)),
+
+ DDoc2Del = DDoc2#doc{revs = {Pos2, [Rev2]}, deleted = true},
+ {ok, _} = fabric2_db:update_doc(Db, DDoc2Del, []),
+
+ Cleanup(),
+
+ % After the last ddoc is deleted we should get an error
+ ?assertError({ddoc_deleted, _}, run_query(Db, DDoc2, ?MAP_FUN1)).
+
+
+handle_db_recreated_when_running(Db) ->
+ DbName = fabric2_db:name(Db),
+
+ DDoc = create_ddoc(),
+ {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+ {ok, _} = fabric2_db:update_doc(Db, doc(0), []),
+ {ok, _} = fabric2_db:update_doc(Db, doc(1), []),
+
+ % To intercept the indexing job while it is running, ensure updates
+ % happen one row at a time.
+ ok = meck:new(couch_rate, [passthrough]),
+ ok = meck:expect(couch_rate, budget, ['_'], meck:val(1)),
+
+ meck_intercept_job_update(self()),
+
+ [{ok, JobId}] = couch_views:build_indices(Db, [DDoc]),
+
+ {Indexer, _Job, _Data} = wait_indexer_update(10000),
+
+ {ok, State} = couch_jobs:get_job_state(undefined, ?INDEX_JOB_TYPE, JobId),
+ ?assertEqual(running, State),
+
+ {ok, SubId, running, _} = couch_jobs:subscribe(?INDEX_JOB_TYPE, JobId),
+
+ ok = fabric2_db:delete(DbName, []),
+ {ok, Db1} = fabric2_db:create(DbName, [?ADMIN_CTX]),
+
+ Indexer ! continue,
+
+ ?assertMatch({
+ ?INDEX_JOB_TYPE,
+ JobId,
+ finished,
+ #{<<"error">> := <<"db_deleted">>}
+ }, couch_jobs:wait(SubId, infinity)),
+
+ {ok, _} = fabric2_db:update_doc(Db1, DDoc, []),
+ {ok, _} = fabric2_db:update_doc(Db1, doc(2), []),
+ {ok, _} = fabric2_db:update_doc(Db1, doc(3), []),
+
+ reset_intercept_job_update(Indexer),
+
+ {ok, Out2} = run_query(Db1, DDoc, ?MAP_FUN1),
+ ?assertEqual([
+ row(<<"2">>, 2, 2),
+ row(<<"3">>, 3, 3)
+ ], Out2).
+
+
+handle_db_recreated_after_finished(Db) ->
+ DbName = fabric2_db:name(Db),
+
+ DDoc = create_ddoc(),
+ {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+ {ok, _} = fabric2_db:update_doc(Db, doc(0), []),
+ {ok, _} = fabric2_db:update_doc(Db, doc(1), []),
+
+ {ok, Out1} = run_query(Db, DDoc, ?MAP_FUN1),
+ ?assertEqual([
+ row(<<"0">>, 0, 0),
+ row(<<"1">>, 1, 1)
+ ], Out1),
+
+ ok = fabric2_db:delete(DbName, []),
+
+ ?assertError(database_does_not_exist, run_query(Db, DDoc, ?MAP_FUN1)),
+
+ {ok, Db1} = fabric2_db:create(DbName, [?ADMIN_CTX]),
+
+ {ok, _} = fabric2_db:update_doc(Db1, DDoc, []),
+ {ok, _} = fabric2_db:update_doc(Db1, doc(2), []),
+ {ok, _} = fabric2_db:update_doc(Db1, doc(3), []),
+
+ ?assertError(database_does_not_exist, run_query(Db, DDoc, ?MAP_FUN1)),
+
+ {ok, Out2} = run_query(Db1, DDoc, ?MAP_FUN1),
+ ?assertEqual([
+ row(<<"2">>, 2, 2),
+ row(<<"3">>, 3, 3)
+ ], Out2).
+
+
+index_can_recover_from_crash(Db) ->
+ ok = meck:new(config, [passthrough]),
+ ok = meck:expect(config, get_integer, fun(Section, Key, Default) ->
+ case Section == "couch_views" andalso Key == "change_limit" of
+ true -> 1;
+ _ -> Default
+ end
+ end),
+ meck:new(couch_eval, [passthrough]),
+ meck:expect(couch_eval, map_docs, fun(State, Docs) ->
+ Doc = hd(Docs),
+ case Doc#doc.id == <<"2">> of
+ true ->
+ % remove the mock so that next time the doc is processed
+ % it will work
+ meck:unload(couch_eval),
+ throw({fake_crash, test_jobs_restart});
+ false ->
+ meck:passthrough([State, Docs])
+ end
+ end),
+
+ DDoc = create_ddoc(),
+ Docs = make_docs(3),
+ {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+ {ok, _} = fabric2_db:update_docs(Db, Docs, []),
+
+ {ok, Out} = run_query(Db, DDoc, ?MAP_FUN1),
+ ?assertEqual([
+ row(<<"1">>, 1, 1),
+ row(<<"2">>, 2, 2),
+ row(<<"3">>, 3, 3)
+ ], Out).
+
+
+row(Id, Key, Value) ->
+ {row, [
+ {id, Id},
+ {key, Key},
+ {value, Value}
+ ]}.
+
+
+fold_fun({meta, _Meta}, Acc) ->
+ {ok, Acc};
+fold_fun({row, _} = Row, Acc) ->
+ {ok, [Row | Acc]};
+fold_fun(complete, Acc) ->
+ {ok, lists:reverse(Acc)}.
+
+
+create_ddoc() ->
+ create_ddoc(simple).
+
+
+create_ddoc(Type) ->
+ create_ddoc(Type, <<"_design/bar">>).
+
+
+create_ddoc(simple, DocId) when is_binary(DocId) ->
+ couch_doc:from_json_obj({[
+ {<<"_id">>, DocId},
+ {<<"views">>, {[
+ {?MAP_FUN1, {[
+ {<<"map">>, <<"function(doc) {emit(doc.val, doc.val);}">>}
+ ]}},
+ {?MAP_FUN2, {[
+ {<<"map">>, <<"function(doc) {}">>}
+ ]}}
+ ]}}
+ ]});
+
+create_ddoc(multi_emit_different, DocId) when is_binary(DocId) ->
+ couch_doc:from_json_obj({[
+ {<<"_id">>, DocId},
+ {<<"views">>, {[
+ {?MAP_FUN1, {[
+ {<<"map">>, <<"function(doc) { "
+ "emit(doc._id, doc._id); "
+ "emit(doc.val, doc.val); "
+ "}">>}
+ ]}},
+ {?MAP_FUN2, {[
+ {<<"map">>, <<"function(doc) {}">>}
+ ]}}
+ ]}}
+ ]});
+
+create_ddoc(multi_emit_same, DocId) when is_binary(DocId) ->
+ couch_doc:from_json_obj({[
+ {<<"_id">>, DocId},
+ {<<"views">>, {[
+ {?MAP_FUN1, {[
+ {<<"map">>, <<"function(doc) { "
+ "emit(doc.val, doc.val * 2); "
+ "emit(doc.val, doc.val); "
+ "if(doc.extra) {"
+ " emit(doc.val, doc.extra);"
+ "}"
+ "}">>}
+ ]}},
+ {?MAP_FUN2, {[
+ {<<"map">>, <<"function(doc) {}">>}
+ ]}}
+ ]}}
+ ]});
+
+create_ddoc(multi_emit_key_limit, DocId) when is_binary(DocId) ->
+ couch_doc:from_json_obj({[
+ {<<"_id">>, DocId},
+ {<<"views">>, {[
+ {?MAP_FUN1, {[
+ {<<"map">>, <<"function(doc) { "
+ "if (doc.val === 1) { "
+ "emit('a very long string to be limited', doc.val);"
+ "} else {"
+ "emit(doc.val, doc.val)"
+ "}"
+ "}">>}
+ ]}},
+ {?MAP_FUN2, {[
+ {<<"map">>, <<"function(doc) { "
+ "emit(doc.val + 20, doc.val);"
+ "if (doc.val === 1) { "
+ "emit(doc.val, 'a very long string to be limited');"
+ "} else {"
+ "emit(doc.val, doc.val)"
+ "}"
+ "}">>}
+ ]}}
+ ]}}
+ ]}).
+
+
+make_docs(Count) ->
+ [doc(I) || I <- lists:seq(1, Count)].
+
+
+doc(Id) ->
+ doc(Id, Id).
+
+
+doc(Id, Val) ->
+ couch_doc:from_json_obj({[
+ {<<"_id">>, list_to_binary(integer_to_list(Id))},
+ {<<"val">>, Val}
+ ]}).
+
+
+run_query(#{} = Db, DDoc, <<_/binary>> = View) ->
+ couch_views:query(Db, DDoc, View, fun fold_fun/2, [], #mrargs{}).
+
+
+get_job_id(#{} = Db, DDoc) ->
+ DbName = fabric2_db:name(Db),
+ {ok, Mrst} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
+ couch_views_jobs:job_id(Db, Mrst).
+
+
+wait_job_finished(JobId, Timeout) ->
+ case couch_jobs:subscribe(?INDEX_JOB_TYPE, JobId) of
+ {ok, Sub, _, _} ->
+ case couch_jobs:wait(Sub, finished, Timeout) of
+ {?INDEX_JOB_TYPE, _, _, _} -> ok;
+ timeout -> timeout
+ end;
+ {ok, finished, _} ->
+ ok
+ end.
+
+
+meck_intercept_job_update(ParentPid) ->
+ meck:new(couch_jobs, [passthrough]),
+ meck:expect(couch_jobs, update, fun(Db, Job, Data) ->
+ ParentPid ! {self(), Job, Data},
+ receive continue -> ok end,
+ meck:passthrough([Db, Job, Data])
+ end).
+
+
+reset_intercept_job_update(IndexerPid) ->
+ meck:expect(couch_jobs, update, fun(Db, Job, Data) ->
+ meck:passthrough([Db, Job, Data])
+ end),
+ IndexerPid ! continue.
+
+
+wait_indexer_update(Timeout) ->
+ receive
+ {Pid, Job, Data} when is_pid(Pid) -> {Pid, Job, Data}
+ after Timeout ->
+ error(timeout_in_wait_indexer_update)
+ end.
diff --git a/src/couch_views/test/couch_views_info_test.erl b/src/couch_views/test/couch_views_info_test.erl
new file mode 100644
index 000000000..993801a0d
--- /dev/null
+++ b/src/couch_views/test/couch_views_info_test.erl
@@ -0,0 +1,174 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_info_test).
+
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("fabric/test/fabric2_test.hrl").
+
+
+-define(MAP_FUN1, <<"map_fun1">>).
+
+
+setup() ->
+ Ctx = test_util:start_couch([
+ fabric,
+ couch_jobs,
+ couch_js,
+ couch_views
+ ]),
+ Ctx.
+
+
+cleanup(Ctx) ->
+ test_util:stop_couch(Ctx).
+
+
+foreach_setup() ->
+ {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+ DDoc = create_ddoc(),
+ Doc1 = doc(0, 1),
+
+ {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+ {ok, _} = fabric2_db:update_doc(Db, Doc1, []),
+
+ run_query(Db, DDoc, ?MAP_FUN1),
+ {Db, DDoc}.
+
+
+foreach_teardown({Db, _}) ->
+ meck:unload(),
+ ok = fabric2_db:delete(fabric2_db:name(Db), []).
+
+
+views_info_test_() ->
+ {
+ "Views index info test",
+ {
+ setup,
+ fun setup/0,
+ fun cleanup/1,
+ {
+ foreach,
+ fun foreach_setup/0,
+ fun foreach_teardown/1,
+ [
+ ?TDEF_FE(sig_is_binary),
+ ?TDEF_FE(language_is_js),
+ ?TDEF_FE(update_seq_is_binary),
+ ?TDEF_FE(updater_running_is_boolean),
+ ?TDEF_FE(active_size_is_non_neg_int),
+ ?TDEF_FE(update_opts_is_bin_list)
+ ]
+ }
+ }
+ }.
+
+
+sig_is_binary({Db, DDoc}) ->
+ {ok, Info} = couch_views:get_info(Db, DDoc),
+ ?assert(is_binary(prop(signature, Info))).
+
+
+language_is_js({Db, DDoc}) ->
+ {ok, Info} = couch_views:get_info(Db, DDoc),
+ ?assertEqual(<<"javascript">>, prop(language, Info)).
+
+
+active_size_is_non_neg_int({Db, DDoc}) ->
+ {ok, Info} = couch_views:get_info(Db, DDoc),
+ ?assert(check_non_neg_int([sizes, active], Info)).
+
+
+updater_running_is_boolean({Db, DDoc}) ->
+ meck:new(couch_jobs, [passthrough]),
+
+ meck:expect(couch_jobs, get_job_state, 3, meck:val({ok, running})),
+ {ok, Info1} = couch_views:get_info(Db, DDoc),
+ ?assert(prop(updater_running, Info1)),
+
+ meck:expect(couch_jobs, get_job_state, 3, meck:val({ok, pending})),
+ {ok, Info2} = couch_views:get_info(Db, DDoc),
+ ?assert(prop(updater_running, Info2)),
+
+ meck:expect(couch_jobs, get_job_state, 3, meck:val({ok, finished})),
+ {ok, Info3} = couch_views:get_info(Db, DDoc),
+ ?assert(not prop(updater_running, Info3)),
+
+ meck:expect(couch_jobs, get_job_state, 3, meck:val({error, not_found})),
+ {ok, Info4} = couch_views:get_info(Db, DDoc),
+ ?assert(not prop(updater_running, Info4)).
+
+
+update_seq_is_binary({Db, DDoc}) ->
+ {ok, Info} = couch_views:get_info(Db, DDoc),
+ ?assert(is_binary(prop(update_seq, Info))).
+
+
+update_opts_is_bin_list({Db, DDoc}) ->
+ {ok, Info} = couch_views:get_info(Db, DDoc),
+ Opts = prop(update_options, Info),
+ ?assert(is_list(Opts) andalso
+ (Opts == [] orelse lists:all(fun is_binary/1, Opts))).
+
+
+check_non_neg_int(Key, Info) ->
+ Size = prop(Key, Info),
+ is_integer(Size) andalso Size >= 0.
+
+
+prop(Key, {Props}) when is_list(Props) ->
+ prop(Key, Props);
+
+prop([Key], Info) ->
+ prop(Key, Info);
+
+prop([Key | Rest], Info) ->
+ prop(Rest, prop(Key, Info));
+
+prop(Key, Info) when is_atom(Key), is_list(Info) ->
+ couch_util:get_value(Key, Info).
+
+
+create_ddoc() ->
+ couch_doc:from_json_obj({[
+ {<<"_id">>, <<"_design/bar">>},
+ {<<"views">>, {[
+ {?MAP_FUN1, {[
+ {<<"map">>, <<"function(doc) {emit(doc.val, doc.val);}">>}
+ ]}}
+ ]}}
+ ]}).
+
+
+doc(Id, Val) ->
+ couch_doc:from_json_obj({[
+ {<<"_id">>, list_to_binary(integer_to_list(Id))},
+ {<<"val">>, Val}
+ ]}).
+
+
+fold_fun({meta, _Meta}, Acc) ->
+ {ok, Acc};
+
+fold_fun({row, _} = Row, Acc) ->
+ {ok, [Row | Acc]};
+
+fold_fun(complete, Acc) ->
+ {ok, lists:reverse(Acc)}.
+
+
+run_query(#{} = Db, DDoc, <<_/binary>> = View) ->
+ couch_views:query(Db, DDoc, View, fun fold_fun/2, [], #mrargs{}).
diff --git a/src/couch_views/test/couch_views_map_test.erl b/src/couch_views/test/couch_views_map_test.erl
new file mode 100644
index 000000000..c419546e1
--- /dev/null
+++ b/src/couch_views/test/couch_views_map_test.erl
@@ -0,0 +1,610 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_map_test).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include("couch_views.hrl").
+
+
+-define(TDEF(A), {atom_to_list(A), fun A/0}).
+
+
+setup() ->
+ test_util:start_couch([
+ fabric,
+ couch_jobs,
+ couch_js,
+ couch_views
+ ]).
+
+
+teardown(State) ->
+ test_util:stop_couch(State).
+
+
+map_views_test_() ->
+ {
+ "Map views",
+ {
+ setup,
+ fun setup/0,
+ fun teardown/1,
+ [
+ ?TDEF(should_map),
+ ?TDEF(should_map_with_startkey),
+ ?TDEF(should_map_with_endkey),
+ ?TDEF(should_map_with_endkey_not_inclusive),
+ ?TDEF(should_map_reverse_and_limit),
+ ?TDEF(should_map_with_range_reverse),
+ ?TDEF(should_map_with_limit_and_skip),
+ ?TDEF(should_map_with_limit_and_skip_reverse),
+ ?TDEF(should_map_with_include_docs),
+ ?TDEF(should_map_with_include_docs_reverse),
+ ?TDEF(should_map_with_startkey_with_key_array),
+ ?TDEF(should_map_with_startkey_and_endkey_with_key_array),
+ ?TDEF(should_map_empty_views),
+ ?TDEF(should_map_duplicate_keys),
+ ?TDEF(should_map_with_doc_emit),
+ ?TDEF(should_map_update_is_false),
+ ?TDEF(should_map_update_is_lazy),
+ ?TDEF(should_map_wait_for_interactive),
+ ?TDEF(should_map_local_seq)
+ % fun should_give_ext_size_seq_indexed_test/1
+ ]
+ }
+ }.
+
+
+should_map() ->
+ Result = run_query(<<"baz">>, #{}),
+ Expect = {ok, [
+ {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
+ {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+ {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+ {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+ {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
+ {row, [{id, <<"6">>}, {key, 6}, {value, 6}]},
+ {row, [{id, <<"7">>}, {key, 7}, {value, 7}]},
+ {row, [{id, <<"8">>}, {key, 8}, {value, 8}]},
+ {row, [{id, <<"9">>}, {key, 9}, {value, 9}]},
+ {row, [{id, <<"10">>}, {key, 10}, {value, 10}]}
+ ]},
+ ?assertEqual(Expect, Result).
+
+
+should_map_with_startkey() ->
+ Result = run_query(<<"baz">>, #{start_key => 4}),
+ Expect = {ok, [
+ {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+ {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
+ {row, [{id, <<"6">>}, {key, 6}, {value, 6}]},
+ {row, [{id, <<"7">>}, {key, 7}, {value, 7}]},
+ {row, [{id, <<"8">>}, {key, 8}, {value, 8}]},
+ {row, [{id, <<"9">>}, {key, 9}, {value, 9}]},
+ {row, [{id, <<"10">>}, {key, 10}, {value, 10}]}
+ ]},
+ ?assertEqual(Expect, Result).
+
+
+should_map_with_endkey() ->
+ Result = run_query(<<"baz">>, #{end_key => 5}),
+ Expect = {ok, [
+ {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
+ {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+ {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+ {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+ {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
+ ]},
+ ?assertEqual(Expect, Result).
+
+
+should_map_with_endkey_not_inclusive() ->
+ Result = run_query(<<"baz">>, #{
+ end_key => 5,
+ inclusive_end => false
+ }),
+ Expect = {ok, [
+ {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
+ {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+ {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+ {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}
+ ]},
+ ?assertEqual(Expect, Result).
+
+
+should_map_reverse_and_limit() ->
+ Result = run_query(<<"baz">>, #{
+ direction => rev,
+ limit => 3
+ }),
+ Expect = {ok, [
+ {row, [{id, <<"10">>}, {key, 10}, {value, 10}]},
+ {row, [{id, <<"9">>}, {key, 9}, {value, 9}]},
+ {row, [{id, <<"8">>}, {key, 8}, {value, 8}]}
+ ]},
+ ?assertEqual(Expect, Result).
+
+
+should_map_with_range_reverse() ->
+ Result = run_query(<<"baz">>, #{
+ direction => rev,
+ start_key => 5,
+ end_key => 3,
+ inclusive_end => true
+ }),
+ Expect = {ok, [
+ {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
+ {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+ {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}
+ ]},
+ ?assertEqual(Expect, Result).
+
+
+should_map_with_limit_and_skip() ->
+ Result = run_query(<<"baz">>, #{
+ start_key => 2,
+ limit => 3,
+ skip => 3
+ }),
+ Expect = {ok, [
+ {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
+ {row, [{id, <<"6">>}, {key, 6}, {value, 6}]},
+ {row, [{id, <<"7">>}, {key, 7}, {value, 7}]}
+ ]},
+ ?assertEqual(Expect, Result).
+
+
+should_map_with_limit_and_skip_reverse() ->
+ Result = run_query(<<"baz">>, #{
+ start_key => 10,
+ limit => 3,
+ skip => 3,
+ direction => rev
+ }),
+ Expect = {ok, [
+ {row, [{id, <<"7">>}, {key, 7}, {value, 7}]},
+ {row, [{id, <<"6">>}, {key, 6}, {value, 6}]},
+ {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
+ ]},
+ ?assertEqual(Expect, Result).
+
+
+should_map_with_include_docs() ->
+ Result = run_query(<<"baz">>, #{
+ start_key => 8,
+ end_key => 8,
+ include_docs => true
+ }),
+ Doc = {[
+ {<<"_id">>, <<"8">>},
+ {<<"_rev">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>},
+ {<<"val">>, 8}
+ ]},
+ Expect = {ok, [
+ {row, [{id, <<"8">>}, {key, 8}, {value, 8}, {doc, Doc}]}
+ ]},
+ ?assertEqual(Expect, Result).
+
+
+should_map_with_include_docs_reverse() ->
+ Result = run_query(<<"baz">>, #{
+ start_key => 8,
+ end_key => 8,
+ include_docs => true,
+ direction => rev
+ }),
+ Doc = {[
+ {<<"_id">>, <<"8">>},
+ {<<"_rev">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>},
+ {<<"val">>, 8}
+ ]},
+ Expect = {ok, [
+ {row, [{id, <<"8">>}, {key, 8}, {value, 8}, {doc, Doc}]}
+ ]},
+ ?assertEqual(Expect, Result).
+
+
+should_map_with_startkey_with_key_array() ->
+ Rows = [
+ {row, [{id, <<"4">>}, {key, [<<"4">>, 4]}, {value, 4}]},
+ {row, [{id, <<"5">>}, {key, [<<"5">>, 5]}, {value, 5}]},
+ {row, [{id, <<"6">>}, {key, [<<"6">>, 6]}, {value, 6}]},
+ {row, [{id, <<"7">>}, {key, [<<"7">>, 7]}, {value, 7}]},
+ {row, [{id, <<"8">>}, {key, [<<"8">>, 8]}, {value, 8}]},
+ {row, [{id, <<"9">>}, {key, [<<"9">>, 9]}, {value, 9}]}
+ ],
+
+ Result = run_query(<<"boom">>, #{
+ start_key => [<<"4">>]
+ }),
+
+ ?assertEqual({ok, Rows}, Result),
+
+ ResultRev = run_query(<<"boom">>, #{
+ start_key => [<<"9">>, 9],
+ direction => rev,
+ limit => 6
+ }),
+
+ ?assertEqual({ok, lists:reverse(Rows)}, ResultRev).
+
+
+should_map_with_startkey_and_endkey_with_key_array() ->
+ Rows1 = [
+ {row, [{id, <<"4">>}, {key, [<<"4">>, 4]}, {value, 4}]},
+ {row, [{id, <<"5">>}, {key, [<<"5">>, 5]}, {value, 5}]},
+ {row, [{id, <<"6">>}, {key, [<<"6">>, 6]}, {value, 6}]},
+ {row, [{id, <<"7">>}, {key, [<<"7">>, 7]}, {value, 7}]},
+ {row, [{id, <<"8">>}, {key, [<<"8">>, 8]}, {value, 8}]}
+ ],
+
+ Rows2 = [
+ {row, [{id, <<"4">>}, {key, [<<"4">>, 4]}, {value, 4}]},
+ {row, [{id, <<"5">>}, {key, [<<"5">>, 5]}, {value, 5}]},
+ {row, [{id, <<"6">>}, {key, [<<"6">>, 6]}, {value, 6}]},
+ {row, [{id, <<"7">>}, {key, [<<"7">>, 7]}, {value, 7}]},
+ {row, [{id, <<"8">>}, {key, [<<"8">>, 8]}, {value, 8}]},
+ {row, [{id, <<"9">>}, {key, [<<"9">>, 9]}, {value, 9}]}
+ ],
+
+ Result = run_query(<<"boom">>, #{
+ start_key => [<<"4">>],
+ end_key => [<<"8">>, []]
+ }),
+
+ ?assertEqual({ok, Rows1}, Result),
+
+ ResultRev = run_query(<<"boom">>, #{
+ start_key => [<<"8">>, []],
+ end_key => [<<"4">>],
+ direction => rev
+ }),
+
+ ?assertEqual({ok, lists:reverse(Rows1)}, ResultRev),
+
+ ResultRev2 = run_query(<<"boom">>, #{
+ start_key => [<<"9">>, 9],
+ end_key => [<<"4">>],
+ direction => rev,
+ inclusive_end => false
+ }),
+
+ % Here, [<<"4">>] is less than [<<"4">>, 4] so we
+ % expect rows 9-4
+ ?assertEqual({ok, lists:reverse(Rows2)}, ResultRev2),
+
+ ResultRev3 = run_query(<<"boom">>, #{
+ start_key => [<<"9">>, 9],
+ end_key => [<<"4">>, 4],
+ direction => rev,
+ inclusive_end => false
+ }),
+
+ % Here, specifying [<<"4">>, 4] as the key will prevent
+ % us from including that row which leaves rows 9-5
+ ?assertEqual({ok, lists:reverse(lists:nthtail(1, Rows2))}, ResultRev3).
+
+
+should_map_empty_views() ->
+ Result = run_query(<<"bing">>, #{}),
+ Expect = {ok, []},
+ ?assertEqual(Expect, Result).
+
+
+should_map_with_doc_emit() ->
+ Result = run_query(<<"doc_emit">>, #{
+ start_key => 8,
+ limit => 1
+ }),
+ Doc = {[
+ {<<"_id">>, <<"8">>},
+ {<<"_rev">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>},
+ {<<"val">>, 8}
+ ]},
+ Expect = {ok, [
+ {row, [{id, <<"8">>}, {key, 8}, {value, Doc}]}
+ ]},
+ ?assertEqual(Expect, Result).
+
+
+should_map_duplicate_keys() ->
+ Result = run_query(<<"duplicate_keys">>, #{
+ limit => 6
+ }),
+ Expect = {ok, [
+ {row, [{id, <<"1">>}, {key, <<"1">>}, {value, 1}]},
+ {row, [{id, <<"1">>}, {key, <<"1">>}, {value, 2}]},
+ {row, [{id, <<"10">>}, {key, <<"10">>}, {value, 10}]},
+ {row, [{id, <<"10">>}, {key, <<"10">>}, {value, 11}]},
+ {row, [{id, <<"2">>}, {key, <<"2">>}, {value, 2}]},
+ {row, [{id, <<"2">>}, {key, <<"2">>}, {value, 3}]}
+ ]},
+ ?assertEqual(Expect, Result).
+
+
+should_map_update_is_false() ->
+ Expect = {ok, [
+ {row, [{id, <<"8">>}, {key, 8}, {value, 8}]},
+ {row, [{id, <<"9">>}, {key, 9}, {value, 9}]},
+ {row, [{id, <<"10">>}, {key, 10}, {value, 10}]}
+ ]},
+
+ Expect1 = {ok, [
+ {row, [{id, <<"8">>}, {key, 8}, {value, 8}]},
+ {row, [{id, <<"9">>}, {key, 9}, {value, 9}]},
+ {row, [{id, <<"10">>}, {key, 10}, {value, 10}]},
+ {row, [{id, <<"11">>}, {key, 11}, {value, 11}]}
+ ]},
+
+ Idx = <<"baz">>,
+ DbName = ?tempdb(),
+
+ {ok, Db} = fabric2_db:create(DbName, [{user_ctx, ?ADMIN_USER}]),
+
+ DDoc = create_ddoc(),
+ Docs = make_docs(10),
+ fabric2_db:update_docs(Db, [DDoc | Docs]),
+
+ Args1 = #{
+ start_key => 8
+ },
+
+ Result1 = couch_views:query(Db, DDoc, Idx, fun default_cb/2,
+ [], Args1),
+ ?assertEqual(Expect, Result1),
+
+ Doc = doc(11),
+ fabric2_db:update_doc(Db, Doc),
+
+ Args2 = #{
+ start_key => 8,
+ update => false
+ },
+
+ Result2 = couch_views:query(Db, DDoc, Idx, fun default_cb/2,
+ [], Args2),
+ ?assertEqual(Expect, Result2),
+
+ Result3 = couch_views:query(Db, DDoc, Idx, fun default_cb/2,
+ [], Args1),
+ ?assertEqual(Expect1, Result3).
+
+
+should_map_update_is_lazy() ->
+ Expect = {ok, [
+ {row, [{id, <<"8">>}, {key, 8}, {value, 8}]},
+ {row, [{id, <<"9">>}, {key, 9}, {value, 9}]},
+ {row, [{id, <<"10">>}, {key, 10}, {value, 10}]}
+ ]},
+
+ Idx = <<"baz">>,
+ DbName = ?tempdb(),
+
+ {ok, Db} = fabric2_db:create(DbName, [{user_ctx, ?ADMIN_USER}]),
+
+ DDoc = create_ddoc(),
+ Docs = make_docs(10),
+
+ fabric2_db:update_docs(Db, [DDoc | Docs]),
+
+ Args1 = #{
+ start_key => 8,
+ update => lazy
+ },
+
+ Result1 = couch_views:query(Db, DDoc, Idx, fun default_cb/2,
+ [], Args1),
+ ?assertEqual({ok, []}, Result1),
+
+ {ok, Mrst} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
+ JobId = couch_views_jobs:job_id(Db, Mrst),
+ UpdateSeq = fabric2_db:get_update_seq(Db),
+ ok = couch_views_jobs:wait_for_job(JobId, DDoc#doc.id, UpdateSeq),
+
+ Args2 = #{
+ start_key => 8,
+ update => false
+ },
+
+ Result2 = couch_views:query(Db, DDoc, Idx, fun default_cb/2,
+ [], Args2),
+ ?assertEqual(Expect, Result2).
+
+
+should_map_wait_for_interactive() ->
+ DbName = ?tempdb(),
+ {ok, Db} = fabric2_db:create(DbName, [{user_ctx, ?ADMIN_USER}]),
+
+ DDoc = create_interactive_ddoc(),
+ Docs = make_docs(101),
+
+ fabric2_db:update_docs(Db, Docs),
+ fabric2_db:update_docs(Db, [DDoc]),
+
+ Result = couch_views:query(Db, DDoc, <<"idx_01">>, fun default_cb/2, [],
+ #{limit => 3}),
+ ?assertEqual({ok, [
+ {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
+ {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+ {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}
+ ]}, Result).
+
+
+should_map_local_seq() ->
+ ExpectedTrue = [
+ {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
+ {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+ {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}
+ ],
+ check_local_seq(true, ExpectedTrue),
+
+ ExpectedFalse = [],
+ check_local_seq(false, ExpectedFalse),
+
+ Error = {bad_request, invalid_design_doc,
+ <<"`options.local_seq` field must have boolean type">>},
+ ?assertThrow(Error, check_local_seq(something_else, null)).
+
+
+check_local_seq(Val, Expected) ->
+ DbName = ?tempdb(),
+ {ok, Db} = fabric2_db:create(DbName, [{user_ctx, ?ADMIN_USER}]),
+
+ DDoc = create_local_seq_ddoc(Val),
+ Docs = make_docs(5),
+ fabric2_db:update_docs(Db, [DDoc | Docs]),
+
+ {ok, Result} = couch_views:query(Db, DDoc, <<"idx_01">>, fun default_cb/2, [],
+ #{limit => 3}),
+
+ ?assertEqual(Expected, Result).
+
+
+% should_give_ext_size_seq_indexed_test(Db) ->
+% DDoc = couch_doc:from_json_obj({[
+% {<<"_id">>, <<"_design/seqdoc">>},
+% {<<"options">>, {[{<<"seq_indexed">>, true}]}},
+% {<<"views">>, {[
+% {<<"view1">>, {[
+% {<<"map">>, <<"function(doc){emit(doc._id, doc._id);}">>}
+% ]}}
+% ]}
+% }
+% ]}),
+% {ok, _} = couch_db:update_doc(Db, DDoc, []),
+% {ok, Db1} = couch_db:open_int(couch_db:name(Db), []),
+% {ok, DDoc1} = couch_db:open_doc(Db1, <<"_design/seqdoc">>, [ejson_body]),
+% couch_mrview:query_view(Db1, DDoc1, <<"view1">>, [{update, true}]),
+% {ok, Info} = couch_mrview:get_info(Db1, DDoc),
+% Size = couch_util:get_nested_json_value({Info}, [sizes, external]),
+% ok = couch_db:close(Db1),
+% ?assert(is_number(Size)).
+
+
+run_query(Idx, Args) ->
+ run_query(Idx, Args, false).
+
+
+run_query(Idx, Args, DebugCluster) ->
+ DbName = ?tempdb(),
+ {ok, Db} = fabric2_db:create(DbName, [{user_ctx, ?ADMIN_USER}]),
+ DDoc = create_ddoc(),
+ Docs = make_docs(10),
+ fabric2_db:update_docs(Db, [DDoc | Docs]),
+ if not DebugCluster -> ok; true ->
+ couch_views:query(Db, DDoc, Idx, fun default_cb/2, [], #{}),
+ fabric2_fdb:debug_cluster(),
+ ok
+ end,
+ couch_views:query(Db, DDoc, Idx, fun default_cb/2, [], Args).
+
+
+default_cb(complete, Acc) ->
+ {ok, lists:reverse(Acc)};
+default_cb({final, Info}, []) ->
+ {ok, [Info]};
+default_cb({final, _}, Acc) ->
+ {ok, Acc};
+default_cb({meta, _}, Acc) ->
+ {ok, Acc};
+default_cb(ok, ddoc_updated) ->
+ {ok, ddoc_updated};
+default_cb(Row, Acc) ->
+ {ok, [Row | Acc]}.
+
+
+create_ddoc() ->
+ couch_doc:from_json_obj({[
+ {<<"_id">>, <<"_design/bar">>},
+ {<<"views">>, {[
+ {<<"baz">>, {[
+ {<<"map">>, <<"function(doc) {emit(doc.val, doc.val);}">>}
+ ]}},
+ {<<"boom">>, {[
+ {<<"map">>, <<
+ "function(doc) {\n"
+ " emit([doc.val.toString(), doc.val], doc.val);\n"
+ "}"
+ >>}
+ ]}},
+ {<<"bing">>, {[
+ {<<"map">>, <<"function(doc) {}">>}
+ ]}},
+ {<<"doc_emit">>, {[
+ {<<"map">>, <<"function(doc) {emit(doc.val, doc)}">>}
+ ]}},
+ {<<"duplicate_keys">>, {[
+ {<<"map">>, <<
+ "function(doc) {\n"
+ " emit(doc._id, doc.val);\n"
+ " emit(doc._id, doc.val + 1);\n"
+ "}">>}
+ ]}},
+ {<<"zing">>, {[
+ {<<"map">>, <<
+ "function(doc) {\n"
+ " if(doc.foo !== undefined)\n"
+ " emit(doc.foo, 0);\n"
+ "}"
+ >>}
+ ]}}
+ ]}}
+ ]}).
+
+create_interactive_ddoc() ->
+ couch_doc:from_json_obj({[
+ {<<"_id">>, <<"_design/ddoc_interactive">>},
+ {<<"language">>, <<"javascript">>},
+ {<<"views">>, {[
+ {<<"idx_01">>, {[
+ {<<"map">>, <<
+ "function(doc) {"
+ "if (doc.val) {"
+ "emit(doc.val, doc.val);"
+ "}"
+ "}">>}
+ ]}}
+ ]}},
+ {<<"autoupdate">>, false},
+ {<<"interactive">>, true}
+ ]}).
+
+
+create_local_seq_ddoc(Val) ->
+ couch_doc:from_json_obj({[
+ {<<"_id">>, <<"_design/ddoc_local_seq">>},
+ {<<"options">>, {[{<<"local_seq">>, Val}]}},
+ {<<"language">>, <<"javascript">>},
+ {<<"views">>, {[
+ {<<"idx_01">>, {[
+ {<<"map">>, <<
+ "function(doc) {"
+ "if (doc._local_seq) {"
+ "emit(doc.val, doc.val);"
+ "}"
+ "}">>}
+ ]}}
+ ]}}
+ ]}).
+
+
+make_docs(Count) ->
+ [doc(I) || I <- lists:seq(1, Count)].
+
+
+doc(Id) ->
+ couch_doc:from_json_obj({[
+ {<<"_id">>, list_to_binary(integer_to_list(Id))},
+ {<<"val">>, Id}
+ ]}).
diff --git a/src/couch_views/test/couch_views_server_test.erl b/src/couch_views/test/couch_views_server_test.erl
new file mode 100644
index 000000000..23c807cc2
--- /dev/null
+++ b/src/couch_views/test/couch_views_server_test.erl
@@ -0,0 +1,218 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_server_test).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("fabric/test/fabric2_test.hrl").
+
+
+couch_views_server_test_() ->
+ {
+ "Test couch_views_server",
+ {
+ setup,
+ fun setup/0,
+ fun cleanup/1,
+ {
+ foreach,
+ fun foreach_setup/0,
+ fun foreach_teardown/1,
+ [
+ ?TDEF_FE(max_acceptors_started),
+ ?TDEF_FE(acceptors_become_workers),
+ ?TDEF_FE(handle_worker_death),
+ ?TDEF_FE(handle_acceptor_death),
+ ?TDEF_FE(handle_unknown_process_death),
+ ?TDEF_FE(max_workers_limit_works),
+ ?TDEF_FE(max_acceptors_greater_than_max_workers)
+ ]
+ }
+ }
+ }.
+
+
+setup() ->
+ Ctx = test_util:start_couch([
+ fabric,
+ couch_jobs,
+ couch_rate,
+ couch_js,
+ couch_eval
+ ]),
+ Ctx.
+
+
+cleanup(Ctx) ->
+ test_util:stop_couch(Ctx).
+
+
+foreach_setup() ->
+ config:set("couch_views", "max_acceptors", "2", false),
+ config:set("couch_views", "max_workers", "4", false),
+ meck:new(couch_views_server, [passthrough]),
+ meck:new(couch_views_indexer, [passthrough]),
+ meck:expect(couch_views_indexer, init, fun() ->
+ receive pls_accept -> ok end,
+ couch_views_server:accepted(self()),
+ receive pls_die -> ok end
+ end),
+ ok = application:start(couch_views).
+
+
+foreach_teardown(_) ->
+ ok = application:stop(couch_views),
+ meck:unload(),
+ config:delete("couch_views", "max_acceptors", false),
+ config:delete("couch_views", "max_workers", false),
+ ok.
+
+
+max_acceptors_started(_) ->
+ #{max_acceptors := MaxAcceptors, max_workers := MaxWorkers} = get_state(),
+ ?assertEqual(2, MaxAcceptors),
+ ?assertEqual(4, MaxWorkers),
+
+ ?assertEqual(0, maps:size(workers())),
+
+ [Pid1, Pid2] = maps:keys(acceptors()),
+ ?assert(is_pid(Pid1)),
+ ?assert(is_pid(Pid2)),
+ ?assert(is_process_alive(Pid1)),
+ ?assert(is_process_alive(Pid2)).
+
+
+acceptors_become_workers(_) ->
+ ?assertEqual(0, maps:size(workers())),
+
+ InitAcceptors = acceptors(),
+ accept_all(),
+
+ ?assertEqual(2, maps:size(acceptors())),
+ ?assertEqual(2, maps:size(workers())),
+
+ ?assertEqual(InitAcceptors, workers()).
+
+
+handle_worker_death(_) ->
+ [Pid1, Pid2] = maps:keys(acceptors()),
+ accept_all(),
+
+ % One worker exits normally
+ finish_normal([Pid1]),
+ ?assertEqual(2, maps:size(acceptors())),
+ ?assertEqual(1, maps:size(workers())),
+
+ % The other blows up with an error
+ finish_error([Pid2]),
+ ?assertEqual(2, maps:size(acceptors())),
+ ?assertEqual(0, maps:size(workers())).
+
+
+handle_acceptor_death(_) ->
+ [Pid1, Pid2] = maps:keys(acceptors()),
+ finish_error([Pid1]),
+
+ NewAcceptors = acceptors(),
+ ?assertEqual(2, maps:size(NewAcceptors)),
+ ?assert(lists:member(Pid2, maps:keys(NewAcceptors))),
+ ?assert(not lists:member(Pid1, maps:keys(NewAcceptors))).
+
+
+handle_unknown_process_death(_) ->
+ meck:reset(couch_views_server),
+ Pid = self(),
+ whereis(couch_views_server) ! {'EXIT', Pid, blah},
+ meck:wait(1, couch_views_server, terminate,
+ [{unknown_pid_exit, Pid}, '_'], 5000).
+
+
+max_workers_limit_works(_) ->
+ % Accept 2 jobs -> 2 workers
+ accept_all(),
+ ?assertEqual(2, maps:size(workers())),
+
+ % Accept 2 more jobs -> 4 workers
+ accept_all(),
+ ?assertEqual(0, maps:size(acceptors())),
+ ?assertEqual(4, maps:size(workers())),
+
+ % Kill 1 worker -> 1 acceptor and 3 workers
+ [Worker1 | _] = maps:keys(workers()),
+ finish_normal([Worker1]),
+ ?assertEqual(1, maps:size(acceptors())),
+ ?assertEqual(3, maps:size(workers())),
+
+ % Kill 2 more workers -> 2 acceptors and 1 worker
+ [Worker2, Worker3 | _] = maps:keys(workers()),
+ finish_normal([Worker2, Worker3]),
+ ?assertEqual(2, maps:size(acceptors())),
+ ?assertEqual(1, maps:size(workers())),
+
+ % Kill 1 last worker -> 2 acceptors and 0 workers
+ [Worker4] = maps:keys(workers()),
+ finish_normal([Worker4]),
+ ?assertEqual(2, maps:size(acceptors())),
+ ?assertEqual(0, maps:size(workers())).
+
+max_acceptors_greater_than_max_workers(_) ->
+ [Pid1, Pid2] = maps:keys(acceptors()),
+
+ sys:replace_state(couch_views_server, fun(#{} = St) ->
+ St#{max_workers := 1}
+ end),
+
+ accept_all(),
+
+ finish_normal([Pid1]),
+ finish_normal([Pid2]),
+
+ % Only 1 acceptor should start as it is effectively limited by max_workers
+ ?assertEqual(1, maps:size(acceptors())),
+ ?assertEqual(0, maps:size(workers())).
+
+
+% Utility functions
+
+accept_all() ->
+ Acceptors = acceptors(),
+ meck:reset(couch_views_server),
+ [Pid ! pls_accept || Pid <- maps:keys(Acceptors)],
+ meck:wait(maps:size(Acceptors), couch_views_server, handle_call, 3, 5000).
+
+
+acceptors() ->
+ #{acceptors := Acceptors} = get_state(),
+ Acceptors.
+
+
+workers() ->
+ #{workers := Workers} = get_state(),
+ Workers.
+
+
+get_state() ->
+ sys:get_state(couch_views_server, infinity).
+
+
+finish_normal(Workers) when is_list(Workers) ->
+ meck:reset(couch_views_server),
+ [Pid ! pls_die || Pid <- Workers],
+ meck:wait(length(Workers), couch_views_server, handle_info,
+ [{'_', '_', normal}, '_'], 5000).
+
+
+finish_error(Workers) when is_list(Workers) ->
+ meck:reset(couch_views_server),
+ [exit(Pid, badness) || Pid <- Workers],
+ meck:wait(length(Workers), couch_views_server, handle_info,
+ [{'_', '_', badness}, '_'], 5000).
diff --git a/src/couch_views/test/couch_views_size_test.erl b/src/couch_views/test/couch_views_size_test.erl
new file mode 100644
index 000000000..18fa9e628
--- /dev/null
+++ b/src/couch_views/test/couch_views_size_test.erl
@@ -0,0 +1,564 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_size_test).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("fabric/include/fabric2.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
+
+% N.B., we should move to couch_ejson_size instead
+% of erlang:external_size
+%
+% to calculate view size:
+% total = 0
+% for (fdb_k, fdb_v) in VIEW_MAP_RANGE:
+% {EncUserKey, EncUserVal} = erlfdb_tuple:unpack(fdb_v),
+% UserKey = couch_views_encoding:decode(EncUserKey),
+% UserVal = couch_views_encoding:decode(EncUserVal),
+% total += erlang:external_size(UserKey),
+% total += erlang:external_size(UserVal)
+%
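+% As a rough, hedged sketch of that loop in actual Erlang (it assumes Rows
+% is the list of {_FdbKey, FdbVal} pairs read from the view map range; the
+% function name is illustrative only):
+%
+%   view_size(Rows) ->
+%       lists:foldl(fun({_FdbKey, FdbVal}, Total) ->
+%           {EncUserKey, EncUserVal} = erlfdb_tuple:unpack(FdbVal),
+%           UserKey = couch_views_encoding:decode(EncUserKey),
+%           UserVal = couch_views_encoding:decode(EncUserVal),
+%           Total + erlang:external_size(UserKey)
+%               + erlang:external_size(UserVal)
+%       end, 0, Rows).
+%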
+% Our goal in checking the size calculations is that we cover
+% as much of the possible key mutation space as possible while
+% not relying on fuzzing out the edge cases. Conceptually we have
+% two sets of keys E and U. E is keys as currently exist in the
+% view, and U is the new set of keys corresponding to an update.
+%
+% Both sets E and U have the same possible set of state variables:
+%
+% 1. N unique keys, where 0 =< N =< infinity
+% 2. D keys with duplicates, where 0 =< D =< N,
+% 3. R repeats for each member of D, for 2 =< R =< infinity
+%
+% Given two sets S1 and S2, we then have a set of transition variables:
+%
+% 1. deltaN - shared unique keys, where 0 =< deltaN =< N
+% 2. deltaD - shared duplicates, where 0 =< deltaD =< N
+% 3. deltaR - shared repeats for each D, where 2 =< deltaR =< infinity
+%
+% To search our state transition space, we can create two functions to
+% first define our start and end states, and for each transition we have
+% a function that defines the shared overlap between states.
+%
+% Given a list of transitions, our checks then become simple in that
+% we can iterate over each transition checking that our index is valid
+% after each one. Index validation will purely look at the existing
+% state of the index in fdb and validate correctness.
+
+-define(NUM_SINGLE_TESTS, 100).
+-define(NUM_MULTI_TESTS, 100).
+
+-define(N_DOMAIN, [0, 1, 2, 5]).
+-define(D_DOMAIN, [0, 1, 2, 5]).
+-define(R_DOMAIN, [2, 4]).
+
+-define(DELTA_N_DOMAIN, [0, 1, 2, 5]).
+-define(DELTA_D_DOMAIN, [0, 1, 2, 5]).
+-define(DELTA_R_DOMAIN, [1, 2, 4]).
+
+
+generate_sets() ->
+ permute(?N_DOMAIN, ?D_DOMAIN, ?R_DOMAIN, fun(N, D, R) ->
+ % We can't have more duplicates than total keys
+ case D > N of
+ true -> throw(skip);
+ false -> ok
+ end,
+
+ % Only include one of the repeat values
+ % for our zero sets
+ case D == 0 of
+ true when R == 2 -> ok;
+ true -> throw(skip);
+ false -> ok
+ end,
+
+ % Replace R with a sentinel value for sanity
+ % when there are no dupes to have repeats
+ ActualR = if D == 0 -> 0; true -> R end,
+
+ {N, D, ActualR}
+ end).
+
+
+generate_transitions() ->
+ Sets = generate_sets(),
+ Pairs = [{Set1, Set2} || Set1 <- Sets, Set2 <- Sets],
+ lists:flatmap(fun({{N1, D1, _R1} = S1, {N2, D2, _R2} = S2}) ->
+ Filter = fun(DeltaN, DeltaD, DeltaR) ->
+ % Can't share more keys than the smaller of the
+ % two sets
+ case DeltaN > min(N1, N2) of
+ true -> throw(skip);
+ false -> ok
+ end,
+
+ % For DeltaN == 0, all combinations of DeltaD and
+ % DeltaR are equivalent tests
+ case DeltaN == 0 of
+ true when DeltaD == 0, DeltaR == 1 -> ok;
+ true -> throw(skip);
+ false -> ok
+ end,
+
+ % Can't share more dupes than exist in either set
+ % or the total number of shared keys
+ case DeltaD > min(D1, D2) orelse DeltaD > DeltaN of
+ true -> throw(skip);
+ false -> ok
+ end,
+
+ % For DeltaD == 0, all DeltaR correspond to the
+ % same test so only include one instance
+ case DeltaD == 0 of
+ true when DeltaR == 1 -> ok;
+ true -> throw(skip);
+ false -> ok
+ end,
+
+ % If we have more non-repeated keys in our
+ % transition than there's "room" for in the target
+ % set it isn't a valid test case.
+ TransitionNonRepeats = DeltaN - DeltaD,
+ TargetNonRepeats = N2 - D2,
+ case TransitionNonRepeats > TargetNonRepeats of
+ true -> throw(skip);
+ false -> ok
+ end,
+
+ {S1, S2, {DeltaN, DeltaD, DeltaR}}
+ end,
+ permute(?DELTA_N_DOMAIN, ?DELTA_D_DOMAIN, ?DELTA_R_DOMAIN, Filter)
+ end, Pairs).
+
+
+permute(NList, DList, RList, Filter) ->
+ % Technically we could call into Filter in each
+ % outer loop to conditionally skip inner loops.
+ % If someone comes along looking to speed up the
+ % fixture setup time, this would likely be an
+ % easy win.
+ lists:foldl(fun(N, NAcc) ->
+ lists:foldl(fun(D, DAcc) ->
+ lists:foldl(fun(R, RAcc) ->
+ try
+ [Filter(N, D, R) | RAcc]
+ catch throw:skip ->
+ RAcc
+ end
+ end, DAcc, RList)
+ end, NAcc, DList)
+ end, [], NList).
+
+
+row_transition_test_() ->
+ {
+ "Test view size tracking",
+ {
+ setup,
+ fun setup/0,
+ fun cleanup/1,
+ fun create_transition_tests/1
+ }
+ }.
+
+
+setup() ->
+ Ctx = test_util:start_couch([
+ fabric,
+ couch_jobs,
+ couch_js,
+ couch_views
+ ]),
+ {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+ {Ctx, Db}.
+
+
+cleanup({Ctx, Db}) ->
+ ok = fabric2_db:delete(fabric2_db:name(Db), []),
+ test_util:stop_couch(Ctx).
+
+
+create_transition_tests({_Ctx, Db}) ->
+ Transitions = generate_transitions(),
+ Single = lists:flatmap(fun(T) ->
+ Name = lists:flatten(io_lib:format("single ~s", [tname(T)])),
+ [{Name, fun() -> check_single_transition(Db, T) end}]
+ end, lists:sort(Transitions)),
+ Multi = lists:flatmap(fun(T) ->
+ Name = lists:flatten(io_lib:format("multi ~s", [tname(T)])),
+ [{Name, fun() -> check_multi_transition(Db, T) end}]
+ end, lists:sort(group(shuffle(Transitions)))),
+ subset(?NUM_SINGLE_TESTS, Single) ++ subset(?NUM_MULTI_TESTS, Multi).
+
+
+check_single_transition(Db, {Set1, Set2, Transition}) ->
+ clear_views(Db),
+ InitKVs = init_set(Set1, [a, b, c, d, e]),
+ CommonKVs = reduce_set(Transition, InitKVs),
+ FinalKVs = fill_set(Set2, CommonKVs, [v, w, x, y, z]),
+ {InitJSONKVs, Bindings} = unlabel(InitKVs, #{}),
+ {FinalJSONKVs, _} = unlabel(FinalKVs, Bindings),
+
+ Sig = couch_uuids:random(),
+ DocId = couch_uuids:random(),
+
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ write_docs(TxDb, Sig, [make_doc(DocId, InitJSONKVs)])
+ end),
+
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ write_docs(TxDb, Sig, [make_doc(DocId, FinalJSONKVs)])
+ end),
+
+ validate_index(Db, Sig, #{DocId => FinalJSONKVs}).
+
+
+check_multi_transition(Db, Transitions) ->
+ clear_views(Db),
+
+ {Docs, IdMap} = lists:mapfoldl(fun({Set1, Set2, Transition}, IdMapAcc) ->
+ DocId = couch_uuids:random(),
+ InitKVs = init_set(Set1, [a, b, c, d, e]),
+ CommonKVs = reduce_set(Transition, InitKVs),
+ FinalKVs = fill_set(Set2, CommonKVs, [v, w, x, y, z]),
+ {InitJSONKVs, Bindings} = unlabel(InitKVs, #{}),
+ {FinalJSONKVs, _} = unlabel(FinalKVs, Bindings),
+ InitDoc = make_doc(DocId, InitJSONKVs),
+ FinalDoc = make_doc(DocId, FinalJSONKVs),
+ {{InitDoc, FinalDoc}, maps:put(DocId, FinalJSONKVs, IdMapAcc)}
+ end, #{}, Transitions),
+
+ {InitDocs, FinalDocs} = lists:unzip(Docs),
+
+ Sig = couch_uuids:random(),
+
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ write_docs(TxDb, Sig, InitDocs)
+ end),
+
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ write_docs(TxDb, Sig, FinalDocs)
+ end),
+
+ validate_index(Db, Sig, IdMap).
+
+
+clear_views(Db) ->
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = TxDb,
+ {Start, End} = erlfdb_tuple:range({?DB_VIEWS}, DbPrefix),
+ erlfdb:clear_range(Tx, Start, End),
+
+ GlobalKey = {?DB_STATS, <<"sizes">>, <<"views">>},
+ BinGlobalKey = erlfdb_tuple:pack(GlobalKey, DbPrefix),
+ erlfdb:set(Tx, BinGlobalKey, ?uint2bin(0))
+ end).
+
+
+write_docs(TxDb, Sig, Docs) ->
+ Mrst = #mrst{
+ sig = Sig,
+ views = [#mrview{
+ id_num = 1
+ }]
+ },
+ IdxState = #{
+ last_seq => <<"foo">>
+ },
+ couch_views_indexer:write_docs(TxDb, Mrst, Docs, IdxState).
+
+
+validate_index(Db, Sig, JSONRows) ->
+ #{
+ db_prefix := DbPrefix
+ } = Db,
+ Rows = fabric2_fdb:transactional(Db, fun(TxDb) ->
+ #{
+ tx := Tx
+ } = TxDb,
+ {Start, End} = erlfdb_tuple:range({?DB_VIEWS}, DbPrefix),
+ erlfdb:get_range(Tx, Start, End)
+ end),
+
+ InitAcc = #{
+ row_count => 0,
+ kv_size => 0,
+ ids => #{},
+ rows => []
+ },
+
+ MapData = lists:foldl(fun({Key, Value}, Acc) ->
+ case erlfdb_tuple:unpack(Key, DbPrefix) of
+ {?DB_VIEWS, ?VIEW_INFO, ?VIEW_UPDATE_SEQ, Sig} ->
+ ?assertEqual(<<"foo">>, Value),
+ Acc;
+ {?DB_VIEWS, ?VIEW_INFO, ?VIEW_ROW_COUNT, Sig, 1} ->
+ maps:put(row_count, ?bin2uint(Value), Acc);
+ {?DB_VIEWS, ?VIEW_INFO, ?VIEW_KV_SIZE, Sig, 1} ->
+ maps:put(kv_size, ?bin2uint(Value), Acc);
+ {?DB_VIEWS, ?VIEW_DATA, Sig, ?VIEW_ID_RANGE, DocId, 1} ->
+ [
+ TotalKeys, TotalSize, UniqueKeys
+ ] = couch_views_encoding:decode(Value),
+ maps:update_with(ids, fun(Ids) ->
+ false = maps:is_key(DocId, Ids),
+ maps:put(DocId, {TotalKeys, TotalSize, UniqueKeys}, Ids)
+ end, Acc);
+ {?DB_VIEWS, ?VIEW_DATA, Sig, ?VIEW_MAP_RANGE, 1, MapKey, _DupeId} ->
+ {EncKey, DocId} = MapKey,
+ {UserKey, UserVal} = erlfdb_tuple:unpack(Value),
+
+ UserJsonKey = couch_views_encoding:decode(UserKey),
+ UserJsonVal = couch_views_encoding:decode(UserVal),
+
+ ?assertEqual(
+ EncKey,
+ couch_views_encoding:encode(UserJsonKey, key)
+ ),
+
+ maps:update_with(rows, fun(RAcc) ->
+ [{DocId, UserJsonKey, UserJsonVal} | RAcc]
+ end, Acc)
+ end
+ end, InitAcc, Rows),
+
+ #{
+ row_count := RowCount,
+ kv_size := KVSize,
+ ids := MapIds,
+ rows := MapRows
+ } = MapData,
+
+ SumFun = fun(_DocId, {TotalKVs, TotalSize, _UniqueKeys}, {KVAcc, SAcc}) ->
+ {KVAcc + TotalKVs, SAcc + TotalSize}
+ end,
+ {SumKVCount, SumKVSize} = maps:fold(SumFun, {0, 0}, MapIds),
+ ?assertEqual(RowCount, length(MapRows)),
+ ?assertEqual(RowCount, SumKVCount),
+ ?assertEqual(KVSize, SumKVSize),
+ ?assert(KVSize >= 0),
+
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ GlobalSize = get_global_size(TxDb),
+ ?assertEqual(KVSize, GlobalSize),
+
+ ViewSize = couch_views_fdb:get_kv_size(TxDb, #mrst{sig = Sig}, 1),
+ ?assertEqual(KVSize, ViewSize)
+ end),
+
+ % Compare our raw JSON rows to what was indexed
+ IdsFromJSONRows = maps:fold(fun(DocId, DocRows, IdAcc) ->
+ FinalAcc = lists:foldl(fun({JsonKey, JsonVal}, {CAcc, SAcc, UAcc}) ->
+ KeySize = erlang:external_size(JsonKey),
+ ValSize = erlang:external_size(JsonVal),
+ NewUnique = lists:usort([JsonKey | UAcc]),
+ {CAcc + 1, SAcc + KeySize + ValSize, NewUnique}
+ end, {0, 0, []}, DocRows),
+ if FinalAcc == {0, 0, []} -> IdAcc; true ->
+ maps:put(DocId, FinalAcc, IdAcc)
+ end
+ end, #{}, JSONRows),
+ ?assertEqual(MapIds, IdsFromJSONRows),
+
+ % Compare the found id entries to our row data
+ IdsFromMapRows = lists:foldl(fun({DocId, JsonKey, JsonVal}, Acc) ->
+ KeySize = erlang:external_size(JsonKey),
+ ValSize = erlang:external_size(JsonVal),
+ Default = {1, KeySize + ValSize, [JsonKey]},
+ maps:update_with(DocId, fun({TotalKVs, TotalSize, UniqueKeys}) ->
+ NewUnique = lists:usort([JsonKey | UniqueKeys]),
+ {TotalKVs + 1, TotalSize + KeySize + ValSize, NewUnique}
+ end, Default, Acc)
+ end, #{}, MapRows),
+ ?assertEqual(MapIds, IdsFromMapRows).
+
+
+make_doc(DocId, []) ->
+ case rand:uniform() < 0.5 of
+ true ->
+ #{
+ id => DocId,
+ deleted => true,
+ results => [[]]
+ };
+ false ->
+ #{
+ id => DocId,
+ deleted => false,
+ results => [[]]
+ }
+ end;
+make_doc(DocId, Results) ->
+ #{
+ id => DocId,
+ deleted => false,
+ results => [Results]
+ }.
+
+
+get_global_size(TxDb) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = TxDb,
+ GlobalKey = {?DB_STATS, <<"sizes">>, <<"views">>},
+ BinGlobalKey = erlfdb_tuple:pack(GlobalKey, DbPrefix),
+ ?bin2uint(erlfdb:wait(erlfdb:get(Tx, BinGlobalKey))).
+
+
+init_set({N, D, R}, Labels) ->
+ {Dupes, RestLabels} = fill_keys(D, Labels, []),
+ {Unique, _} = fill_keys(N - D, RestLabels, []),
+ % Sanity assertions
+ N = length(Unique) + length(Dupes),
+ D = length(Dupes),
+ {Unique, [{Key, R} || Key <- Dupes]}.
+
+
+reduce_set({DeltaN, DeltaD, DeltaR}, {Unique, Dupes}) ->
+ NewDupes = lists:sublist(Dupes, DeltaD),
+ NewUnique = lists:sublist(Unique, DeltaN - DeltaD),
+ {NewUnique, [{Key, DeltaR} || {Key, _} <- NewDupes]}.
+
+
+fill_set({N, D, R}, {Unique, Dupes}, Labels) ->
+ AddDupes = D - length(Dupes),
+ {NewDupes, RestLabels} = fill_keys(AddDupes, Labels, Dupes),
+
+ AddUnique = N - length(Unique) - length(NewDupes),
+ {NewUnique, _} = fill_keys(AddUnique, RestLabels, Unique),
+ % Sanity assertions
+ N = length(NewUnique) + length(NewDupes),
+ D = length(NewDupes),
+ {NewUnique, lists:map(fun(Dupe) ->
+ case Dupe of
+ {_, _} -> Dupe;
+ A when is_atom(A) -> {A, R}
+ end
+ end, NewDupes)}.
+
+
+fill_keys(0, Labels, Acc) ->
+ {Acc, Labels};
+fill_keys(Count, [Label | RestLabels], Acc) when Count > 0 ->
+ fill_keys(Count - 1, RestLabels, [Label | Acc]).
+
+
+unlabel({Unique, Dupes}, Bindings) ->
+ lists:foldl(fun(Item, {KVAcc, BindingsAcc}) ->
+ {KVs, NewBindingsAcc} = unlabel_item(Item, BindingsAcc),
+ {KVs ++ KVAcc, NewBindingsAcc}
+ end, {[], Bindings}, Unique ++ Dupes).
+
+
+unlabel_item(Label, Bindings) when is_atom(Label) ->
+ NewBindings = maybe_bind(Label, Bindings),
+ KV = maps:get(Label, NewBindings),
+ {[KV], NewBindings};
+unlabel_item({Label, Count}, Bindings) when is_atom(Label), is_integer(Count) ->
+ NewBindings = maybe_bind(Label, Bindings),
+ {K, _} = KV = maps:get(Label, NewBindings),
+ ToAdd = lists:map(fun(_) ->
+ {K, gen_value()}
+ end, lists:seq(1, Count - 1)),
+ {[KV | ToAdd], NewBindings}.
+
+
+maybe_bind(Label, Bindings) ->
+ case maps:is_key(Label, Bindings) of
+ true ->
+ case rand:uniform() < 0.5 of
+ true ->
+ rebind(Label, Bindings);
+ false ->
+ Bindings
+ end;
+ false ->
+ bind(Label, Bindings)
+ end.
+
+
+bind(Label, Bindings) ->
+ maps:put(Label, {gen_key(), gen_value()}, Bindings).
+
+
+rebind(Label, Bindings) ->
+ {Key, _} = maps:get(Label, Bindings),
+ maps:put(Label, {Key, gen_value()}, Bindings).
+
+
+gen_key() ->
+ Unique = couch_uuids:random(),
+ case rand:uniform() of
+ N when N < 0.2 ->
+ [Unique, true, rand:uniform()];
+ N when N < 0.4 ->
+ {[{Unique, true}, {<<"foo">>, [<<"bar">>, null, 1, {[]}]}]};
+ _ ->
+ Unique
+ end.
+
+
+gen_value() ->
+ case rand:uniform() of
+ N when N < 0.2 ->
+ [false, rand:uniform(), {[]}];
+ N when N < 0.4 ->
+ {[{<<"a">>, 1}, {<<"b">>, 2}]};
+ N when N < 0.6 ->
+ rand:uniform(100);
+ N when N < 0.8 ->
+ rand:uniform();
+ _ ->
+ 1
+ end.
+
+
+group(Items) ->
+ case length(Items) > 5 of
+ true ->
+ {Group, Rest} = lists:split(5, Items),
+ [lists:sort(Group) | group(Rest)];
+ false when Items == [] ->
+ [];
+ false ->
+ [lists:sort(Items)]
+ end.
+
+
+shuffle(Items) ->
+ Tagged = [{rand:uniform(), I} || I <- Items],
+ Sorted = lists:sort(Tagged),
+ [I || {_T, I} <- Sorted].
+
+
+subset(Count, Items) ->
+ Random = shuffle(Items),
+ Take = lists:sublist(Random, Count),
+ lists:sort(Take).
+
+
+tname([]) ->
+ [];
+tname([Transition | RestTransitions]) ->
+ [tname(Transition) | tname(RestTransitions)];
+tname({{N1, D1, R1}, {N2, D2, R2}, {DN, DD, DR}}) ->
+ io_lib:format("~b~b~b~b~b~b~b~b~b", [N1, D1, R1, N2, D2, R2, DN, DD, DR]).
+
diff --git a/src/couch_views/test/couch_views_trace_index_test.erl b/src/couch_views/test/couch_views_trace_index_test.erl
new file mode 100644
index 000000000..f5ea37982
--- /dev/null
+++ b/src/couch_views/test/couch_views_trace_index_test.erl
@@ -0,0 +1,145 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+-module(couch_views_trace_index_test).
+
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+
+
+% Steps for this to work
+% Run export FDB_NETWORK_OPTION_TRACE_ENABLE="" &&
+% make eunit apps=couch_views suites=couch_views_trace_index_test
+% Look in src/couch_views/.eunit for the trace file
+% You might need to add an extra </Trace> to finish up the file
+% Analyze!
+
+
+-define(EUNIT_FTW(Tests), [{with, [T]} || T <- Tests]).
+
+
+indexer_test_() ->
+ {
+ "Trace view indexing",
+ {
+ setup,
+ fun setup/0,
+ fun cleanup/1,
+ {
+ foreach,
+ fun foreach_setup/0,
+ fun foreach_teardown/1,
+ ?EUNIT_FTW([
+ fun trace_single_doc/1
+ ])
+ }
+ }
+ }.
+
+
+setup() ->
+ test_util:start_couch([fabric, couch_js, couch_rate]).
+
+
+cleanup(Ctx) ->
+ test_util:stop_couch(Ctx).
+
+
+foreach_setup() ->
+ {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+ Db.
+
+
+foreach_teardown(Db) ->
+ meck:unload(),
+ ok = fabric2_db:delete(fabric2_db:name(Db), []).
+
+
+trace_single_doc(Db) ->
+ DbName = fabric2_db:name(Db),
+ DDoc = create_ddoc(),
+ Doc = doc(0),
+
+ {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+ {ok, _} = fabric2_db:update_doc(Db, Doc, []),
+ {ok, Mrst} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
+
+ HexSig = fabric2_util:to_hex(Mrst#mrst.sig),
+ JobData = #{
+ <<"db_name">> => DbName,
+ <<"db_uuid">> => fabric2_db:get_uuid(Db),
+ <<"ddoc_id">> => <<"_design/bar">>,
+ <<"sig">> => HexSig,
+ <<"retries">> => 0
+ },
+ meck:expect(couch_jobs, accept, 2, {ok, job, JobData}),
+ meck:expect(couch_jobs, update, 3, {ok, job}),
+ meck:expect(couch_jobs, finish, 3, ok),
+ meck:expect(couch_views_server, accepted, 1, ok),
+
+ put(erlfdb_trace, <<"views_write_one_doc">>),
+ couch_views_indexer:init(),
+
+ put(erlfdb_trace, <<"views_read_one_doc">>),
+ {ok, Out} = couch_views:query(
+ Db,
+ DDoc,
+ <<"map_fun1">>,
+ fun fold_fun/2,
+ [],
+ #mrargs{}
+ ),
+
+ ?assertEqual([{row, [
+ {id, <<"0">>},
+ {key, 0},
+ {value, 0}
+ ]}], Out).
+
+
+create_ddoc() ->
+ couch_doc:from_json_obj({[
+ {<<"_id">>, <<"_design/bar">>},
+ {<<"views">>, {[
+ {<<"map_fun1">>, {[
+ {<<"map">>, <<"function(doc) {emit(doc.val, doc.val);}">>}
+ ]}},
+ {<<"map_fun2">>, {[
+ {<<"map">>, <<"function(doc) {}">>}
+ ]}}
+ ]}}
+ ]}).
+
+
+doc(Id) ->
+ doc(Id, Id).
+
+
+doc(Id, Val) ->
+ couch_doc:from_json_obj({[
+ {<<"_id">>, list_to_binary(integer_to_list(Id))},
+ {<<"val">>, Val}
+ ]}).
+
+
+fold_fun({meta, _Meta}, Acc) ->
+ {ok, Acc};
+
+fold_fun({row, _} = Row, Acc) ->
+ {ok, [Row | Acc]};
+
+fold_fun(complete, Acc) ->
+ {ok, lists:reverse(Acc)}.
diff --git a/src/couch_views/test/couch_views_updater_test.erl b/src/couch_views/test/couch_views_updater_test.erl
new file mode 100644
index 000000000..89c341a17
--- /dev/null
+++ b/src/couch_views/test/couch_views_updater_test.erl
@@ -0,0 +1,240 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_updater_test).
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("fabric/test/fabric2_test.hrl").
+-include_lib("mango/src/mango_idx.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
+
+
+indexer_test_() ->
+ {
+ "Test indexing",
+ {
+ setup,
+ fun setup/0,
+ fun cleanup/1,
+ {
+ foreach,
+ fun foreach_setup/0,
+ fun foreach_teardown/1,
+ [
+ ?TDEF_FE(index_docs),
+ ?TDEF_FE(update_doc),
+ ?TDEF_FE(delete_doc),
+ ?TDEF_FE(includes_design_docs),
+ ?TDEF_FE(handle_erlfdb_errors, 15)
+ ]
+ }
+ }
+ }.
+
+
+setup() ->
+ Ctx = test_util:start_couch([
+ fabric,
+ couch_jobs,
+ couch_js,
+ couch_views,
+ mango
+ ]),
+ Ctx.
+
+
+cleanup(Ctx) ->
+ test_util:stop_couch(Ctx).
+
+
+foreach_setup() ->
+ {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+
+ DDoc = create_idx_ddoc(),
+ fabric2_db:update_docs(Db, [DDoc]),
+ % make sure the index is built for the first time so the background
+ % indexer doesn't build the index
+ wait_while_ddoc_builds(Db),
+
+ Docs = make_docs(3),
+ fabric2_db:update_docs(Db, Docs),
+ meck:new(couch_views_fdb, [passthrough]),
+ {Db, DDoc}.
+
+
+foreach_teardown({Db, _}) ->
+ meck:unload(),
+ ok = fabric2_db:delete(fabric2_db:name(Db), []).
+
+
+index_docs({Db, DDoc}) ->
+ Docs = run_query(Db, DDoc),
+ ?assertEqual([
+ [{id, <<"1">>}, {value, 1}],
+ [{id, <<"2">>}, {value, 2}],
+ [{id, <<"3">>}, {value, 3}]
+ ], Docs).
+
+
+update_doc({Db, DDoc}) ->
+ {ok, Doc} = fabric2_db:open_doc(Db, <<"2">>),
+ JsonDoc = couch_doc:to_json_obj(Doc, []),
+ JsonDoc2 = couch_util:json_apply_field({<<"value">>, 4}, JsonDoc),
+ Doc2 = couch_doc:from_json_obj(JsonDoc2),
+ fabric2_db:update_doc(Db, Doc2),
+
+ Docs = run_query(Db, DDoc),
+ ?assertEqual([
+ [{id, <<"1">>}, {value, 1}],
+ [{id, <<"3">>}, {value, 3}],
+ [{id, <<"2">>}, {value, 4}]
+ ], Docs).
+
+
+delete_doc({Db, DDoc}) ->
+ {ok, Doc} = fabric2_db:open_doc(Db, <<"2">>),
+ JsonDoc = couch_doc:to_json_obj(Doc, []),
+ JsonDoc2 = couch_util:json_apply_field({<<"_deleted">>, true}, JsonDoc),
+ Doc2 = couch_doc:from_json_obj(JsonDoc2),
+ fabric2_db:update_doc(Db, Doc2),
+
+ Docs = run_query(Db, DDoc),
+ ?assertEqual([
+ [{id, <<"1">>}, {value, 1}],
+ [{id, <<"3">>}, {value, 3}]
+ ], Docs).
+
+
+includes_design_docs({Db, _}) ->
+ DDoc = create_idx_include_ddocs(),
+ fabric2_db:update_docs(Db, [DDoc]),
+
+ IndexDDoc0 = create_idx_ddoc(),
+ IndexDDoc = IndexDDoc0#doc{
+ id = <<"_design/to_be_indexed">>
+ },
+
+ fabric2_db:update_docs(Db, [IndexDDoc]),
+
+ Docs = run_query(Db, DDoc),
+ ?assertEqual([
+ [{id, <<"_design/ddoc_that_indexes_ddocs">>}, {value, 1}],
+ [{id, <<"_design/to_be_indexed">>}, {value, 1}]
+ ], Docs).
+
+
+handle_erlfdb_errors({Db, _}) ->
+ meck:expect(couch_views_fdb, write_doc, fun(_, _, _, _) ->
+ error({erlfdb_error, 1009})
+ end),
+ ?assertError({erlfdb_error, 1009}, fabric2_db:update_docs(Db, [doc(4)])).
+
+
+run_query(Db, DDoc) ->
+ Args = #mrargs{
+ view_type = map,
+ reduce = false,
+ include_docs = true,
+ update = false
+ },
+ CB = fun query_cb/2,
+ {ok, Acc} = couch_views:query(Db, DDoc, <<"idx_01">>, CB, [], Args),
+ lists:map(fun ({Props}) ->
+ [
+ {id, couch_util:get_value(<<"_id">>, Props)},
+ {value, couch_util:get_value(<<"value">>, Props, 1)}
+ ]
+
+ end, Acc).
+
+
+create_idx_ddoc() ->
+ couch_doc:from_json_obj({[
+ {<<"_id">>, <<"_design/ddoc1">>},
+ {<<"language">>, <<"query">>},
+ {<<"views">>, {[
+ {<<"idx_01">>, {[
+ {<<"map">>, {[
+ {<<"fields">>, {[{<<"value">>, <<"asc">>}]}}
+ ]}},
+ {<<"reduce">>, <<"_count">>},
+ {<<"options">>, {[
+ {<<"def">>,
+ {[{<<"fields">>,
+ {[{<<"value">>, <<"asc">>}]}}]}}
+ ]}}
+ ]}}
+ ]}
+ },
+ {<<"autoupdate">>, false},
+ {<<"options">>, {[{<<"interactive">>, true}]}}
+ ]}).
+
+
+create_idx_include_ddocs() ->
+ couch_doc:from_json_obj({[
+ {<<"_id">>, <<"_design/ddoc_that_indexes_ddocs">>},
+ {<<"language">>, <<"javascript">>},
+ {<<"views">>, {[
+ {<<"idx_01">>, {[
+ {<<"map">>, <<
+ "function(doc) {"
+ "if (doc.language) {"
+ "emit(doc.language, 1);"
+ "}"
+ "}">>}
+ ]}}
+ ]}},
+ {<<"autoupdate">>, false},
+ {<<"options">>, {[
+ {<<"include_design">>, true},
+ {<<"interactive">>, true}
+ ]}}
+ ]}).
+
+
+wait_while_ddoc_builds(Db) ->
+ Fun = fun () ->
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ Ready = lists:filter(fun (Idx) ->
+ Idx#idx.build_status == ?INDEX_READY
+ end, mango_idx:list(TxDb)),
+
+ if length(Ready) > 1 -> ok; true ->
+ wait
+ end
+ end)
+ end,
+ test_util:wait(Fun).
+
+
+
+make_docs(Count) ->
+ [doc(I) || I <- lists:seq(1, Count)].
+
+
+doc(Id) ->
+ couch_doc:from_json_obj({[
+ {<<"_id">>, list_to_binary(integer_to_list(Id))},
+ {<<"value">>, Id}
+ ]}).
+
+
+query_cb({row, Props}, Acc) ->
+ Doc = couch_util:get_value(doc, Props),
+ {ok, Acc ++ [Doc]};
+
+query_cb(_, Acc) ->
+ {ok, Acc}.
+
diff --git a/src/ctrace/README.md b/src/ctrace/README.md
new file mode 100644
index 000000000..4b0238b14
--- /dev/null
+++ b/src/ctrace/README.md
@@ -0,0 +1,308 @@
+Overview
+========
+
+This application provides an interface to OpenTracing-compatible
+tracing systems.
+
+Open Tracing
+------------
+
+[//]: # (taken from https://github.com/opentracing/specification/blob/master/specification.md)
+Traces in OpenTracing are defined implicitly by their Spans.
+In particular, a Trace can be thought of as a directed acyclic
+graph (DAG) of Spans, where the edges between Spans are called
+References.
+
+Each Span encapsulates the following state:
+
+- An operation name
+- A start timestamp
+- A finish timestamp
+- A set of zero or more key:value Span Tags.
+- A set of zero or more Span Logs, each of which is
+ itself a key:value map paired with a timestamp.
+- A SpanContext
+- References to zero or more causally-related Spans
+
+Every trace is identified by a unique trace_id. Every trace includes zero
+or more tracing spans, each identified by a span id.
+
+Jaeger
+------
+
+Jaeger is a distributed tracing system released as open source by
+Uber Technologies. It is one of the implementations of the OpenTracing
+specification. Jaeger supports a trace detail view where a single trace is
+represented as a tree of tracing spans with detailed timing information
+about every span. In order for this feature to work, all tracing spans
+should form a lineage from the same root span.
+
+
+Implementation
+==============
+
+Every operation has a unique identifier. Example identifiers are:
+
+- all-dbs.read
+- database.delete
+- replication.trigger
+- view.compaction
+
+Tracing begins with a root span that can be filtered based on
+a set of configurable rules. When the root span is created, these
+rules are applied to see if the trace should be generated and logged.
+If a trace is disabled due to filtering, then no trace data is generated.
+
+
+Code instrumentation
+--------------------
+
+The span lifecycle is controlled by:
+
+- `ctrace:start_span`
+- `ctrace:finish_span`
+- `ctrace:with_span`
+
+The instrumentation can add tags and logs to a span.
+
+Example of instrumentation:
+
+```
+ctrace:with_span('database.read', #{'db.name' => <<>>}, fun() ->
+ ctrace:tag(#{
+ peer => Peer,
+ 'http.method' => Method,
+ nonce => Nonce,
+ 'http.url' => Path,
+ 'span.kind' => <<"server">>,
+ component => <<"couchdb.chttpd">>
+ }),
+ ctrace:log(#{
+ field0 => "value0"
+ }),
+
+ handle_request(HttpReq)
+end),
+```
+
+As you can see, the `ctrace:with_span/3` function receives a function which
+wraps the operation we want to trace. Within it we can use:
+
+- `ctrace:tag/1` to add new tags to the span
+- `ctrace:log/1` to add a log event to the span
+
+There are some informative functions as well (a short usage sketch follows
+the list):
+
+- `ctrace:refs/0` - returns the references from the current span to other spans
+- `ctrace:operation_name/0` - returns the operation name of the current span
+- `ctrace:trace_id/0` - returns the trace id of the current span
+- `ctrace:span_id/0` - returns the span id of the current span
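+
+For example, the identifiers of the current span can be attached to a log
+line (a minimal sketch; the operation name, `DbName` and `do_read/1` are
+illustrative):
+
+```
+ctrace:with_span('database.read', #{'db.name' => DbName}, fun() ->
+    couch_log:info("~p trace_id=~p span_id=~p",
+        [ctrace:operation_name(), ctrace:trace_id(), ctrace:span_id()]),
+    do_read(DbName)
+end)
+```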
+
+Instrumentation guide
+---------------------
+
+- Start a root span at system boundaries
+  - httpd
+  - internal trigger (replication or compaction jobs)
+- Start a new child span when you cross layer boundaries
+- Start a new child span when you cross a node boundary
+- Extend `<app>_httpd_handlers:handler_info/1` as needed to
+  provide operation ids. (We as a community might need to agree on
+  naming conventions)
+- Use [span conventions](https://github.com/apache/couchdb-documentation/blob/master/rfcs/011-opentracing.md#conventions)
+- When in doubt, consult the OpenTracing spec
+ - [spec overview](https://github.com/opentracing/specification/blob/master/specification.md)
+ - [conventions](https://github.com/opentracing/specification/blob/master/semantic_conventions.md#standard-span-tags-and-log-fields)
+
+Configuration
+-------------
+
+Traces are configured using the standard CouchDB ini-file based configuration.
+There is a global toggle `[tracing] enabled = true | false` that switches
+tracing on or off completely. The `[tracing]` section also includes
+configuration for where to send trace data. We support two reporters.
+
+The Thrift-over-UDP reporter (the default) has the following configuration
+options:
+
+- protocol = udp
+- thrift_format = compact | binary
+- agent_host = 127.0.0.1
+- agent_port = 6831
+
+The Thrift-over-HTTP reporter has the following options:
+
+- protocol = http
+- endpoint = http://127.0.0.1:14268
+
+An example `[tracing]` section:
+
+```ini
+[tracing]
+
+enabled = true
+thrift_format = compact ; compact | binary
+agent_host = 127.0.0.1
+agent_port = 6831
+app_name = couchdb ; Value to use for the `location.application` tag
+```
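+
+The same settings can also be changed at runtime, for example from a remsh
+(a sketch; changes made with `config:set/4` are picked up by the config
+listener, and `ctrace_config:update/0` applies them immediately):
+
+```
+config:set("tracing", "enabled", "true", false).
+ctrace_config:update().
+```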
+
+In the `[tracing.filters]` section we can define a set of rules for
+whether to include a trace. Keys are the operation name of the root
+span and values are a simple DSL for whether to include the given
+span based on its tags. See below for a more thorough description
+of the DSL. The `all` key is special and is used when no other
+filter matches a given operation. If the `all` key is not present
+then ctrace behaves as if it were defined as `(#{}) -> false`. I.e.,
+any trace that doesn't have a configuration entry is neither generated
+nor logged.
+
+```ini
+[tracing.filters]
+; trace all events
+; all = (#{}) -> true
+; trace all events with X-B3-... headers
+; all = (#{external := External}) when External == true -> true
+; database-info.read = (#{'http.method' := Method}) when Method == 'GET' -> true
+; view.build = (#{'view.name' := Name}) when Name == "foo" -> 0.25
+```
+
+Filter DSL Description
+----------------------
+
+```
+<operation_name> = ( #{<[arguments]>} ) when <[conditions]> -> <[actions]>
+```
+
+Where (a worked example is shown after the operator list below):
+ - `operation_name` is the name of the root span
+ - `arguments` is a comma separated list of pairs of the form
+   `<tag_or_field_name> := <variable_name>`
+ - `actions` is a list which contains
+   - `report`
+ - `conditions` is one of
+   - `<[condition]>`
+   - `| <[condition]> <[operator]> <[condition]>`
+ - `condition` is one of
+   - `<variable_name> <[operator]> <value>`
+   - `| <[guard_function]>(<[variable_name]>)`
+ - `variable_name` - a capitalized Erlang variable name without special characters
+ - guard_function: one of
+ - `is_atom`
+ - `is_float`
+ - `is_integer`
+ - `is_list`
+ - `is_number`
+ - `is_pid`
+ - `is_port`
+ - `is_reference`
+ - `is_tuple`
+ - `is_map`
+ - `is_binary`
+ - `is_function`
+ - `element` - `element(n, tuple)`
+ - `abs`
+ - `hd` - return head of the list
+ - `length`
+ - `map_get`
+ - `map_size`
+ - `round`
+ - `node`
+ - `size` - returns size of the tuple
+ - `bit_size` - returns number of bits in binary
+ - `byte_size` - returns number of bytes in binary
+ - `tl` - return tail of a list
+ - `trunc`
+ - `self`
+ - operator: one of
+ - `not`
+ - `and` - evaluates both expressions
+ - `andalso` - evaluates second only when first is true
+ - `or` - evaluates both expressions
+ - `orelse` - evaluates second only when first is false
+ - `xor`
+ - `+`
+ - `-`
+ - `*`
+ - `div`
+ - `rem`
+ - `band` - bitwise AND
+ - `bor` - bitwise OR
+ - `bxor` - bitwise XOR
+ - `bnot` - bitwise NOT
+ - `bsl` - arithmetic bitshift left
+ - `bsr` - bitshift right
+ - `>`
+ - `>=`
+ - `<`
+ - `=<`
+ - `=:=`
+ - `==`
+ - `=/=`
+ - `/=` - not equal
+
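+As a worked example (a sketch; the operation and tag names are illustrative,
+and filters are normally compiled from the `[tracing.filters]` section by
+`ctrace_config:update/0` rather than by hand):
+
+```
+ok = ctrace_dsl:compile("view.build",
+    "(#{'view.name' := Name}) when Name == \"foo\" -> 0.25").
+ctrace:match('view.build', #{'view.name' => "foo"}). %% true for ~25% of calls
+ctrace:match('view.build', #{'view.name' => "bar"}). %% false
+```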
+
+b3 propagation
+--------------
+
+In order to correlate spans across multiple systems, the information
+about the parent span can be passed via headers. Currently the chttpd
+application is responsible for extracting and parsing the headers.
+The ctrace application provides the following facilities to enable this
+use case:
+
+- `{root, RootSpan}` option for `ctrace:start_span/2`
+- `ctrace:external_span/3` to convert references to a root span
+
+The span references can be set either via the `b3` header or via
+individual headers. When individual headers are used, the
+following set of headers is supported:
+
+- X-B3-TraceId (32 lower-hex characters)
+- X-B3-SpanId (16 lower-hex characters)
+ (has no effect if X-B3-TraceId is not set)
+- X-B3-ParentSpanId (16 lower-hex characters)
+ (has no effect if X-B3-TraceId is not set)
+
+Alternatively, a single `b3` header can be used. It has to be
+in the following format:
+
+`b3={TraceId}-{SpanId}-{SamplingState}-{ParentSpanId}`
+
+where SamplingState is either `0` or `1`; however, we ignore the value.
+
+Note: we only support 128-bit TraceIds.
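+
+A minimal sketch of this flow (the header parsing and `handle_request/1` are
+illustrative; only the `ctrace` calls are part of this application):
+
+```
+TraceId = binary_to_integer(TraceIdHex, 16),  %% from X-B3-TraceId
+SpanId = binary_to_integer(SpanIdHex, 16),    %% from X-B3-SpanId
+ParentId = binary_to_integer(ParentHex, 16),  %% from X-B3-ParentSpanId
+RootSpan = ctrace:external_span(TraceId, SpanId, ParentId),
+ctrace:start_span('database.read', [{root, RootSpan}]),
+try
+    handle_request(HttpReq)
+after
+    ctrace:finish_span()
+end
+```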
+
+Developing
+==========
+
+Here we provide a list of frequently used commands that are
+useful while working on this application.
+
+
+1. Run all tests
+```
+make setup-eunit
+make && ERL_LIBS=`pwd`/src BUILDDIR=`pwd` mix test --trace src/chttpd/test/exunit/ src/ctrace/test/exunit/
+```
+
+2. Run tests selectively
+```
+make && ERL_LIBS=`pwd`/src BUILDDIR=`pwd` mix test --trace src/chttpd/test/exunit/ctrace_context_test.exs:59
+```
+
+3. Re-run only failed tests
+```
+make && ERL_LIBS=`pwd`/src BUILDDIR=`pwd` mix test --failed --trace src/chttpd/test/exunit/ src/ctrace/test/exunit/
+```
+
+4. Running jaeger in docker
+```
+docker run -d --net fdb-core --name jaeger.local -p 6831:6831/udp -p 16686:16686 jaegertracing/all-in-one:1.14
+```
+
+If Docker isn't your cup of tea, the Jaeger project also provides
+prebuilt binaries that can be downloaded. On macOS we can easily
+set up a development Jaeger instance by running the prebuilt
+`jaeger-all-in-one` binary without any arguments. \ No newline at end of file
diff --git a/src/ctrace/rebar.config b/src/ctrace/rebar.config
new file mode 100644
index 000000000..362c8785e
--- /dev/null
+++ b/src/ctrace/rebar.config
@@ -0,0 +1,14 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+{cover_enabled, true}.
+{cover_print_enabled, true}.
diff --git a/src/ctrace/src/ctrace.app.src b/src/ctrace/src/ctrace.app.src
new file mode 100644
index 000000000..64f4fc5df
--- /dev/null
+++ b/src/ctrace/src/ctrace.app.src
@@ -0,0 +1,27 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+ {application, ctrace, [
+ {description, "Open tracer API for CouchDB"},
+ {vsn, git},
+ {registered, [
+ ]},
+ {applications, [
+ kernel,
+ stdlib,
+ syntax_tools,
+ config,
+ jaeger_passage,
+ passage
+ ]},
+ {mod, {ctrace_app, []}}
+]}.
diff --git a/src/ctrace/src/ctrace.erl b/src/ctrace/src/ctrace.erl
new file mode 100644
index 000000000..5521901fd
--- /dev/null
+++ b/src/ctrace/src/ctrace.erl
@@ -0,0 +1,361 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ctrace).
+
+-vsn(1).
+
+-export([
+ is_enabled/0,
+
+ with_span/2,
+ with_span/3,
+ start_span/1,
+ start_span/2,
+ finish_span/0,
+ finish_span/1,
+ has_span/0,
+ external_span/3,
+
+ tag/1,
+ log/1,
+
+ tags/0,
+ refs/0,
+ operation_name/0,
+ trace_id/0,
+ span_id/0,
+ tracer/0,
+ context/0,
+
+ match/2
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("passage/include/opentracing.hrl").
+-include("ctrace.hrl").
+
+
+-type operation()
+ :: atom()
+ | fun().
+
+-type tags()
+ :: #{atom() => term()}.
+
+-type log_fields()
+ :: #{atom() => term()}.
+
+-type start_span_options()
+ :: [start_span_option()].
+
+-type start_span_option()
+    :: {time, erlang:timestamp()}
+ | {tags, tags()}.
+
+-type finish_span_options()
+ :: [finish_span_option()].
+
+-type finish_span_option()
+    :: {time, erlang:timestamp()}.
+
+
+-spec is_enabled() -> boolean().
+
+is_enabled() ->
+ case get(?IS_ENABLED_KEY) of
+ undefined ->
+ Result = ctrace_config:is_enabled(),
+ put(?IS_ENABLED_KEY, Result),
+ Result;
+ IsEnabled ->
+ IsEnabled
+ end.
+
+
+%% @equiv with_span(Operation, [], Fun)
+-spec with_span(
+ Operation :: operation(),
+ Fun
+ ) -> Result when
+ Fun :: fun (() -> Result),
+ Result :: term().
+
+with_span(Operation, Fun) ->
+ with_span(Operation, #{}, Fun).
+
+-spec with_span(
+ Operation :: operation(),
+ TagsOrOptions :: tags() | start_span_options(),
+ Fun
+ ) -> Result when
+ Fun :: fun (() -> Result),
+ Result :: term().
+
+with_span(Operation, ExtraTags, Fun) when is_map(ExtraTags) ->
+ with_span(Operation, [{tags, ExtraTags}], Fun);
+
+with_span(Operation, Options, Fun) ->
+ try
+ start_span(Operation, Options),
+ Fun()
+ catch Type:Reason ->
+ Stack = erlang:get_stacktrace(),
+ log(#{
+ ?LOG_FIELD_ERROR_KIND => Type,
+ ?LOG_FIELD_MESSAGE => Reason,
+ ?LOG_FIELD_STACK => Stack
+ }, [error]),
+ erlang:raise(Type, Reason, Stack)
+ after
+ finish_span()
+ end.
+
+-spec start_span(
+ Operation :: operation()
+ ) -> ok.
+
+start_span(Operation) ->
+ start_span(Operation, []).
+
+-spec start_span(
+ Operation :: operation(),
+ Options :: start_span_options()
+ ) -> ok.
+
+start_span(Operation, Options) ->
+ case is_enabled() of
+ true ->
+ do_start_span(Operation, Options);
+ false ->
+ ok
+ end.
+
+do_start_span(Fun, Options) when is_function(Fun) ->
+ start_span(fun_to_op(Fun), Options);
+
+do_start_span(OperationName, Options0) ->
+ Options1 = add_time(Options0),
+ case passage_pd:current_span() of
+ undefined ->
+ put(?ORIGIN_KEY, atom_to_binary(OperationName, utf8)),
+ Tags = case lists:keyfind(tags, 1, Options0) of
+ {tags, T} ->
+ T;
+ false ->
+ #{}
+ end,
+ case match(OperationName, Tags) of
+ true ->
+ Options = [
+ {tracer, ?MAIN_TRACER}
+ | maybe_start_root(Options1)
+ ],
+ passage_pd:start_span(OperationName, Options);
+ false ->
+ ok
+ end;
+ Span ->
+ Options = add_tags([{child_of, Span} | Options1], #{
+ origin => get(?ORIGIN_KEY)
+ }),
+ passage_pd:start_span(OperationName, Options)
+ end.
+
+-spec finish_span() -> ok.
+
+finish_span() ->
+ finish_span([]).
+
+-spec finish_span(
+ Options :: finish_span_options()
+ ) -> ok.
+
+finish_span(Options0) ->
+ Options = add_time(Options0),
+ passage_pd:finish_span(Options).
+
+-spec tag(
+ Tags :: tags()
+ ) -> ok.
+
+tag(Tags) ->
+ passage_pd:set_tags(Tags).
+
+-spec log(
+ Fields :: log_fields() | fun (() -> log_fields())
+ ) -> ok.
+
+log(FieldsOrFun) ->
+ log(FieldsOrFun, []).
+
+log(FieldsOrFun, Options) ->
+ passage_pd:log(FieldsOrFun, Options).
+
+-spec tags() -> tags().
+
+tags() ->
+ case passage_pd:current_span() of
+ undefined ->
+ undefined;
+ Span ->
+ passage_span:get_tags(Span)
+ end.
+
+-spec refs() -> passage:refs().
+
+refs() ->
+ case passage_pd:current_span() of
+ undefined ->
+ undefined;
+ Span ->
+ passage_span:get_refs(Span)
+ end.
+
+-spec has_span() -> boolean().
+
+has_span() ->
+ passage_pd:current_span() =/= undefined.
+
+-spec operation_name() -> atom().
+
+operation_name() ->
+ case passage_pd:current_span() of
+ undefined ->
+ undefined;
+ Span ->
+ passage_span:get_operation_name(Span)
+ end.
+
+-spec trace_id() -> 0..16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF.
+
+trace_id() ->
+ case passage_pd:current_span() of
+ undefined ->
+ undefined;
+ Span ->
+ Context = passage_span:get_context(Span),
+ jaeger_passage_span_context:get_trace_id(Context)
+ end.
+
+-spec span_id() -> 0..16#FFFFFFFFFFFFFFFF.
+
+span_id() ->
+ case passage_pd:current_span() of
+ undefined ->
+ undefined;
+ Span ->
+ Context = passage_span:get_context(Span),
+ jaeger_passage_span_context:get_span_id(Context)
+ end.
+
+-spec tracer() -> passage:tracer_id().
+
+tracer() ->
+ case passage_pd:current_span() of
+ undefined ->
+ undefined;
+ Span ->
+ passage_span:get_tracer(Span)
+ end.
+
+-spec context() -> passage_span_context:context().
+
+context() ->
+ case passage_pd:current_span() of
+ undefined ->
+ undefined;
+ Span ->
+ passage_span:get_context(Span)
+ end.
+
+-spec external_span(
+ TraceId :: passage:trace_id(),
+ SpanId :: undefined | passage:span_id(),
+ ParentSpanId :: undefined | passage:span_id()
+ ) -> passage:maybe_span().
+
+external_span(TraceId, undefined, ParentSpanId) ->
+ external_span(TraceId, rand:uniform(16#FFFFFFFFFFFFFFFF), ParentSpanId);
+external_span(TraceId, SpanId, undefined) ->
+ external_span(TraceId, SpanId, rand:uniform(16#FFFFFFFFFFFFFFFF));
+external_span(TraceId, SpanId, ParentSpanId) ->
+ IterFun = fun(Val) -> Val end,
+ Flags = <<0:32>>,
+ BaggageItems = <<0:32>>,
+ Binary = <<
+ TraceId:128,
+ SpanId:64,
+ ParentSpanId:64,
+ Flags/binary,
+ BaggageItems/binary
+ >>,
+ State = {ok, <<"binary">>, Binary, error},
+ passage:extract_span(?MAIN_TRACER, binary, IterFun, State).
+
+
+match(OperationId, Tags) ->
+ OpMod = ctrace_config:filter_module_name(OperationId),
+ case erlang:function_exported(OpMod, match, 1) of
+ true ->
+ do_match(OpMod, Tags);
+ false ->
+ AllMod = ctrace_config:filter_module_name("all"),
+ case erlang:function_exported(AllMod, match, 1) of
+ true -> do_match(AllMod, Tags);
+ false -> false
+ end
+ end.
+
+
+do_match(Mod, Tags) ->
+ case Mod:match(Tags) of
+ true ->
+ true;
+ false ->
+ false;
+ Rate when is_float(Rate) ->
+ rand:uniform() =< Rate
+ end.
+
+
+add_tags(Options, ExtraTags) ->
+ case lists:keytake(tags, 1, Options) of
+ {value, {tags, T}, Opts} ->
+ [{tags, maps:merge(T, ExtraTags)} | Opts];
+ false ->
+ [{tags, ExtraTags} | Options]
+ end.
+
+add_time(Options) ->
+ case lists:keymember(time, 1, Options) of
+ true ->
+ Options;
+ false ->
+ [{time, os:timestamp()} | Options]
+ end.
+
+maybe_start_root(Options) ->
+ case lists:keytake(root, 1, Options) of
+ {value, {root, Root}, NewOptions} ->
+ [{child_of, Root} | NewOptions];
+ false ->
+ Options
+ end.
+
+fun_to_op(Fun) ->
+ {module, M} = erlang:fun_info(Fun, module),
+ {name, F} = erlang:fun_info(Fun, name),
+ {arity, A} = erlang:fun_info(Fun, arity),
+ Str = io_lib:format("~s:~s/~b", [M, F, A]),
+ list_to_atom(lists:flatten(Str)).
diff --git a/src/ctrace/src/ctrace.hrl b/src/ctrace/src/ctrace.hrl
new file mode 100644
index 000000000..3819bbd50
--- /dev/null
+++ b/src/ctrace/src/ctrace.hrl
@@ -0,0 +1,15 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-define(MAIN_TRACER, jaeger_passage_reporter).
+-define(IS_ENABLED_KEY, ctrace_is_enabled).
+-define(ORIGIN_KEY, ctrace_origin_key).
diff --git a/src/ctrace/src/ctrace_app.erl b/src/ctrace/src/ctrace_app.erl
new file mode 100644
index 000000000..c98b897e0
--- /dev/null
+++ b/src/ctrace/src/ctrace_app.erl
@@ -0,0 +1,26 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ctrace_app).
+
+-behaviour(application).
+
+-export([
+ start/2,
+ stop/1
+]).
+
+start(_StartType, _StartArgs) ->
+ ctrace_sup:start_link().
+
+stop(_State) ->
+ ok.
diff --git a/src/ctrace/src/ctrace_config.erl b/src/ctrace/src/ctrace_config.erl
new file mode 100644
index 000000000..c63c77f1b
--- /dev/null
+++ b/src/ctrace/src/ctrace_config.erl
@@ -0,0 +1,153 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ctrace_config).
+
+-vsn(1).
+
+-behaviour(config_listener).
+
+-export([
+ is_enabled/0,
+ update/0,
+
+ filter_module_name/1
+]).
+
+-export([
+ handle_config_change/5,
+ handle_config_terminate/3
+]).
+
+-include("ctrace.hrl").
+
+
+-spec is_enabled() -> boolean().
+is_enabled() ->
+ config:get_boolean("tracing", "enabled", false).
+
+
+-spec update() -> ok.
+update() ->
+ case is_enabled() of
+ true ->
+ maybe_start_main_tracer(?MAIN_TRACER),
+
+ CompiledFilters = get_compiled_filters(),
+
+ RemovedFilters = lists:foldl(fun({OperationId, FilterDef}, Acc) ->
+ case compile_filter(OperationId, FilterDef) of
+ true -> Acc -- [OperationId];
+ false -> Acc
+ end
+ end, CompiledFilters, config:get("tracing.filters")),
+
+ lists:foreach(fun(OperationId) ->
+ ModName = filter_module_name(OperationId),
+ code:delete(ModName),
+ code:purge(ModName)
+ end, RemovedFilters),
+
+ case config:get("tracing.filters", "all") of
+ undefined -> compile_filter("all", "(#{}) -> false");
+ _ -> ok
+ end;
+
+ false ->
+ jaeger_passage:stop_tracer(?MAIN_TRACER)
+ end,
+ ok.
+
+
+-spec filter_module_name(atom() | string()) -> atom().
+filter_module_name(OperationId) when is_atom(OperationId) ->
+ filter_module_name(atom_to_list(OperationId));
+filter_module_name(OperationId) ->
+ list_to_atom("ctrace_filter_" ++ OperationId).
+
+
+handle_config_change("tracing", "enabled", _, _Persist, St) ->
+ update(),
+ {ok, St};
+handle_config_change("tracing.filters", _Key, _Val, _Persist, St) ->
+ update(),
+ {ok, St};
+handle_config_change(_Sec, _Key, _Val, _Persist, St) ->
+ {ok, St}.
+
+handle_config_terminate(_Server, _Reason, _State) ->
+ update().
+
+
+maybe_start_main_tracer(TracerId) ->
+ case passage_tracer_registry:get_reporter(TracerId) of
+ error ->
+ start_main_tracer(TracerId);
+ _ ->
+ true
+ end.
+
+
+start_main_tracer(TracerId) ->
+ MaxQueueLen = config:get_integer("tracing", "max_queue_len", 1024),
+ Sampler = jaeger_passage_sampler_queue_limit:new(
+ passage_sampler_all:new(), TracerId, MaxQueueLen),
+ ServiceName = list_to_atom(config:get("tracing", "app_name", "couchdb")),
+
+ ProtocolOptions = case config:get("tracing", "protocol", "udp") of
+ "udp" ->
+ [
+ {thrift_format, list_to_atom(
+ config:get("tracing", "thrift_format", "compact"))},
+ {agent_host,
+ config:get("tracing", "agent_host", "127.0.0.1")},
+ {agent_port,
+ config:get_integer("tracing", "agent_port", 6831)},
+ {protocol, udp},
+ {default_service_name, ServiceName}
+ ];
+ "http" ++ _ ->
+ [
+ {endpoint,
+ config:get("tracing", "endpoint", "http://127.0.0.1:14268")},
+ {protocol, http},
+ {http_client, fun http_client/5},
+ {default_service_name, ServiceName}
+ ]
+ end,
+ Options = [{default_service_name, ServiceName}|ProtocolOptions],
+ ok = jaeger_passage:start_tracer(TracerId, Sampler, Options).
+
+http_client(Endpoint, Method, Headers, Body, _ReporterOptions) ->
+ ibrowse:send_req(Endpoint, Headers, Method, Body, []).
+
+compile_filter(OperationId, FilterDef) ->
+ try
+ couch_log:info("Compiling filter : ~s", [OperationId]),
+ ctrace_dsl:compile(OperationId, FilterDef),
+ true
+ catch throw:{error, Reason} ->
+ couch_log:error("Cannot compile ~s :: ~s~n", [OperationId, Reason]),
+ false
+ end.
+
+
+get_compiled_filters() ->
+ lists:foldl(fun({Mod, _Path}, Acc) ->
+ ModStr = atom_to_list(Mod),
+ case ModStr of
+ "ctrace_filter_" ++ OpName ->
+ [OpName | Acc];
+ _ ->
+ Acc
+ end
+ end, [], code:all_loaded()).
diff --git a/src/ctrace/src/ctrace_dsl.erl b/src/ctrace/src/ctrace_dsl.erl
new file mode 100644
index 000000000..5e0b0f252
--- /dev/null
+++ b/src/ctrace/src/ctrace_dsl.erl
@@ -0,0 +1,106 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ctrace_dsl).
+-include_lib("syntax_tools/include/merl.hrl").
+
+-export([
+ compile/2,
+
+ % Debug
+ source/2
+]).
+
+
+-type ast() :: erl_syntax:syntaxTree().
+
+
+-spec compile(OperationId :: string(), FilterDef :: string()) -> ok.
+compile(OperationId, FilterDef) ->
+ AST = parse_filter(OperationId, FilterDef),
+ merl:compile_and_load(AST),
+ ok.
+
+
+-spec source(OperationId :: string(), FilterDef :: string()) -> string().
+source(OperationId, FilterDef) ->
+ AST = parse_filter(OperationId, FilterDef),
+ Options = [{paper, 160}, {ribbon, 80}],
+ erl_prettypr:format(erl_syntax:form_list(AST), Options).
+
+
+-spec parse_filter(OperationId :: string(), FilterDef :: string()) -> [ast()].
+parse_filter(OperationId, FilterDef) ->
+ AST = merl:quote("match" ++ FilterDef ++ "."),
+ case AST of
+ ?Q("match(_@Args) when _@__@Guard -> _@Return.")
+ when erl_syntax:type(Args) == map_expr ->
+ validate_args(Args),
+ validate_return(Return),
+ generate(OperationId, Args, Guard, Return);
+ ?Q("match(_@Args) when _@__@Guard -> _@@_.") ->
+ fail("The only argument of the filter should be map");
+ ?Q("match(_@@Args) when _@__@Guard -> _@@_.") ->
+ fail("The arity of the filter function should be 1");
+ _ ->
+ fail("Unknown shape of a filter function")
+ end.
+
+
+-spec validate_args(MapAST :: ast()) -> ok.
+validate_args(MapAST) ->
+    %% Unfortunately merl doesn't seem to support maps,
+    %% so we have to do it manually
+ lists:foldl(fun(AST, Bindings) ->
+ erl_syntax:type(AST) == map_field_exact
+ orelse fail("Only #{field := Var} syntax is supported in the header"),
+ NameAST = erl_syntax:map_field_exact_name(AST),
+ erl_syntax:type(NameAST) == atom
+ orelse fail("Only atoms are supported as field names in the header"),
+ Name = erl_syntax:atom_value(NameAST),
+ VarAST = erl_syntax:map_field_exact_value(AST),
+ erl_syntax:type(VarAST) == variable
+ orelse fail("Only capitalized names are supported as matching variables in the header"),
+ Var = erl_syntax:variable_name(VarAST),
+ maps:is_key(Var, Bindings)
+ andalso fail("'~s' variable is already in use", [Var]),
+ Bindings#{Var => Name}
+ end, #{}, erl_syntax:map_expr_fields(MapAST)).
+
+
+-spec validate_return(Return :: [ast()]) -> ok.
+validate_return(Return) ->
+ case Return of
+ ?Q("true") -> ok;
+ ?Q("false") -> ok;
+ ?Q("_@AST") when erl_syntax:type(AST) == float -> ok;
+ _ ->
+ fail("Unsupported return value '~s'", [erl_prettypr:format(Return)])
+ end.
+
+
+generate(OperationId, Args, Guard, Return) ->
+ ModuleName = ctrace_config:filter_module_name(OperationId),
+ Module = ?Q("-module('@ModuleName@')."),
+ Export = ?Q("-export([match/1])."),
+ Function = erl_syntax:function(merl:term(match), [
+ ?Q("(_@Args) when _@__@Guard -> _@Return"),
+ ?Q("(_) -> false")
+ ]),
+ lists:flatten([Module, Export, Function]).
+
+
+fail(Msg) ->
+ throw({error, Msg}).
+
+fail(Msg, Args) ->
+ throw({error, lists:flatten(io_lib:format(Msg, Args))}). \ No newline at end of file
diff --git a/src/ctrace/src/ctrace_sup.erl b/src/ctrace/src/ctrace_sup.erl
new file mode 100644
index 000000000..70de3c586
--- /dev/null
+++ b/src/ctrace/src/ctrace_sup.erl
@@ -0,0 +1,41 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ctrace_sup).
+-behaviour(supervisor).
+-vsn(1).
+
+-export([
+ start_link/0,
+ init/1
+]).
+
+start_link() ->
+ ctrace_config:update(),
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Flags = #{
+ strategy => one_for_one,
+ intensity => 5,
+ period => 10
+ },
+ Children = [
+ #{
+ id => config_listener_mon,
+ type => worker,
+ restart => permanent,
+ shutdown => 5000,
+ start => {config_listener_mon, start_link, [ctrace_config, nil]}
+ }
+ ],
+ {ok, {Flags, Children}}. \ No newline at end of file
diff --git a/src/ctrace/test/ctrace_config_test.erl b/src/ctrace/test/ctrace_config_test.erl
new file mode 100644
index 000000000..0827013fd
--- /dev/null
+++ b/src/ctrace/test/ctrace_config_test.erl
@@ -0,0 +1,153 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ctrace_config_test).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("ctrace/src/ctrace.hrl").
+
+
+-define(TDEF(A), {atom_to_list(A), fun A/0}).
+
+
+ctrace_config_test_() ->
+ {
+ "Test ctrace_config",
+ {
+ setup,
+ fun setup/0,
+ fun cleanup/1,
+ [
+ ?TDEF(ensure_main_tracer_started),
+ ?TDEF(ensure_all_supported),
+ ?TDEF(handle_all_syntax_error_supported),
+ ?TDEF(ensure_filter_updated),
+ ?TDEF(ensure_filter_removed),
+ ?TDEF(ensure_bad_filter_ignored)
+ ]
+ }
+ }.
+
+
+setup() ->
+ Ctx = test_util:start_couch([ctrace]),
+
+ config_set("tracing", "enabled", "true"),
+
+ Filter = "(#{method := M}) when M == get -> true",
+ config_set("tracing.filters", "base", Filter),
+
+ ctrace_config:update(),
+
+ Ctx.
+
+
+cleanup(Ctx) ->
+ test_util:stop_couch(Ctx).
+
+
+ensure_main_tracer_started() ->
+ ?assertMatch(
+ {ok, _},
+ passage_tracer_registry:get_reporter(?MAIN_TRACER)
+ ).
+
+
+ensure_all_supported() ->
+ config:delete("tracing.filters", "all", false),
+ test_util:wait_value(fun() ->
+ config:get("tracing.filters", "all")
+ end, undefined),
+ ctrace_config:update(),
+
+ ?assertEqual(false, ctrace:match(bam, #{gee => whiz})),
+
+ Filter = "(#{}) -> true",
+ config_set("tracing.filters", "all", Filter),
+ ctrace_config:update(),
+
+ ?assertEqual(true, ctrace:match(bam, #{gee => whiz})).
+
+
+handle_all_syntax_error_supported() ->
+ couch_log:error("XKCD: TEST START", []),
+ config:delete("tracing.filters", "all", false),
+ test_util:wait_value(fun() ->
+ config:get("tracing.filters", "all")
+ end, undefined),
+ ctrace_config:update(),
+
+ ?assertEqual(false, ctrace:match(bam, #{gee => whiz})),
+
+ Filter = "( -> true.",
+ config_set("tracing.filters", "all", Filter),
+ ctrace_config:update(),
+
+    % If there's a syntax error in the `all` handler
+ % then we default to not generating traces
+ ?assertEqual(false, ctrace:match(bam, #{gee => whiz})),
+
+ couch_log:error("XKCD: TEST END", []),
+ config:delete("tracing.filters", "all", false).
+
+
+ensure_filter_updated() ->
+ Filter1 = "(#{}) -> true",
+ config_set("tracing.filters", "bing", Filter1),
+ ctrace_config:update(),
+
+ ?assertEqual(true, ctrace:match(bing, #{gee => whiz})),
+
+ Filter2 = "(#{}) -> false",
+ config_set("tracing.filters", "bing", Filter2),
+ ctrace_config:update(),
+
+ ?assertEqual(false, ctrace:match(bing, #{gee => whiz})).
+
+
+ensure_filter_removed() ->
+ Filter = "(#{}) -> true",
+ config_set("tracing.filters", "bango", Filter),
+ ctrace_config:update(),
+
+ ?assertEqual(true, ctrace:match(bango, #{gee => whiz})),
+
+ config:delete("tracing.filters", "bango", false),
+ test_util:wait_value(fun() ->
+ config:get("tracing.filters", "bango")
+ end, undefined),
+ ctrace_config:update(),
+
+ FilterMod = ctrace_config:filter_module_name("bango"),
+ ?assertEqual(false, code:is_loaded(FilterMod)).
+
+
+ensure_bad_filter_ignored() ->
+ Filter = "#foo stuff",
+ config_set("tracing.filters", "compile_error", Filter),
+ ctrace_config:update(),
+
+ FilterMod = ctrace_config:filter_module_name("compile_error"),
+ ?assertEqual(false, code:is_loaded(FilterMod)),
+
+ AllMod = ctrace_config:filter_module_name(all),
+ ?assertMatch({file, _}, code:is_loaded(AllMod)).
+
+
+config_set(Section, Key, Value) ->
+ PrevValue = config:get(Section, Key),
+ if Value == PrevValue -> ok; true ->
+ config:set(Section, Key, Value, false),
+ test_util:wait_other_value(fun() ->
+ config:get(Section, Key)
+ end, PrevValue)
+ end.
diff --git a/src/ctrace/test/ctrace_dsl_test.erl b/src/ctrace/test/ctrace_dsl_test.erl
new file mode 100644
index 000000000..601e6cd17
--- /dev/null
+++ b/src/ctrace/test/ctrace_dsl_test.erl
@@ -0,0 +1,123 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ctrace_dsl_test).
+
+
+-include_lib("eunit/include/eunit.hrl").
+
+
+simple_parse_and_compile_test() ->
+ Filter = "(#{'http.method' := Method}) when Method == get -> 1.0",
+ ctrace_dsl:compile("foo", Filter),
+ ?assertEqual(1.0, run_filter("foo", #{'http.method' => get})),
+ ?assertEqual(false, run_filter("foo", #{'httpd.method' => put})).
+
+
+empty_map_test() ->
+ Filter = "(#{}) -> true",
+ ctrace_dsl:compile("foo", Filter),
+ ?assertEqual(true, run_filter("foo", #{})),
+ ?assertEqual(true, run_filter("foo", #{foo => bar})),
+ ?assertEqual(false, run_filter("foo", nil)).
+
+
+return_false_test() ->
+ Filter = "(#{}) -> false",
+ ctrace_dsl:compile("foo", Filter),
+ ?assertEqual(false, run_filter("foo", #{})),
+ ?assertEqual(false, run_filter("foo", nil)).
+
+
+return_float_test() ->
+ Filter = "(#{}) -> 0.2",
+ ctrace_dsl:compile("foo", Filter),
+ ?assertEqual(0.2, run_filter("foo", #{})),
+ ?assertEqual(false, run_filter("foo", nil)).
+
+
+bad_filter_body_is_list_test() ->
+ Filter = "(#{}) -> []",
+ Error = "Unsupported return value '[]'",
+ ?assertThrow({error, Error}, ctrace_dsl:compile("foo", Filter)).
+
+
+bad_filter_body_has_calls_test() ->
+ Filter = "(#{}) -> [module:function()]",
+ Error = "Unsupported return value '[module:function()]'",
+ ?assertThrow({error, Error}, ctrace_dsl:compile("foo", Filter)).
+
+
+bad_arg_list_too_few_test() ->
+ Filter = "() -> true",
+ Error = "The arity of the filter function should be 1",
+ ?assertThrow({error, Error}, ctrace_dsl:compile("foo", Filter)).
+
+
+bad_arg_list_too_many_test() ->
+ Filter = "(#{}, foo) -> true",
+ Error = "The arity of the filter function should be 1",
+ ?assertThrow({error, Error}, ctrace_dsl:compile("foo", Filter)).
+
+
+bad_arg_type_test() ->
+ Filters = [
+ "(atom) -> true",
+ "([atom]) -> true",
+ "(1) -> true",
+ "(1.0) -> true"
+ ],
+ Error = "The only argument of the filter should be map",
+ lists:foreach(fun(Filter) ->
+ ?assertThrow({error, Error}, ctrace_dsl:compile("foo", Filter))
+ end, Filters).
+
+
+bad_map_association_test() ->
+ Filter = "(#{foo => Var}) -> true",
+ Error = "Only #{field := Var} syntax is supported in the header",
+ ?assertThrow({error, Error}, ctrace_dsl:compile("foo", Filter)).
+
+
+bad_field_variable_test() ->
+ Filter = "(#{Var := Val}) -> false",
+ Error = "Only atoms are supported as field names in the header",
+ ?assertThrow({error, Error}, ctrace_dsl:compile("foo", Filter)).
+
+
+bad_field_match_test() ->
+ Filter = "(#{foo := 2}) -> true",
+ Error = "Only capitalized names are supported"
+ " as matching variables in the header",
+ ?assertThrow({error, Error}, ctrace_dsl:compile("foo", Filter)).
+
+
+repeated_variable_test() ->
+ Filter = "(#{foo := Val, bar := Val}) -> true",
+ Error = "'Val' variable is already in use",
+ ?assertThrow({error, Error}, ctrace_dsl:compile("foo", Filter)).
+
+
+code_coverage1_test() ->
+ Filter = "foo(#{}) -> bar",
+ Error = "Unknown shape of a filter function",
+ ?assertThrow({error, Error}, ctrace_dsl:compile("foo", Filter)).
+
+
+code_coverage2_test() ->
+ Filter = "(#{}) -> true",
+ ?assertMatch([_ | _], ctrace_dsl:source("foo", Filter)).
+
+
+run_filter(OperationId, Value) ->
+ ModName = ctrace_config:filter_module_name(OperationId),
+ ModName:match(Value).
diff --git a/src/ctrace/test/ctrace_test.erl b/src/ctrace/test/ctrace_test.erl
new file mode 100644
index 000000000..962f9aae3
--- /dev/null
+++ b/src/ctrace/test/ctrace_test.erl
@@ -0,0 +1,412 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ctrace_test).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("ctrace/src/ctrace.hrl").
+
+
+-define(TDEF(A), {atom_to_list(A), fun A/0}).
+
+
+ctrace_config_test_() ->
+ {
+ "Test ctrace",
+ {
+ setup,
+ fun setup/0,
+ fun cleanup/1,
+ [
+ ?TDEF(is_enabled_cached),
+ ?TDEF(simple_with_span),
+ ?TDEF(with_span_exception),
+ ?TDEF(simple_start_finish_span),
+ ?TDEF(op_name_from_fun),
+ ?TDEF(skipped_when_disabled),
+ ?TDEF(include_or_skip_on_sampled),
+ ?TDEF(set_tags_on_start_span),
+ ?TDEF(set_time_on_start_span),
+ ?TDEF(skip_on_filtered),
+ ?TDEF(simple_child_span),
+ ?TDEF(update_tags),
+ ?TDEF(update_logs),
+ ?TDEF(current_span_getters),
+ ?TDEF(create_external_span),
+ ?TDEF(use_external_span)
+ ]
+ }
+ }.
+
+
+setup() ->
+ Ctx = test_util:start_couch([ctrace]),
+
+ config_set("tracing", "enabled", "true"),
+
+ Filter = "(#{}) -> true",
+ config_set("tracing.filters", "all", Filter),
+
+ ctrace_config:update(),
+
+ MainReporter = passage_tracer_registry:get_reporter(?MAIN_TRACER),
+
+ {MainReporter, Ctx}.
+
+
+cleanup({MainReporter, Ctx}) ->
+ passage_tracer_registry:set_reporter(?MAIN_TRACER, MainReporter),
+ test_util:stop_couch(Ctx).
+
+
+is_enabled_cached() ->
+ erase(?IS_ENABLED_KEY),
+ Result = ctrace:is_enabled(),
+ ?assertEqual(Result, get(?IS_ENABLED_KEY)),
+ ?assert(is_boolean(Result)),
+
+ % Fake override to test that we're using the cached value
+ put(?IS_ENABLED_KEY, not Result),
+ ?assertEqual(not Result, ctrace:is_enabled()),
+
+ % Revert to original to not mess with other tests
+ put(?IS_ENABLED_KEY, Result).
+
+
+simple_with_span() ->
+ set_self_reporter(),
+
+ Result = ctrace:with_span(zing, fun() ->
+ a_result
+ end),
+
+ ?assertEqual(a_result, Result),
+
+ receive
+ {span, Span} ->
+ ?assertEqual(zing, passage_span:get_operation_name(Span))
+ end.
+
+
+with_span_exception() ->
+ set_self_reporter(),
+
+ Result = try
+ ctrace:with_span(zab, fun() ->
+ throw(foo)
+ end)
+ catch T:R ->
+ {T, R}
+ end,
+
+ ?assertEqual({throw, foo}, Result),
+
+ receive
+ {span, Span} ->
+ ?assertEqual(zab, passage_span:get_operation_name(Span)),
+ ?assertMatch(
+ [
+ {#{
+ 'error.kind' := throw,
+ event := error,
+ message := foo,
+ stack := [_ | _]
+ }, _TimeStamp}
+ ],
+ passage_span:get_logs(Span)
+ )
+ end.
+
+
+simple_start_finish_span() ->
+ set_self_reporter(),
+
+ ctrace:start_span(foo),
+ ctrace:finish_span(),
+
+ receive
+ {span, Span} ->
+ ?assertEqual(foo, passage_span:get_operation_name(Span))
+ end.
+
+
+op_name_from_fun() ->
+ set_self_reporter(),
+
+ ctrace:start_span(fun ctrace:match/2),
+ ctrace:finish_span(),
+
+ receive
+ {span, Span} ->
+ OpName = passage_span:get_operation_name(Span),
+ ?assertEqual('ctrace:match/2', OpName)
+ end.
+
+
+skipped_when_disabled() ->
+ set_self_reporter(),
+
+ ?assert(not ctrace:has_span()),
+ ctrace:start_span(foo),
+ ?assert(ctrace:has_span()),
+ ctrace:finish_span(),
+ ?assert(not ctrace:has_span()),
+ receive {span, _Span} -> ok end,
+
+ IsEnabled = get(?IS_ENABLED_KEY),
+ try
+ put(?IS_ENABLED_KEY, false),
+
+ ?assert(not ctrace:has_span()),
+ ctrace:start_span(foo),
+ ?assert(not ctrace:has_span()),
+ ctrace:finish_span(),
+ ?assert(not ctrace:has_span())
+ after
+ put(?IS_ENABLED_KEY, IsEnabled)
+ end.
+
+
+set_tags_on_start_span() ->
+ set_self_reporter(),
+
+ Tags = #{foo => bar},
+ ctrace:start_span(bang, [{tags, Tags}]),
+ ctrace:finish_span(),
+
+ receive
+ {span, Span} ->
+ ?assertEqual(bang, passage_span:get_operation_name(Span)),
+ ?assertEqual(#{foo => bar}, passage_span:get_tags(Span))
+ end.
+
+
+set_time_on_start_span() ->
+ set_self_reporter(),
+
+ Time = os:timestamp(),
+ timer:sleep(100),
+ ctrace:start_span(bang, [{time, Time}]),
+ ctrace:finish_span(),
+
+ receive
+ {span, Span} ->
+ ?assertEqual(Time, passage_span:get_start_time(Span))
+ end.
+
+
+skip_on_filtered() ->
+ set_self_reporter(),
+
+ config_set("tracing.filters", "do_skip", "(#{}) -> false"),
+ ctrace_config:update(),
+
+ ?assert(not ctrace:has_span()),
+ ctrace:start_span(do_skip),
+ ?assert(not ctrace:has_span()),
+ ctrace:finish_span(),
+ ?assert(not ctrace:has_span()).
+
+
+include_or_skip_on_sampled() ->
+ set_self_reporter(),
+
+ config_set("tracing.filters", "sample", "(#{}) -> 0.0"),
+ ctrace_config:update(),
+
+ ?assert(not ctrace:has_span()),
+ ctrace:start_span(sample),
+ ?assert(not ctrace:has_span()),
+ ctrace:finish_span(),
+ ?assert(not ctrace:has_span()),
+
+ config_set("tracing.filters", "sample", "(#{}) -> 1.0"),
+ ctrace_config:update(),
+
+ ?assert(not ctrace:has_span()),
+ ctrace:start_span(sample),
+ ?assert(ctrace:has_span()),
+ ctrace:finish_span(),
+ ?assert(not ctrace:has_span()),
+
+ receive
+ {span, Span1} ->
+ ?assertEqual(sample, passage_span:get_operation_name(Span1))
+ end,
+
+ config_set("tracing.filters", "sample", "(#{}) -> 0.5"),
+ ctrace_config:update(),
+
+ ?assert(not ctrace:has_span()),
+ ctrace:start_span(sample),
+ IsSampled = ctrace:has_span(),
+ ctrace:finish_span(),
+ ?assert(not ctrace:has_span()),
+
+ if not IsSampled -> ok; true ->
+ receive
+ {span, Span2} ->
+ ?assertEqual(
+ sample,
+ passage_span:get_operation_name(Span2)
+ )
+ end
+ end.
+
+
+simple_child_span() ->
+ set_self_reporter(),
+
+ ctrace:start_span(parent),
+ ctrace:start_span(child),
+ ctrace:finish_span(),
+ ctrace:finish_span(),
+
+ receive
+ {span, CSpan} ->
+ ?assertEqual(child, passage_span:get_operation_name(CSpan))
+ end,
+
+ receive
+ {span, PSpan} ->
+ ?assertEqual(parent, passage_span:get_operation_name(PSpan))
+ end.
+
+
+update_tags() ->
+ set_self_reporter(),
+
+ ctrace:start_span(foo, [{tags, #{foo => bar}}]),
+ ctrace:tag(#{bango => bongo}),
+ ctrace:finish_span(),
+
+ receive
+ {span, Span} ->
+ ?assertEqual(
+ #{foo => bar, bango => bongo},
+ passage_span:get_tags(Span)
+ )
+ end.
+
+
+update_logs() ->
+ set_self_reporter(),
+
+ ctrace:start_span(foo),
+ ctrace:log(#{foo => bar}),
+ ctrace:finish_span(),
+
+ receive
+ {span, Span1} ->
+ ?assertMatch(
+ [{#{foo := bar}, _TimeStamp}],
+ passage_span:get_logs(Span1)
+ )
+ end,
+
+ ctrace:start_span(foo),
+ ctrace:log(fun() ->
+ #{foo => baz}
+ end),
+ ctrace:finish_span(),
+
+ receive
+ {span, Span2} ->
+ ?assertMatch(
+ [{#{foo := baz}, _TimeStamp}],
+ passage_span:get_logs(Span2)
+ )
+ end.
+
+
+current_span_getters() ->
+ ?assertEqual(false, ctrace:has_span()),
+ ?assertEqual(undefined, ctrace:tags()),
+ ?assertEqual(undefined, ctrace:refs()),
+ ?assertEqual(undefined, ctrace:operation_name()),
+ ?assertEqual(undefined, ctrace:trace_id()),
+ ?assertEqual(undefined, ctrace:span_id()),
+ ?assertEqual(undefined, ctrace:tracer()),
+ ?assertEqual(undefined, ctrace:context()),
+
+ ctrace:start_span(parent),
+ ctrace:start_span(child, [{tags, #{foo => oof}}]),
+
+ ?assertEqual(true, ctrace:has_span()),
+ ?assertEqual(#{foo => oof, origin => <<"parent">>}, ctrace:tags()),
+ ?assertMatch([{child_of, _} | _], ctrace:refs()),
+ ?assertEqual(child, ctrace:operation_name()),
+ ?assert(is_integer(ctrace:trace_id())),
+ ?assert(is_integer(ctrace:span_id())),
+ ?assertEqual(?MAIN_TRACER, ctrace:tracer()),
+ ?assertNotEqual(undefined, ctrace:context()),
+
+ ctrace:finish_span(),
+ ctrace:finish_span(),
+
+ receive
+ {span, CSpan} ->
+ ?assertEqual(child, passage_span:get_operation_name(CSpan))
+ end,
+
+ receive
+ {span, PSpan} ->
+ ?assertEqual(parent, passage_span:get_operation_name(PSpan))
+ end.
+
+
+create_external_span() ->
+ Span1 = ctrace:external_span(1, 2, 3),
+ Ctx1 = passage_span:get_context(Span1),
+ ?assertEqual(1, jaeger_passage_span_context:get_trace_id(Ctx1)),
+ ?assertEqual(2, jaeger_passage_span_context:get_span_id(Ctx1)),
+
+ Span2 = ctrace:external_span(42, undefined, undefined),
+ Ctx2 = passage_span:get_context(Span2),
+ ?assertEqual(42, jaeger_passage_span_context:get_trace_id(Ctx2)),
+ ?assert(is_integer(jaeger_passage_span_context:get_span_id(Ctx2))).
+
+
+use_external_span() ->
+ Parent = ctrace:external_span(1, 2, 3),
+
+ ?assert(not ctrace:has_span()),
+ ctrace:start_span(foo, [{root, Parent}]),
+ ?assert(ctrace:has_span()),
+ ctrace:finish_span(),
+ ?assert(not ctrace:has_span()),
+
+ receive
+ {span, Span} ->
+ Ctx = passage_span:get_context(Span),
+ TraceId = jaeger_passage_span_context:get_trace_id(Ctx),
+ ?assertEqual(1, TraceId)
+ end.
+
+
+config_set(Section, Key, Value) ->
+ PrevValue = config:get(Section, Key),
+ if Value == PrevValue -> ok; true ->
+ config:set(Section, Key, Value, false),
+ test_util:wait_other_value(fun() ->
+ config:get(Section, Key)
+ end, PrevValue)
+ end.
+
+
+set_self_reporter() ->
+ SelfReporter = passage_reporter_process:new(self(), span),
+ passage_tracer_registry:set_reporter(?MAIN_TRACER, SelfReporter),
+ test_util:wait_value(fun() ->
+ {ok, Result} = passage_tracer_registry:get_reporter(?MAIN_TRACER),
+ Result
+ end, SelfReporter). \ No newline at end of file
diff --git a/src/ddoc_cache/src/ddoc_cache_entry.erl b/src/ddoc_cache/src/ddoc_cache_entry.erl
index 4cc3d7e52..ed0311bbd 100644
--- a/src/ddoc_cache/src/ddoc_cache_entry.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry.erl
@@ -34,7 +34,8 @@
handle_call/3,
handle_cast/2,
handle_info/2,
- code_change/3
+ code_change/3,
+ format_status/2
]).
-export([
@@ -282,6 +283,24 @@ code_change(_, St, _) ->
{ok, St}.
+format_status(_Opt, [_PDict, State]) ->
+ #st{
+ key = Key,
+ val = Val,
+ opener = Opener,
+ waiters = Waiters,
+ ts = TS,
+ accessed = Accepted
+ } = State,
+ [{data, [{"State", [
+ {key, Key},
+ {val, Val},
+ {opener, Opener},
+ {waiters, {length, length(Waiters)}},
+ {ts, TS},
+ {accessed, Accepted}
+ ]}]}].
+
spawn_opener(Key) ->
{Pid, _} = erlang:spawn_monitor(?MODULE, do_open, [Key]),
Pid.
diff --git a/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl b/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl
index 5248469fb..7c3dc6787 100644
--- a/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl
@@ -33,7 +33,7 @@ ddocid({_, DDocId}) ->
recover({DbName, DDocId}) ->
- fabric:open_doc(DbName, DDocId, [ejson_body, ?ADMIN_CTX]).
+ fabric2_db:open_doc(DbName, DDocId, [ejson_body, ?ADMIN_CTX]).
insert({DbName, DDocId}, {ok, #doc{revs = Revs} = DDoc}) ->
diff --git a/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl b/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl
index 868fa7789..38445af96 100644
--- a/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl
@@ -34,7 +34,7 @@ ddocid({_, DDocId, _}) ->
recover({DbName, DDocId, Rev}) ->
Opts = [ejson_body, ?ADMIN_CTX],
- {ok, [Resp]} = fabric:open_revs(DbName, DDocId, [Rev], Opts),
+ {ok, [Resp]} = fabric2_db:open_doc_revs(DbName, DDocId, [Rev], Opts),
Resp.
diff --git a/src/dreyfus/src/dreyfus_index.erl b/src/dreyfus/src/dreyfus_index.erl
index 2bf560f37..7236eb16b 100644
--- a/src/dreyfus/src/dreyfus_index.erl
+++ b/src/dreyfus/src/dreyfus_index.erl
@@ -29,7 +29,7 @@
% gen_server api.
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3]).
+ code_change/3, format_status/2]).
% private definitions.
-record(state, {
@@ -244,6 +244,30 @@ terminate(_Reason, _State) ->
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
+format_status(_Opt, [_PDict, #state{index = #index{} = Index} = State]) ->
+ #index{
+ ddoc_id=Id,
+ name=IndexName,
+ sig=Sig
+ } = Index,
+ IndexScrubbed = [{
+ {ddoc_id, Id},
+ {name, IndexName},
+ {sig, Sig}
+ }],
+ Scrubbed = State#state{
+ index = IndexScrubbed,
+ waiting_list = {length, length(State#state.waiting_list)}
+ },
+ ?record_to_keyval(state, Scrubbed);
+
+format_status(_Opt, [_PDict, #state{} = State]) ->
+ Scrubbed = State#state{
+ index = nil,
+ waiting_list = {length, length(State#state.waiting_list)}
+ },
+ ?record_to_keyval(state, Scrubbed).
+
% private functions.
open_index(DbName, #index{analyzer=Analyzer, sig=Sig}) ->
diff --git a/src/ebtree/.gitignore b/src/ebtree/.gitignore
new file mode 100644
index 000000000..04f4f25d7
--- /dev/null
+++ b/src/ebtree/.gitignore
@@ -0,0 +1,3 @@
+.erlfdb/
+_build/
+rebar.lock
diff --git a/src/ebtree/README.md b/src/ebtree/README.md
new file mode 100644
index 000000000..9ce79a0c6
--- /dev/null
+++ b/src/ebtree/README.md
@@ -0,0 +1,19 @@
+A B+Tree (all values stored in leaves) with configurable order, where
+all data is stored in FoundationDB.
+
+The tree is balanced at all times. A bidirectional linked list is
+maintained between leaf nodes for efficient range queries in either
+direction. You can pass in an fdb Db or an open Tx; the latter is vastly
+more efficient for multiple inserts, so batch if you can.
+
+A reduction function can be specified; the B+Tree calculates and stores
+intermediate reduction values on the inner nodes for performance.
+
+The FoundationDB keys start with a user-defined prefix and the opaque
+node id.
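+
+A minimal usage sketch (the prefix, keys and values are illustrative, and
+`Db` is assumed to be an already opened erlfdb database handle):
+
+```
+Tree = ebtree:open(Db, <<"example_tree/">>, 4),
+erlfdb:transactional(Db, fun(Tx) ->
+    lists:foreach(fun(I) ->
+        ebtree:insert(Tx, Tree, {key, I}, {value, I})
+    end, lists:seq(1, 100))
+end),
+{{key, 10}, {value, 10}} = ebtree:lookup(Db, Tree, {key, 10}).
+```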
+
+TODO
+
+1. Rewrite inner node ids (non-root, non-leaf) so we can safely cache
+ them outside of a transaction. (see "immutable" branch)
+2. Chunkify large values over multiple rows?
diff --git a/src/ebtree/rebar.config b/src/ebtree/rebar.config
new file mode 100644
index 000000000..edf6725c8
--- /dev/null
+++ b/src/ebtree/rebar.config
@@ -0,0 +1,17 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+{erl_opts, [debug_info]}.
+{cover_enabled, true}.
+{deps, [
+ {erlfdb, ".*", {git, "https://github.com/apache/couchdb-erlfdb", {tag, "v1.2.2"}}}
+]}.
diff --git a/src/ebtree/src/ebtree.app.src b/src/ebtree/src/ebtree.app.src
new file mode 100644
index 000000000..d4966f6a5
--- /dev/null
+++ b/src/ebtree/src/ebtree.app.src
@@ -0,0 +1,27 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+{application, ebtree,
+ [{description, "An OTP library"},
+ {vsn, git},
+ {registered, []},
+ {applications,
+ [kernel,
+ stdlib,
+ erlfdb
+ ]},
+ {env,[]},
+ {modules, []},
+
+ {licenses, ["Apache 2.0"]},
+ {links, []}
+ ]}.
diff --git a/src/ebtree/src/ebtree.erl b/src/ebtree/src/ebtree.erl
new file mode 100644
index 000000000..3cfb82030
--- /dev/null
+++ b/src/ebtree/src/ebtree.erl
@@ -0,0 +1,1803 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ebtree).
+
+-export([
+ open/3,
+ open/4,
+ min/0,
+ max/0,
+ insert/4,
+ insert_multi/3,
+ delete/3,
+ lookup/3,
+ lookup_multi/3,
+ range/6,
+ reverse_range/6,
+ fold/4,
+ fold/5,
+ reduce/4,
+ reduce/5,
+ full_reduce/2,
+ group_reduce/7,
+ group_reduce/8,
+ validate_tree/2
+]).
+
+-record(node, {
+ id,
+ level = 0,
+ prev,
+ next,
+ members = [] %% [{Key0, Value0} | {FirstKey0, LastKey0, Pointer0, Reduction0}, ...]
+}).
+
+-record(tree, {
+ prefix,
+ min,
+ max,
+ collate_fun,
+ reduce_fun,
+ encode_fun,
+ persist_fun,
+ cache_fun
+}).
+
+-define(META, 0).
+-define(META_ORDER, 0).
+
+-define(NODE, 1).
+-define(NODE_ROOT_ID, <<0>>).
+
+-define(underflow(Tree, Node), Tree#tree.min > length(Node#node.members)).
+-define(at_min(Tree, Node), Tree#tree.min == length(Node#node.members)).
+-define(is_full(Tree, Node), Tree#tree.max == length(Node#node.members)).
+
+-ifdef(TEST).
+-define(validate_node(Tree, Node), validate_node(Tree, Node)).
+-else.
+-define(validate_node(Tree, Node), ignore).
+-endif.
+
+%% two special 1-bit bitstrings that cannot appear in valid keys.
+-define(MIN, <<0:1>>).
+-define(MAX, <<1:1>>).
+
+
+%% @equiv open(Db, Prefix, Order, [])
+-spec open(term(), binary(), pos_integer()) -> #tree{}.
+open(Db, Prefix, Order) ->
+ open(Db, Prefix, Order, []).
+
+
+%% @doc Open a new ebtree, initialising it if it doesn't already exist.
+%% @param Db An erlfdb database or transaction.
+%% @param Prefix The key prefix applied to all ebtree keys.
+%% @param Order The maximum number of items allowed in an ebtree node (must be an even number). Ignored
+%% if ebtree is already initialised.
+%% @param Options Supported options include {reduce_fun, Fun} and {collate_fun, Fun}.
+%% @returns A data structure representing the ebtree, to be passed to all other functions.
+-spec open(term(), binary(), pos_integer(), list()) -> #tree{}.
+open(Db, Prefix, Order, Options) when is_binary(Prefix), is_integer(Order), Order > 2, Order rem 2 == 0 ->
+ ReduceFun = proplists:get_value(reduce_fun, Options, fun reduce_noop/2),
+ CollateFun = proplists:get_value(collate_fun, Options, fun collate_raw/2),
+ EncodeFun = proplists:get_value(encode_fun, Options, fun encode_erlang/3),
+ PersistFun = proplists:get_value(persist_fun, Options, fun simple_persist/3),
+ CacheFun = proplists:get_value(cache_fun, Options, fun cache_noop/2),
+
+ Tree = #tree{
+ prefix = Prefix,
+ reduce_fun = ReduceFun,
+ collate_fun = CollateFun,
+ encode_fun = EncodeFun,
+ persist_fun = PersistFun,
+ cache_fun = CacheFun
+ },
+
+ erlfdb:transactional(Db, fun(Tx) ->
+ case get_meta(Tx, Tree, ?META_ORDER) of
+ not_found ->
+ erlfdb:clear_range_startswith(Tx, Prefix),
+ set_meta(Tx, Tree, ?META_ORDER, Order),
+ set_node(Tx, Tree, #node{id = ?NODE_ROOT_ID}),
+ init_order(Tree, Order);
+ ActualOrder when is_integer(ActualOrder) ->
+ init_order(Tree, ActualOrder)
+ end
+ end).
+
+
+%% @doc a special value guaranteed to be smaller than any value in an ebtree.
+min() ->
+ ?MIN.
+
+
+%% @doc a special value guaranteed to be larger than any value in an ebtree.
+max() ->
+ ?MAX.
+
+%% @doc Lookup a specific key in the ebtree.
+%% @param Db An erlfdb database or transaction.
+%% @param Tree the ebtree.
+%% @param Key the key to lookup
+%% @returns A key-value tuple if found, false if not present in the ebtree.
+-spec lookup(Db :: term(), Tree :: #tree{}, Key :: term()) ->
+ {Key :: term(), Value :: term()} | false.
+lookup(Db, #tree{} = Tree, Key) ->
+ Fun = fun
+ ({visit, K, V}, _Acc) when K =:= Key ->
+ {stop, {K, V}};
+ ({visit, K, _V}, Acc) ->
+ case collate(Tree, K, Key, [gt]) of
+ true ->
+ {stop, Acc};
+ false ->
+ {ok, Acc}
+ end;
+ ({traverse, F, L, _R}, Acc) ->
+ case {collate(Tree, F, Key, [gt]), collate(Tree, Key, L, [lt, eq])} of
+ {true, _} ->
+ {stop, Acc};
+ {false, true} ->
+ {ok, Acc};
+ {false, false} ->
+ {skip, Acc}
+ end
+ end,
+ fold(Db, Tree, Fun, false, []).
+
+
+%% @doc Lookup a list of keys in the ebtree.
+%% @param Db An erlfdb database or transaction.
+%% @param Tree the ebtree.
+%% @param Keys the list of keys to lookup
+%% @returns A list containing key/value tuples for keys that were found
+-spec lookup_multi(Db :: term(), Tree :: #tree{}, Key :: [term()]) ->
+ [{Key :: term(), Value :: term()}].
+lookup_multi(Db, #tree{} = Tree, Keys) ->
+ FoldFun = fun lookup_multi_fold/2,
+ Acc = {Tree, sort_keys(Tree, Keys), []},
+ {_, _, FoundKeys} = fold(Db, Tree, FoldFun, Acc, []),
+ FoundKeys.
+
+
+lookup_multi_fold(_, {_, [], _} = Acc) ->
+ % No more keys to find
+ {stop, Acc};
+
+lookup_multi_fold({visit, Key1, Value}, {Tree, [Key2 | Rest], Acc}) ->
+ {NewKeys, NewAcc} = case collate(Tree, Key1, Key2) of
+ lt ->
+ % Still looking for the next user key
+ {[Key2 | Rest], Acc};
+ eq ->
+ % Found a requested key
+ {Rest, [{Key2, Value} | Acc]};
+ gt ->
+ % The user key wasn't found so we drop it
+ {Rest, Acc}
+ end,
+ {ok, {Tree, NewKeys, NewAcc}};
+
+lookup_multi_fold({traverse, FKey, LKey, R}, {Tree, [UKey | Rest], Acc}) ->
+ case collate(Tree, FKey, UKey, [gt]) of
+ true ->
+ % We've passed by our first user key
+ lookup_multi_fold({traverse, FKey, LKey, R}, {Tree, Rest, Acc});
+ false ->
+ case collate(Tree, UKey, LKey, [lt, eq]) of
+ true ->
+ % Key might be in this range
+ {ok, {Tree, [UKey | Rest], Acc}};
+ false ->
+ % Next key is not in range
+ {skip, {Tree, [UKey | Rest], Acc}}
+ end
+ end.
+
+
+%% @equiv fold(Db, Tree, Fun, Acc, [])
+fold(Db, #tree{} = Tree, Fun, Acc) ->
+ fold(Db, Tree, Fun, Acc, []).
+
+
+%% @doc Custom traversal of the ebtree.
+%% @param Db An erlfdb database or transaction.
+%% @param Tree the ebtree.
+%% @param Fun A callback function, invoked as nodes are loaded, that directs the traversal.
+%% @param Acc The initial accumulator.
+%% @param Options Options that control how the fold is executed.
+%% @returns the final accumulator.
+
+-type fold_args() ::
+ {visit, Key :: term(), Value :: term()} |
+ {traverse, First :: term(), Last :: term(), Reduction :: term()}.
+
+-type fold_option() :: {dir, fwd | rev}.
+
+-spec fold(Db, Tree, Fun, Acc0, Options) -> Acc1 when
+ Db :: term(),
+ Tree :: #tree{},
+ Fun :: fun((fold_args(), Acc0) -> {ok | skip | stop, Acc1}),
+ Acc0 :: term(),
+ Options :: [fold_option()],
+ Acc1 :: term().
+fold(Db, #tree{} = Tree, Fun, Acc, Options) ->
+ {_, Reduce} = erlfdb:transactional(Db, fun(Tx) ->
+ Root = get_node(Tx, Tree, ?NODE_ROOT_ID),
+ fold(Db, Tree, Root, Fun, Acc, Options)
+ end),
+ Reduce.
+
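+%% Illustrative sketch of a fold callback (assumes Db and Tree as above): count
+%% every key by visiting leaf members and descending into every inner node.
+%%
+%%     CountFun = fun
+%%         ({visit, _K, _V}, Acc) -> {ok, Acc + 1};
+%%         ({traverse, _First, _Last, _Reduction}, Acc) -> {ok, Acc}
+%%     end,
+%%     Total = ebtree:fold(Db, Tree, CountFun, 0),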
+
+fold(Db, #tree{} = Tree, #node{} = Node, Fun, Acc, Options) ->
+ Dir = proplists:get_value(dir, Options, fwd),
+ Members = case Dir of
+ fwd -> Node#node.members;
+ rev -> lists:reverse(Node#node.members)
+ end,
+ fold(Db, #tree{} = Tree, Members, Fun, Acc, Options);
+
+fold(_Db, #tree{} = _Tree, [], _Fun, Acc, _Options) ->
+ {ok, Acc};
+
+fold(Db, #tree{} = Tree, [{K, V} | Rest], Fun, Acc0, Options) ->
+ case Fun({visit, K, V}, Acc0) of
+ {ok, Acc1} ->
+ fold(Db, Tree, Rest, Fun, Acc1, Options);
+ {stop, Acc1} ->
+ {stop, Acc1}
+ end;
+
+fold(Db, #tree{} = Tree, [{F, L, P, R} | Rest], Fun, Acc0, Options) ->
+ case Fun({traverse, F, L, R}, Acc0) of
+ {ok, Acc1} ->
+ Node = get_node(Db, Tree, P),
+ case fold(Db, Tree, Node, Fun, Acc1, Options) of
+ {ok, Acc2} ->
+ fold(Db, Tree, Rest, Fun, Acc2, Options);
+ {stop, Acc2} ->
+ {stop, Acc2}
+ end;
+ {skip, Acc1} ->
+ fold(Db, Tree, Rest, Fun, Acc1, Options);
+ {stop, Acc1} ->
+ {stop, Acc1}
+ end.
+
+
+%% @doc Calculate the final reduce value for the whole ebtree.
+%% @param Db An erlfdb database or transaction.
+%% @param Tree the ebtree.
+%% @returns the final reduce value
+-spec full_reduce(Db :: term(), Tree :: #tree{}) -> term().
+full_reduce(Db, #tree{} = Tree) ->
+ Fun = fun
+ ({visit, K, V}, {MapAcc, ReduceAcc}) ->
+ {ok, {[{K, V} | MapAcc], ReduceAcc}};
+ ({traverse, _F, _L, R}, {MapAcc, ReduceAcc}) ->
+ {skip, {MapAcc, [R | ReduceAcc]}}
+ end,
+ {MapValues, ReduceValues} = fold(Db, Tree, Fun, {[], []}, []),
+ do_reduce(Tree, MapValues, ReduceValues).
+
+
+%% @equiv reduce(Db, Tree, StartKey, EndKey, [])
+-spec reduce(Db :: term(), Tree :: #tree{}, StartKey :: term(), EndKey :: term()) -> term().
+reduce(Db, #tree{} = Tree, StartKey, EndKey) ->
+ reduce(Db, Tree, StartKey, EndKey, []).
+
+%% @doc Calculate the reduce value for all keys in the specified range.
+%% @param Db An erlfdb database or transaction.
+%% @param Tree The ebtree.
+%% @param StartKey The beginning of the range
+%% @param EndKey The end of the range
+%% @returns the reduce value for the specified range
+-spec reduce(Db :: term(), Tree :: #tree{}, StartKey :: term(),
+ EndKey :: term(), Options :: [reduce_option()]) -> term().
+reduce(Db, #tree{} = Tree, StartKey, EndKey, Options) ->
+ InclusiveStart = proplists:get_value(inclusive_start, Options, true),
+ InclusiveEnd = proplists:get_value(inclusive_end, Options, true),
+
+ Fun = fun
+ ({visit, Key, Value}, {MapAcc, ReduceAcc}) ->
+ BeforeStart = collate(Tree, Key, StartKey, if InclusiveStart -> [lt]; true -> [lt, eq] end),
+ AfterEnd = collate(Tree, Key, EndKey, if InclusiveEnd -> [gt]; true -> [gt, eq] end),
+ InRange = collate(Tree, Key, StartKey, if InclusiveStart -> [gt, eq]; true -> [gt] end)
+ andalso collate(Tree, Key, EndKey, if InclusiveEnd -> [lt, eq]; true -> [lt] end),
+ if
+ BeforeStart ->
+ {ok, {MapAcc, ReduceAcc}};
+ AfterEnd ->
+ {stop, {MapAcc, ReduceAcc}};
+ InRange ->
+ {ok, {[{Key, Value} | MapAcc], ReduceAcc}}
+ end;
+ ({traverse, FirstKey, LastKey, Reduction}, {MapAcc, ReduceAcc}) ->
+ BeforeStart = collate(Tree, LastKey, StartKey, if InclusiveStart -> [lt]; true -> [lt, eq] end),
+ AfterEnd = collate(Tree, FirstKey, EndKey, if InclusiveEnd -> [gt]; true -> [gt, eq] end),
+ Whole = collate(Tree, FirstKey, StartKey, if InclusiveStart -> [gt, eq]; true -> [gt] end)
+ andalso collate(Tree, LastKey, EndKey, if InclusiveEnd -> [lt, eq]; true -> [lt] end),
+ if
+ BeforeStart ->
+ {skip, {MapAcc, ReduceAcc}};
+ AfterEnd ->
+ {stop, {MapAcc, ReduceAcc}};
+ Whole ->
+ {skip, {MapAcc, [Reduction | ReduceAcc]}};
+ true ->
+ {ok, {MapAcc, ReduceAcc}}
+ end
+ end,
+ {MapValues, ReduceValues} = fold(Db, Tree, Fun, {[], []}, []),
+ do_reduce(Tree, MapValues, ReduceValues).
+
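+%% Illustrative sketch (assumes a tree opened with a sum reduce_fun and integer
+%% keys): sum the values for keys 1..100, excluding the end key.
+%%
+%%     Sum = ebtree:reduce(Db, Tree, 1, 100, [{inclusive_end, false}]),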
+
+do_reduce(#tree{} = Tree, [], []) ->
+ reduce_values(Tree, [], false);
+
+do_reduce(#tree{} = Tree, [], ReduceValues) when is_list(ReduceValues) ->
+ reduce_values(Tree, ReduceValues, true);
+
+do_reduce(#tree{} = Tree, MapValues, ReduceValues) when is_list(MapValues), is_list(ReduceValues) ->
+ do_reduce(Tree, [], [reduce_values(Tree, MapValues, false) | ReduceValues]).
+
+
+%% @equiv group_reduce(Db, Tree, StartKey, EndKey, GroupKeyFun, UserAccFun, UserAcc0, [])
+-spec group_reduce(
+ Db :: term(),
+ Tree :: #tree{},
+ StartKey :: term(),
+ EndKey :: term(),
+ GroupKeyFun :: fun((term()) -> group_key()),
+ UserAccFun :: fun(({group_key(), GroupValue :: term()}, Acc0 :: term()) -> Acc1 :: term()),
+ UserAcc0 :: term()) -> Acc1 :: term().
+group_reduce(Db, #tree{} = Tree, StartKey, EndKey, GroupKeyFun, UserAccFun, UserAcc0) ->
+ group_reduce(Db, Tree, StartKey, EndKey, GroupKeyFun, UserAccFun, UserAcc0, []).
+
+
+%% @doc Calculate the reduce value for all groups in the specified range.
+%% @param Db An erlfdb database or transaction.
+%% @param Tree The ebtree.
+%% @param StartKey The beginning of the range
+%% @param EndKey The end of the range
+%% @param GroupKeyFun A function that takes a key as a parameter and returns the group key.
+%% @param UserAccFun A function called whenever a group reduction is complete; it
+%% receives the {group key, group value} pair and the current accumulator and returns the new accumulator.
+%% @param UserAcc0 The initial accumulator.
+%% @param Options Currently supported options are {dir, fwd | rev},
+%% {inclusive_start, boolean()} and {inclusive_end, boolean()}.
+%% @returns the final accumulator.
+-type group_key() :: term().
+
+-type reduce_option() :: {inclusive_start, boolean()} | {inclusive_end, boolean()}.
+
+-spec group_reduce(
+ Db :: term(),
+ Tree :: #tree{},
+ StartKey :: term(),
+ EndKey :: term(),
+ GroupKeyFun :: fun((term()) -> group_key()),
+ UserAccFun :: fun(({group_key(), GroupValue :: term()}, Acc0 :: term()) -> Acc1 :: term()),
+ UserAcc0 :: term(),
+ Options :: [fold_option() | reduce_option()]) -> Acc1 :: term().
+group_reduce(Db, #tree{} = Tree, StartKey, EndKey, GroupKeyFun, UserAccFun, UserAcc0, Options) ->
+ Dir = proplists:get_value(dir, Options, fwd),
+ InclusiveStart = proplists:get_value(inclusive_start, Options, true),
+ InclusiveEnd = proplists:get_value(inclusive_end, Options, true),
+ NoGroupYet = ?MIN,
+ Fun = fun
+ ({visit, Key, Value}, {CurrentGroup, UserAcc, MapAcc, ReduceAcc}) ->
+ BeforeStart = collate(Tree, Key, StartKey, if InclusiveStart -> [lt]; true -> [lt, eq] end),
+ AfterEnd = collate(Tree, Key, EndKey, if InclusiveEnd -> [gt]; true -> [gt, eq] end),
+ InRange =
+ collate(Tree, Key, StartKey, if InclusiveStart -> [gt, eq]; true -> [gt] end) andalso
+ collate(Tree, Key, EndKey, if InclusiveEnd -> [lt, eq]; true -> [lt] end),
+ KeyGroup = GroupKeyFun(Key),
+ SameGroup = collate(Tree, CurrentGroup, KeyGroup, [eq]),
+ if
+ Dir == fwd andalso BeforeStart ->
+ {ok, {CurrentGroup, UserAcc, MapAcc, ReduceAcc}};
+ Dir == rev andalso AfterEnd ->
+ {ok, {CurrentGroup, UserAcc, MapAcc, ReduceAcc}};
+ Dir == fwd andalso AfterEnd ->
+ {stop, {CurrentGroup, UserAcc, MapAcc, ReduceAcc}};
+ Dir == rev andalso BeforeStart ->
+ {stop, {CurrentGroup, UserAcc, MapAcc, ReduceAcc}};
+ SameGroup ->
+ {ok, {CurrentGroup, UserAcc, [{Key, Value} | MapAcc], ReduceAcc}};
+ InRange andalso CurrentGroup =:= NoGroupYet ->
+ {ok, {KeyGroup, UserAcc, [{Key, Value}], []}};
+ InRange ->
+ %% implicit end of current group and start of a new one
+ GroupValue = do_reduce(Tree, MapAcc, ReduceAcc),
+ {ok, {KeyGroup, UserAccFun({CurrentGroup, GroupValue}, UserAcc), [{Key, Value}], []}}
+ end;
+ ({traverse, FirstKey, LastKey, Reduction}, {CurrentGroup, UserAcc, MapAcc, ReduceAcc}) ->
+ BeforeStart = collate(Tree, LastKey, StartKey, if InclusiveStart -> [lt]; true -> [lt, eq] end),
+ AfterEnd = collate(Tree, FirstKey, EndKey, if InclusiveEnd -> [gt]; true -> [gt, eq] end),
+ Whole =
+ collate(Tree, CurrentGroup, GroupKeyFun(FirstKey), [eq]) andalso
+ collate(Tree, CurrentGroup, GroupKeyFun(LastKey), [eq]),
+ FirstInRange =
+ collate(Tree, FirstKey, StartKey, if InclusiveStart -> [gt, eq]; true -> [gt] end) andalso
+ collate(Tree, FirstKey, EndKey, if InclusiveEnd -> [lt, eq]; true -> [lt] end),
+ LastInRange =
+ collate(Tree, LastKey, StartKey, if InclusiveStart -> [gt, eq]; true -> [gt] end) andalso
+ collate(Tree, LastKey, EndKey, if InclusiveEnd -> [lt, eq]; true -> [lt] end),
+ if
+ Dir == fwd andalso BeforeStart ->
+ {skip, {CurrentGroup, UserAcc, MapAcc, ReduceAcc}};
+ Dir == rev andalso AfterEnd ->
+ {skip, {CurrentGroup, UserAcc, MapAcc, ReduceAcc}};
+ Dir == fwd andalso AfterEnd ->
+ {stop, {CurrentGroup, UserAcc, MapAcc, ReduceAcc}};
+ Dir == rev andalso BeforeStart ->
+ {stop, {CurrentGroup, UserAcc, MapAcc, ReduceAcc}};
+ Whole andalso FirstInRange andalso LastInRange ->
+ {skip, {CurrentGroup, UserAcc, MapAcc, [Reduction | ReduceAcc]}};
+ true ->
+ {ok, {CurrentGroup, UserAcc, MapAcc, ReduceAcc}}
+ end
+ end,
+ {CurrentGroup, UserAcc1, MapValues, ReduceValues} = fold(Db, Tree, Fun, {NoGroupYet, UserAcc0, [], []}, Options),
+ if
+ MapValues /= [] orelse ReduceValues /= [] ->
+ FinalGroup = do_reduce(Tree, MapValues, ReduceValues),
+ UserAccFun({CurrentGroup, FinalGroup}, UserAcc1);
+ true ->
+ UserAcc1
+ end.
+
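+%% Illustrative sketch (assumes composite list keys such as [Group, Id] and a
+%% suitable reduce_fun): reduce each group between min() and max().
+%%
+%%     GroupKeyFun = fun(Key) -> lists:sublist(Key, 1) end,
+%%     UserAccFun = fun({Group, Value}, Acc) -> [{Group, Value} | Acc] end,
+%%     Groups = ebtree:group_reduce(Db, Tree, ebtree:min(), ebtree:max(),
+%%         GroupKeyFun, UserAccFun, []),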
+
+%% @doc Finds all key-value pairs for the specified range in forward order.
+%% @param Db An erlfdb database or transaction.
+%% @param Tree The ebtree.
+%% @param StartKey The beginning of the range
+%% @param EndKey The end of the range
+%% @param AccFun A function called with the list of key-value pairs found in each leaf node, returning the new accumulator.
+%% @param Acc0 The initial accumulator
+%% @returns the final accumulator
+-spec range(Db :: term(), Tree :: #tree{}, StartKey :: term(), EndKey :: term(),
+ AccFun :: fun(), Acc0 :: term()) -> term().
+range(Db, #tree{} = Tree, StartKey, EndKey, AccFun, Acc0) ->
+ erlfdb:transactional(Db, fun(Tx) ->
+ range(Tx, Tree, get_node(Tx, Tree, ?NODE_ROOT_ID), StartKey, EndKey, AccFun, Acc0)
+ end).
+
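+%% Illustrative sketch (assumes StartKey and EndKey are bound): AccFun receives
+%% a list of pairs per leaf, so append rather than cons.
+%%
+%%     Pairs = ebtree:range(Db, Tree, StartKey, EndKey,
+%%         fun(KVs, Acc) -> Acc ++ KVs end, []),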
+
+range(_Tx, #tree{}, #node{id = ?NODE_ROOT_ID, members = []}, _StartKey, _EndKey, _AccFun, Acc0) ->
+ Acc0;
+
+range(Tx, #tree{} = Tree, #node{level = 0} = Node, StartKey, EndKey, AccFun, Acc0) ->
+ InRange = [{K, V} || {K, V} <- Node#node.members,
+ collate(Tree, StartKey, K, [lt, eq]), collate(Tree, K, EndKey, [lt, eq])],
+ Acc1 = AccFun(InRange, Acc0),
+ LastKey = last_key(Node),
+ case Node#node.next /= undefined andalso collate(Tree, LastKey, EndKey, [lt, eq]) of
+ true ->
+ range(Tx, Tree, get_node(Tx, Tree, Node#node.next), StartKey, EndKey, AccFun, Acc1);
+ false ->
+ Acc1
+ end;
+
+range(Tx, #tree{} = Tree, #node{} = Node, StartKey, EndKey, AccFun, Acc) ->
+ ChildId = find_child_id(Tree, Node, StartKey),
+ range(Tx, Tree, get_node(Tx, Tree, ChildId), StartKey, EndKey, AccFun, Acc).
+
+
+%% @doc Finds all key-value pairs for the specified range in reverse order.
+%% @param Db An erlfdb database or transaction.
+%% @param Tree The ebtree.
+%% @param StartKey The beginning of the range
+%% @param EndKey The end of the range
+%% @param AccFun A function called with the list of key-value pairs found in each leaf node (in reverse key order), returning the new accumulator.
+%% @param Acc0 The initial accumulator
+%% @returns the final accumulator
+-spec reverse_range(Db :: term(), Tree :: #tree{}, StartKey :: term(), EndKey :: term(),
+ AccFun :: fun(), Acc0 :: term()) -> term().
+reverse_range(Db, #tree{} = Tree, StartKey, EndKey, AccFun, Acc0) ->
+ erlfdb:transactional(Db, fun(Tx) ->
+ reverse_range(Tx, Tree, get_node(Tx, Tree, ?NODE_ROOT_ID), StartKey, EndKey, AccFun, Acc0)
+ end).
+
+
+reverse_range(_Tx, #tree{}, #node{id = ?NODE_ROOT_ID, members = []}, _StartKey, _EndKey, _AccFun, Acc0) ->
+ Acc0;
+
+reverse_range(Tx, #tree{} = Tree, #node{level = 0} = Node, StartKey, EndKey, AccFun, Acc0) ->
+ InRange = [{K, V} || {K, V} <- Node#node.members,
+ collate(Tree, StartKey, K, [lt, eq]), collate(Tree, K, EndKey, [lt, eq])],
+ Acc1 = AccFun(lists:reverse(InRange), Acc0),
+ FirstKey = first_key(Node),
+ case Node#node.prev /= undefined andalso collate(Tree, StartKey, FirstKey, [lt, eq]) of
+ true ->
+ reverse_range(Tx, Tree, get_node(Tx, Tree, Node#node.prev), StartKey, EndKey, AccFun, Acc1);
+ false ->
+ Acc1
+ end;
+
+reverse_range(Tx, #tree{} = Tree, #node{} = Node, StartKey, EndKey, AccFun, Acc) ->
+ ChildId = find_child_id(Tree, Node, EndKey),
+ reverse_range(Tx, Tree, get_node(Tx, Tree, ChildId), StartKey, EndKey, AccFun, Acc).
+
+
+%% @doc Inserts or updates a value in the ebtree
+%% @param Db An erlfdb database or transaction.
+%% @param Tree The ebtree.
+%% @param Key The key to store the value under.
+%% @param Value The value to store.
+%% @returns the tree.
+-spec insert(Db :: term(), Tree :: #tree{}, Key :: term(), Value :: term()) -> #tree{}.
+insert(_Db, #tree{} = _Tree, ?MIN, _Value) ->
+ erlang:error(min_not_allowed);
+
+insert(_Db, #tree{} = _Tree, ?MAX, _Value) ->
+ erlang:error(max_not_allowed);
+
+insert(Db, #tree{} = Tree, Key, Value) ->
+ erlfdb:transactional(Db, fun(Tx) ->
+ Root0 = get_node(Tx, Tree, ?NODE_ROOT_ID),
+ case ?is_full(Tree, Root0) of
+ true ->
+ OldRoot = Root0#node{id = new_node_id()},
+ FirstKey = first_key(OldRoot),
+ LastKey = last_key(OldRoot),
+ Root1 = #node{
+ id = ?NODE_ROOT_ID,
+ level = Root0#node.level + 1,
+ members = [{FirstKey, LastKey, OldRoot#node.id, []}]},
+ {Root2, _, _} = split_child(Tx, Tree, Root1, OldRoot),
+ insert_nonfull(Tx, Tree, Root2, Key, Value);
+ false ->
+ insert_nonfull(Tx, Tree, Root0, Key, Value)
+ end
+ end),
+ Tree.
+
+
+split_child(Tx, #tree{} = Tree, #node{} = Parent0, #node{} = Child) ->
+ {LeftMembers, RightMembers} = lists:split(Tree#tree.min, Child#node.members),
+
+ LeftId = new_node_id(),
+ RightId = new_node_id(),
+
+ LeftChild = remove_pointers_if_not_leaf(#node{
+ id = LeftId,
+ level = Child#node.level,
+ prev = Child#node.prev,
+ next = RightId,
+ members = LeftMembers
+ }),
+
+ RightChild = remove_pointers_if_not_leaf(#node{
+ id = RightId,
+ level = Child#node.level,
+ prev = LeftId,
+ next = Child#node.next,
+ members = RightMembers
+ }),
+
+ update_prev_neighbour(Tx, Tree, LeftChild),
+ update_next_neighbour(Tx, Tree, RightChild),
+
+ %% adjust parent members
+ FirstLeftKey = first_key(LeftMembers),
+ LastLeftKey = last_key(LeftMembers),
+ FirstRightKey = first_key(RightMembers),
+ LastRightKey = last_key(RightMembers),
+
+ %% adjust parent reductions
+ LeftReduction = reduce_node(Tree, LeftChild),
+ RightReduction = reduce_node(Tree, RightChild),
+
+ Parent1 = Parent0#node{
+ members =
+ umerge_members(Tree, Parent0#node.level, [{FirstLeftKey, LastLeftKey, LeftId, LeftReduction}],
+ umerge_members(Tree, Parent0#node.level, [{FirstRightKey, LastRightKey, RightId, RightReduction}],
+ lists:keydelete(Child#node.id, 3, Parent0#node.members)))
+ },
+ Parent2 = new_node_id_if_cacheable(Tx, Tree, Parent0, Parent1),
+ clear_node(Tx, Tree, Child),
+ set_nodes(Tx, Tree, [LeftChild, RightChild, Parent2]),
+ {Parent2, LeftChild, RightChild}.
+
+
+update_prev_neighbour(_Tx, #tree{} = _Tree, #node{prev = undefined} = _Node) ->
+ ok;
+
+update_prev_neighbour(Tx, #tree{} = Tree, #node{} = Node) ->
+ Left = get_node(Tx, Tree, Node#node.prev),
+ set_node(Tx, Tree, Left#node{next = Node#node.id}).
+
+
+update_next_neighbour(_Tx, #tree{} = _Tree, #node{next = undefined} = _Node) ->
+ ok;
+
+update_next_neighbour(Tx, #tree{} = Tree, #node{} = Node) ->
+ Left = get_node(Tx, Tree, Node#node.next),
+ set_node(Tx, Tree, Left#node{prev = Node#node.id}).
+
+
+insert_nonfull(Tx, #tree{} = Tree, #node{level = 0} = Node0, Key, Value) ->
+ Node1 = Node0#node{
+ members = umerge_members(Tree, 0, [{Key, Value}], Node0#node.members)
+ },
+ set_node(Tx, Tree, Node0, Node1),
+ {Node1#node.id, reduce_node(Tree, Node1)};
+
+insert_nonfull(Tx, #tree{} = Tree, #node{} = Node0, Key, Value) ->
+ ChildId0 = find_child_id(Tree, Node0, Key),
+ Child0 = get_node(Tx, Tree, ChildId0),
+ {Node1, Child1} = case ?is_full(Tree, Child0) of
+ true ->
+ {Parent, LeftChild, RightChild} = split_child(Tx, Tree, Node0, Child0),
+ ChildId = find_child_id(Tree, Parent, Key),
+ Child = if
+ ChildId =:= LeftChild#node.id ->
+ LeftChild;
+ ChildId =:= RightChild#node.id ->
+ RightChild
+ end,
+ {Parent, Child};
+ false ->
+ {Node0, Child0}
+ end,
+ ChildId1 = Child1#node.id,
+ {ChildId2, NewReduction} = insert_nonfull(Tx, Tree, Child1, Key, Value),
+ {CurrentFirstKey, CurrentLastKey, ChildId1, _OldReduction} = lists:keyfind(ChildId1, 3, Node1#node.members),
+ [NewFirstKey, _] = sort_keys(Tree, [Key, CurrentFirstKey]),
+ [_, NewLastKey] = sort_keys(Tree, [Key, CurrentLastKey]),
+ Node2 = Node1#node{
+ members = lists:keyreplace(ChildId1, 3, Node1#node.members,
+ {NewFirstKey, NewLastKey, ChildId2, NewReduction})
+ },
+ Node3 = new_node_id_if_cacheable(Tx, Tree, Node0, Node2),
+ set_node(Tx, Tree, Node0, Node3),
+ {Node3#node.id, reduce_node(Tree, Node2)}.
+
+
+%% @doc Inserts or updates multiple values in the ebtree
+%% @param Db An erlfdb database or transaction.
+%% @param Tree The ebtree.
+%% @param KeyValues A list of two-tuples representing the key/values to insert
+%% @returns the tree.
+-spec insert_multi(Db :: term(), Tree :: #tree{}, KeyValues :: [{term(), term()}]) -> #tree{}.
+insert_multi(_Db, #tree{} = Tree, []) ->
+ Tree;
+
+insert_multi(Db, #tree{} = Tree, KeyValues) when is_list(KeyValues) ->
+ % Sort our KeyValues so that we can insert in order
+ SortedKeyValues = usort_members(Tree, 0, KeyValues),
+ erlfdb:transactional(Db, fun(Tx) ->
+ Root0 = get_node(Tx, Tree, ?NODE_ROOT_ID),
+ Members = insert_multi(Tx, Tree, Root0, SortedKeyValues),
+ Root1 = grow_tree(Tx, Tree, Root0#node{members = Members}),
+ set_node(Tx, Tree, Root1)
+ end),
+ Tree.
+
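+%% Illustrative sketch (assumes Db and Tree as above): load a batch of pairs in
+%% a single transaction; the input does not need to be sorted.
+%%
+%%     Tree1 = ebtree:insert_multi(Db, Tree, [{<<"b">>, 2}, {<<"a">>, 1}]),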
+
+insert_multi(Tx, #tree{} = Tree, #node{level = L} = Node, KeyValues) when L > 0 ->
+ ChildKVsPairs = assign_kvs(Tree, Node#node.members, KeyValues),
+ NewMembers = lists:flatmap(fun({{_F, _L, P, _R} = Child, KVs}) ->
+ case KVs of
+ [] ->
+ [Child];
+ _ ->
+ ChildNode = get_node(Tx, Tree, P),
+ insert_multi(Tx, Tree, ChildNode, KVs)
+ end
+ end, ChildKVsPairs),
+ split_node_multi(Tx, Tree, Node#node{members = NewMembers});
+
+insert_multi(Tx, #tree{} = Tree, #node{level = 0} = Node, KeyValues) ->
+ NewMembers = umerge_members(Tree, 0, KeyValues, Node#node.members),
+ split_node_multi(Tx, Tree, Node#node{members = NewMembers}).
+
+
+assign_kvs(_Tree, [Child], KeyValues) ->
+ [{Child, KeyValues}];
+
+assign_kvs(Tree, [{_F, L, _P, _R} = Child | RestChildren], KeyValues) ->
+ {KVsInChild, RestKVs} = lists:splitwith(fun({Key, _}) ->
+ collate(Tree, Key, L, [lt, eq])
+ end, KeyValues),
+ [{Child, KVsInChild} | assign_kvs(Tree, RestChildren, RestKVs)].
+
+
+split_node_multi(Tx, Tree, Node) ->
+ NumMembers = length(Node#node.members),
+ % Not =< so that we don't leave full nodes
+ % in the tree after update.
+ case NumMembers < Tree#tree.max of
+ true when Node#node.id == ?NODE_ROOT_ID ->
+ Node#node.members;
+ true ->
+ NewNode = case node_is_cacheable(Node) of
+ true ->
+ Node#node{id = new_node_id()};
+ false ->
+ Node
+ end,
+ set_node(Tx, Tree, NewNode),
+ [to_member(Tree, NewNode)];
+ false ->
+ clear_node(Tx, Tree, Node),
+ Nodes0 = create_nodes(Tx, Tree, Node),
+ Nodes1 = if Node#node.level > 0 -> Nodes0; true ->
+ Nodes2 = update_next_ptrs(Nodes0),
+ Nodes3 = update_prev_ptrs(Nodes2),
+ Nodes4 = set_first_prev_ptr(Tx, Tree, Node#node.prev, Nodes3),
+ set_last_next_ptr(Tx, Tree, Node#node.next, Nodes4)
+ end,
+ set_nodes(Tx, Tree, Nodes1),
+ [to_member(Tree, N) || N <- Nodes1]
+ end.
+
+
+grow_tree(_Tx, _Tree, #node{level = 0, members = [{_, _} | _]} = Root) ->
+ Root;
+
+grow_tree(Tx, Tree, #node{level = 0, members = [{_, _, _, _} | _]} = Root) ->
+ grow_tree(Tx, Tree, Root#node{level = 1});
+
+grow_tree(Tx, Tree, Root) ->
+ case length(Root#node.members) < Tree#tree.max of
+ true ->
+ Root;
+ false ->
+ NewMembers = split_node_multi(Tx, Tree, Root),
+ NewRoot = Root#node{
+ level = Root#node.level + 1,
+ members = NewMembers
+ },
+ grow_tree(Tx, Tree, NewRoot)
+ end.
+
+
+to_member(Tree, Node) ->
+ FirstKey = first_key(Node#node.members),
+ LastKey = last_key(Node#node.members),
+ Reds = reduce_node(Tree, Node),
+ {FirstKey, LastKey, Node#node.id, Reds}.
+
+
+create_nodes(Tx, #tree{} = Tree, Node) ->
+ case length(Node#node.members) >= Tree#tree.max of
+ true ->
+ {Members, Rest} = lists:split(Tree#tree.min, Node#node.members),
+ NewNode = #node{
+ id = new_node_id(),
+ level = Node#node.level,
+ members = Members
+ },
+ [NewNode | create_nodes(Tx, Tree, Node#node{members = Rest})];
+ false ->
+ NewNode = #node{
+ id = new_node_id(),
+ level = Node#node.level,
+ members = Node#node.members
+ },
+ [NewNode]
+ end.
+
+
+update_next_ptrs([_] = Nodes) ->
+ Nodes;
+
+update_next_ptrs([N1, N2 | Rest]) ->
+ [N1#node{next = N2#node.id} | update_next_ptrs([N2 | Rest])].
+
+
+update_prev_ptrs([_] = Nodes) ->
+ Nodes;
+
+update_prev_ptrs([N1, N2 | Rest]) ->
+ [N1 | update_prev_ptrs([N2#node{prev = N1#node.id} | Rest])].
+
+
+set_first_prev_ptr(Tx, Tree, Prev, [Node | Rest]) ->
+ NewNode = Node#node{prev = Prev},
+ update_prev_neighbour(Tx, Tree, NewNode),
+ [NewNode | Rest].
+
+
+set_last_next_ptr(Tx, Tree, Next, [Node0]) ->
+ Node1 = Node0#node{next = Next},
+ update_next_neighbour(Tx, Tree, Node1),
+ [Node1];
+
+set_last_next_ptr(Tx, Tree, Next, [N | Rest]) ->
+ [N | set_last_next_ptr(Tx, Tree, Next, Rest)].
+
+
+%% @doc Deletes an entry from the ebtree
+%% @param Db An erlfdb database or transaction.
+%% @param Tree The ebtree.
+%% @param Key The key of the entry to delete.
+%% @returns the tree.
+-spec delete(Db :: term(), Tree :: #tree{}, Key :: term()) -> #tree{}.
+delete(Db, #tree{} = Tree, Key) ->
+ erlfdb:transactional(Db, fun(Tx) ->
+ Root0 = get_node(Tx, Tree, ?NODE_ROOT_ID),
+ case delete(Tx, Tree, Root0, Key) of
+ % if only one child, make it the new root.
+ #node{level = L, members = [_]} = Root1 when L > 0 ->
+ [{_, _, ChildId, _}] = Root1#node.members,
+ Root2 = get_node(Tx, Tree, ChildId),
+ clear_node(Tx, Tree, Root2),
+ set_node(Tx, Tree, Root2#node{id = ?NODE_ROOT_ID});
+ Root1 ->
+ set_node(Tx, Tree, Root0, Root1)
+ end
+ end),
+ Tree.
+
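+%% Illustrative sketch (assumes the key was previously inserted):
+%%
+%%     Tree1 = ebtree:delete(Db, Tree, <<"a">>),
+%%     false = ebtree:lookup(Db, Tree1, <<"a">>),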
+
+delete(_Tx, #tree{} = _Tree, #node{level = 0} = Node, Key) ->
+ Node#node{
+ members = lists:keydelete(Key, 1, Node#node.members)
+ };
+
+delete(Tx, #tree{} = Tree, #node{} = Parent0, Key) ->
+ ChildId0 = find_child_id(Tree, Parent0, Key),
+ Child0 = get_node(Tx, Tree, ChildId0),
+ Child1 = delete(Tx, Tree, Child0, Key),
+ case ?underflow(Tree, Child1) of
+ true ->
+ SiblingId = find_sibling_id(Tree, Parent0, ChildId0, Key),
+ Sibling = get_node(Tx, Tree, SiblingId),
+ NewNodes = case ?at_min(Tree, Sibling) of
+ true ->
+ Merged = merge(Tree, Child1, Sibling),
+ update_prev_neighbour(Tx, Tree, Merged),
+ update_next_neighbour(Tx, Tree, Merged),
+ [Merged];
+ false ->
+ {Left, Right} = rebalance(Tree, Child1, Sibling),
+ update_prev_neighbour(Tx, Tree, Left),
+ update_next_neighbour(Tx, Tree, Right),
+ [Left, Right]
+ end,
+
+ %% remove old members and insert new members
+ Members0 = Parent0#node.members,
+ Members1 = lists:keydelete(ChildId0, 3, Members0),
+ Members2 = lists:keydelete(Sibling#node.id, 3, Members1),
+ Members3 = lists:foldl(fun(N, Acc) ->
+ umerge_members(Tree, Parent0#node.level,
+ [{first_key(N), last_key(N), N#node.id, reduce_node(Tree, N)}], Acc)
+ end, Members2, NewNodes),
+
+ Parent1 = Parent0#node{
+ members = Members3
+ },
+ Parent2 = new_node_id_if_cacheable(Tx, Tree, Parent0, Parent1),
+ clear_nodes(Tx, Tree, [Child0, Sibling]),
+ set_nodes(Tx, Tree, NewNodes),
+ Parent2;
+ false ->
+ set_node(Tx, Tree, Child0, Child1),
+ {_OldFirstKey, _OldLastKey, ChildId0, _OldReduction} = lists:keyfind(ChildId0, 3, Parent0#node.members),
+ Parent1 = Parent0#node{
+ members = lists:keyreplace(ChildId0, 3, Parent0#node.members,
+ {first_key(Child1), last_key(Child1), Child1#node.id, reduce_node(Tree, Child1)})
+ },
+ new_node_id_if_cacheable(Tx, Tree, Parent0, Parent1)
+ end.
+
+
+merge(#tree{} = Tree, #node{level = Level} = Node1, #node{level = Level} = Node2) ->
+ [Left, Right] = sort_nodes(Tree, [Node1, Node2]),
+
+ #node{
+ id = new_node_id(),
+ level = Level,
+ prev = Left#node.prev,
+ next = Right#node.next,
+ members = lists:append(Left#node.members, Right#node.members)
+ }.
+
+
+rebalance(#tree{} = Tree, #node{level = Level} = Node1, #node{level = Level} = Node2) ->
+ [Left0, Right0] = sort_nodes(Tree, [Node1, Node2]),
+
+ Members = lists:append(Left0#node.members, Right0#node.members),
+ {LeftMembers, RightMembers} = lists:split(length(Members) div 2, Members),
+
+ Left1Id = new_node_id(),
+ Right1Id = new_node_id(),
+
+ Left1 = remove_pointers_if_not_leaf(Left0#node{
+ id = Left1Id,
+ next = Right1Id,
+ members = LeftMembers
+ }),
+ Right1 = remove_pointers_if_not_leaf(Right0#node{
+ id = Right1Id,
+ prev = Left1Id,
+ members = RightMembers
+ }),
+ {Left1, Right1}.
+
+
+%% lookup functions
+
+find_child_id(#tree{} = Tree, #node{} = Node, Key) ->
+ element(3, find_child(Tree, Node, Key)).
+
+
+find_sibling_id(#tree{} = Tree, #node{level = L} = Node0, Id, Key) when L > 0 ->
+ Node1 = Node0#node{members = lists:keydelete(Id, 3, Node0#node.members)},
+ find_child_id(Tree, Node1, Key).
+
+
+find_child(#tree{} = Tree, #node{level = L} = Node, Key) when L > 0 ->
+ find_child_int(Tree, Node#node.members, Key).
+
+
+find_child_int(#tree{} = _Tree, [Child], _Key) ->
+ Child;
+
+find_child_int(#tree{} = Tree, [{_F, L, _P, _R} = Child| Rest], Key) ->
+ case collate(Tree, Key, L, [lt, eq]) of
+ true ->
+ Child;
+ false ->
+ find_child_int(Tree, Rest, Key)
+ end.
+
+
+%% metadata functions
+
+get_meta(Tx, #tree{} = Tree, MetaKey) ->
+ #tree{prefix = Prefix, encode_fun = EncodeFun} = Tree,
+ Key = meta_key(Prefix, MetaKey),
+ Future = erlfdb:get(Tx, Key),
+ case erlfdb:wait(Future) of
+ not_found ->
+ not_found;
+ Bin when is_binary(Bin) ->
+ EncodeFun(decode, Key, Bin)
+ end.
+
+
+set_meta(Tx, #tree{} = Tree, MetaKey, MetaValue) ->
+ #tree{prefix = Prefix, encode_fun = EncodeFun} = Tree,
+ Key = meta_key(Prefix, MetaKey),
+ erlfdb:set(
+ Tx,
+ Key,
+ EncodeFun(encode, Key, MetaValue)
+ ).
+
+
+meta_key(Prefix, MetaKey) when is_binary(Prefix) ->
+ erlfdb_tuple:pack({?META, MetaKey}, Prefix).
+
+%% node persistence functions
+
+get_node(Tx, #tree{} = Tree, Id) ->
+ case cache(Tree, get, Id) of
+ undefined ->
+ Key = node_key(Tree#tree.prefix, Id),
+ Value = persist(Tree, Tx, get, Key),
+ Node = decode_node(Tree, Id, Key, Value),
+ cache(Tree, set, [Id, Node]),
+ Node;
+ #node{} = Node ->
+ Node
+ end.
+
+clear_nodes(Tx, #tree{} = Tree, Nodes) ->
+ lists:foreach(fun(Node) ->
+ clear_node(Tx, Tree, Node)
+ end, Nodes).
+
+
+clear_node(Tx, #tree{} = Tree, #node{} = Node) ->
+ Key = node_key(Tree#tree.prefix, Node#node.id),
+ cache(Tree, clear, Node#node.id),
+ persist(Tree, Tx, clear, Key).
+
+
+set_nodes(Tx, #tree{} = Tree, Nodes) ->
+ lists:foreach(fun(Node) ->
+ set_node(Tx, Tree, Node)
+ end, Nodes).
+
+
+set_node(_Tx, #tree{} = _Tree, #node{} = Same, #node{} = Same) ->
+ ok;
+
+set_node(Tx, #tree{} = Tree, #node{} = _From, #node{} = To) ->
+ set_node(Tx, Tree, To).
+
+
+set_node(Tx, #tree{} = Tree, #node{} = Node) ->
+ ?validate_node(Tree, Node),
+ Key = node_key(Tree#tree.prefix, Node#node.id),
+ Value = encode_node(Tree, Key, Node),
+ cache(Tree, set, [Node#node.id, Node]),
+ persist(Tree, Tx, set, [Key, Value]).
+
+
+node_key(Prefix, Id) when is_binary(Prefix), is_binary(Id) ->
+ erlfdb_tuple:pack({?NODE, Id}, Prefix).
+
+
+%% @doc Walks the whole tree, checks it for consistency,
+%% and prints each node to the screen.
+validate_tree(Db, #tree{} = Tree) ->
+ erlfdb:transactional(Db, fun(Tx) ->
+ Root = get_node(Db, Tree, ?NODE_ROOT_ID),
+ validate_tree(Tx, Tree, Root)
+ end).
+
+
+validate_tree(_Tx, #tree{} = Tree, #node{level = 0} = Node) ->
+ print_node(Node),
+ validate_node(Tree, Node);
+
+validate_tree(Tx, #tree{} = Tree, #node{} = Node) ->
+ print_node(Node),
+ validate_node(Tree, Node),
+ validate_tree(Tx, Tree, Node#node.members);
+
+validate_tree(_Tx, #tree{} = _Tree, []) ->
+ ok;
+
+validate_tree(Tx, #tree{} = Tree, [{_F, _L, P, _R} | Rest]) ->
+ Node = get_node(Tx, Tree, P),
+ validate_tree(Tx, Tree, Node),
+ validate_tree(Tx, Tree, Rest).
+
+
+validate_node(#tree{} = Tree, #node{} = Node) ->
+ NumKeys = length(Node#node.members),
+ IsLeaf = Node#node.level =:= 0,
+ IsRoot = ?NODE_ROOT_ID == Node#node.id,
+ OutOfOrder = Node#node.members /= sort_members(Tree, Node#node.level, Node#node.members),
+ Duplicates = Node#node.members /= usort_members(Tree, Node#node.level, Node#node.members),
+ if
+ Node#node.id == undefined ->
+ erlang:error({node_without_id, Node});
+ not IsRoot andalso NumKeys < Tree#tree.min ->
+ erlang:error({too_few_keys, Node});
+ NumKeys > Tree#tree.max ->
+ erlang:error({too_many_keys, Node});
+ not IsLeaf andalso Node#node.prev /= undefined ->
+ erlang:error({non_leaf_with_prev, Node});
+ not IsLeaf andalso Node#node.next /= undefined ->
+ erlang:error({non_leaf_with_next, Node});
+ OutOfOrder ->
+ erlang:error({out_of_order, Node});
+ Duplicates ->
+ erlang:error({duplicates, Node});
+ true ->
+ ok
+ end.
+
+
+%% data marshalling functions (encodes unnecessary fields as a NIL_REF)
+
+encode_node(#tree{} = Tree, Key, #node{prev = undefined} = Node) ->
+ encode_node(Tree, Key, Node#node{prev = []});
+
+encode_node(#tree{} = Tree, Key, #node{next = undefined} = Node) ->
+ encode_node(Tree, Key, Node#node{next = []});
+
+encode_node(#tree{} = Tree, Key, #node{} = Node) ->
+ #tree{encode_fun = EncodeFun} = Tree,
+ EncodeFun(encode, Key, Node#node{id = []}).
+
+
+decode_node(#tree{} = Tree, Id, Key, Value) when is_binary(Value) ->
+ #tree{encode_fun = EncodeFun} = Tree,
+ Term = EncodeFun(decode, Key, Value),
+ decode_node(Id, Term).
+
+
+decode_node(Id, #node{prev = []} = Node) ->
+ decode_node(Id, Node#node{prev = undefined});
+
+decode_node(Id, #node{next = []} = Node) ->
+ decode_node(Id, Node#node{next = undefined});
+
+decode_node(Id, #node{} = Node) ->
+ Node#node{id = Id}.
+
+%% built-in reduce functions.
+
+reduce_noop(_KVs, _Rereduce) ->
+ [].
+
+
+reduce_node(#tree{} = Tree, #node{level = 0} = Node) ->
+ reduce_values(Tree, Node#node.members, false);
+
+reduce_node(#tree{} = Tree, #node{} = Node) ->
+ Rs = [R || {_F, _L, _P, R} <- Node#node.members],
+ reduce_values(Tree, Rs, true).
+
+
+reduce_values(#tree{} = Tree, Values, Rereduce) when is_list(Values) ->
+ #tree{reduce_fun = ReduceFun} = Tree,
+ ReduceFun(Values, Rereduce).
+
+
+%% collation functions
+
+
+collate(#tree{} = _Tree, ?MIN, _B) ->
+ lt;
+
+collate(#tree{} = _Tree, _A, ?MIN) ->
+ gt;
+
+collate(#tree{} = _Tree, ?MAX, _B) ->
+ gt;
+
+collate(#tree{} = _Tree, _A, ?MAX) ->
+ lt;
+
+collate(#tree{} = Tree, A, B) ->
+ #tree{collate_fun = CollateFun} = Tree,
+ case CollateFun(A, B) of
+ lt -> lt;
+ eq -> eq;
+ gt -> gt;
+ _ -> error(invalid_collation_result)
+ end.
+
+
+collate(#tree{} = Tree, A, B, Allowed) ->
+ lists:member(collate(Tree, A, B), Allowed).
+
+
+umerge_members(#tree{} = Tree, Level, List1, List2) ->
+ Collate = fun
+ ({K1, _V1}, {K2, _V2}) when Level == 0 ->
+ collate(Tree, K1, K2);
+ ({_F1, L1, _V1, _R1}, {_F2, L2, _V2, _R2}) when Level > 0 ->
+ collate(Tree, L1, L2)
+ end,
+ umerge_members_int(Collate, List1, List2, []).
+
+
+umerge_members_int(Collate, [], [H2 | T2], [HAcc | _] = Acc) ->
+ case Collate(H2, HAcc) of
+ lt -> erlang:error(unsorted_members);
+ eq -> lists:reverse(Acc, T2);
+ gt -> lists:reverse(Acc, [H2 | T2])
+ end;
+umerge_members_int(_Collate, List1, [], Acc) ->
+ lists:reverse(Acc, List1);
+umerge_members_int(Collate, [H1 | T1], [H2 | T2], Acc) ->
+ case Collate(H1, H2) of
+ lt -> umerge_members_int(Collate, T1, [H2 | T2], [H1 | Acc]);
+ eq -> umerge_members_int(Collate, T1, [H2 | T2], [H1 | Acc]);
+ gt -> umerge_members_int(Collate, [H1 | T1], T2, [H2 | Acc])
+ end.
+
+
+sort_keys(#tree{} = Tree, List) ->
+ CollateWrapper = fun
+ (K1, K2) ->
+ collate(Tree, K1, K2, [lt, eq])
+ end,
+ lists:sort(CollateWrapper, List).
+
+
+sort_nodes(#tree{} = Tree, List) ->
+ CollateWrapper = fun
+ (#node{} = N1, #node{} = N2) ->
+ collate(Tree, first_key(N1), first_key(N2), [lt, eq])
+ end,
+ lists:sort(CollateWrapper, List).
+
+
+sort_members(#tree{} = Tree, Level, List) ->
+ CollateWrapper = fun
+ ({K1, _V1}, {K2, _V2}) when Level == 0 ->
+ collate(Tree, K1, K2, [lt, eq]);
+ ({_F1, L1, _V1, _R1}, {_F2, L2, _V2, _R2}) when Level > 0 ->
+ collate(Tree, L1, L2, [lt, eq])
+ end,
+ lists:sort(CollateWrapper, List).
+
+
+usort_members(#tree{} = Tree, Level, List) ->
+ CollateWrapper = fun
+ ({K1, _V1}, {K2, _V2}) when Level == 0 ->
+ collate(Tree, K1, K2, [lt, eq]);
+ ({_F1, L1, _V1, _R1}, {_F2, L2, _V2, _R2}) when Level > 0 ->
+ collate(Tree, L1, L2, [lt, eq])
+ end,
+ lists:usort(CollateWrapper, List).
+
+
+collate_raw(A, B) when A < B ->
+ lt;
+
+collate_raw(A, B) when A > B ->
+ gt;
+
+collate_raw(A, A) ->
+ eq.
+
+
+%% encoding function
+
+encode_erlang(encode, _Key, Value) ->
+ term_to_binary(Value, [{minor_version, 2}]);
+
+
+encode_erlang(decode, _Key, Value) ->
+ binary_to_term(Value, [safe]).
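+
+%% Illustrative sketch of a custom encode_fun (passed to open/4 as
+%% {encode_fun, Fun}); it must handle both directions:
+%%
+%%     CompressingFun = fun
+%%         (encode, _Key, Term) -> term_to_binary(Term, [compressed]);
+%%         (decode, _Key, Bin) -> binary_to_term(Bin)
+%%     end,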
+
+%% persist function
+
+persist(#tree{} = Tree, Tx, Action, Args) ->
+ #tree{persist_fun = PersistFun} = Tree,
+ PersistFun(Tx, Action, Args).
+
+
+simple_persist(Tx, set, [Key, Value]) ->
+ erlfdb:set(Tx, Key, Value);
+
+simple_persist(Tx, get, Key) ->
+ erlfdb:wait(erlfdb:get(Tx, Key));
+
+simple_persist(Tx, clear, Key) ->
+ erlfdb:clear(Tx, Key).
+
+
+%% cache functions
+
+cache_noop(set, _) ->
+ ok;
+cache_noop(clear, _) ->
+ ok;
+cache_noop(get, _) ->
+ undefined.
+
+
+cache(#tree{} = Tree, set, [Id, #node{} = Node]) ->
+ #tree{cache_fun = CacheFun} = Tree,
+ case node_is_cacheable(Node) of
+ true ->
+ CacheFun(set, [Id, Node]);
+ false ->
+ ok
+ end;
+
+cache(#tree{} = Tree, clear, Id) ->
+ #tree{cache_fun = CacheFun} = Tree,
+ CacheFun(clear, Id);
+
+cache(#tree{} = _Tree, get, ?NODE_ROOT_ID) ->
+ undefined;
+
+cache(#tree{} = Tree, get, Id) ->
+ #tree{cache_fun = CacheFun} = Tree,
+ CacheFun(get, Id).
+
+
+%% private functions
+
+init_order(#tree{} = Tree, Order)
+ when is_integer(Order), Order > 2, Order rem 2 == 0 ->
+ Tree#tree{
+ min = Order div 2,
+ max = Order
+ }.
+
+
+first_key(#node{} = Node) ->
+ first_key(Node#node.members);
+
+first_key(Members) when is_list(Members) ->
+ element(1, hd(Members)).
+
+
+last_key(#node{} = Node) ->
+ last_key(Node#node.members);
+
+last_key(Members) when is_list(Members) ->
+ case lists:last(Members) of
+ {K, _V} ->
+ K;
+ {_F, L, _P, _R} ->
+ L
+ end.
+
+
+new_node_id_if_cacheable(Tx, #tree{} = Tree, #node{} = Old, #node{} = New) ->
+ MembersChanged = Old#node.members /= New#node.members,
+ NodeIsCacheable = node_is_cacheable(New),
+ if
+ MembersChanged andalso NodeIsCacheable ->
+ clear_node(Tx, Tree, New),
+ New#node{id = new_node_id()};
+ true ->
+ New
+ end.
+
+
+node_is_cacheable(#node{id = ?NODE_ROOT_ID}) ->
+ false;
+
+node_is_cacheable(#node{level = 0}) ->
+ false;
+
+node_is_cacheable(#node{}) ->
+ true.
+
+
+new_node_id() ->
+ crypto:strong_rand_bytes(16).
+
+
+%% remove prev/next pointers for nonleaf nodes
+remove_pointers_if_not_leaf(#node{level = 0} = Node) ->
+ Node;
+
+remove_pointers_if_not_leaf(#node{} = Node) ->
+ Node#node{prev = undefined, next = undefined}.
+
+
+print_node(#node{level = 0} = Node) ->
+ io:format("#node{id = ~s, level = ~w, prev = ~s, next = ~s, members = ~w}~n~n",
+ [b64(Node#node.id), Node#node.level, b64(Node#node.prev), b64(Node#node.next),
+ Node#node.members]);
+
+print_node(#node{} = Node) ->
+ io:format("#node{id = ~s, level = ~w, prev = ~s, next = ~s, members = ~s}~n~n",
+ [base64:encode(Node#node.id), Node#node.level, b64(Node#node.prev), b64(Node#node.next),
+ [io_lib:format("{~w, ~w, ~s, ~w}, ", [F, L, b64(P), R]) || {F, L, P, R} <- Node#node.members]]).
+
+
+b64(undefined) ->
+ undefined;
+
+b64(Bin) ->
+ base64:encode(Bin).
+
+%% tests
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+reduce_sum(KVs, false) ->
+ {_, Vs} = lists:unzip(KVs),
+ lists:sum(Vs);
+
+reduce_sum(Rs, true) ->
+ lists:sum(Rs).
+
+
+reduce_count(KVs, false) ->
+ length(KVs);
+
+reduce_count(Rs, true) ->
+ lists:sum(Rs).
+
+
+reduce_stats(KVs, false) ->
+ {_, Vs} = lists:unzip(KVs),
+ {
+ lists:sum(Vs),
+ lists:min(Vs),
+ lists:max(Vs),
+ length(Vs),
+ lists:sum([V * V || V <- Vs])
+ };
+
+reduce_stats(Rs, true) ->
+ lists:foldl(
+ fun({Sum, Min, Max, Count, SumSqr},
+ {SumAcc, MinAcc, MaxAcc, CountAcc, SumSqrAcc}) ->
+ {
+ Sum + SumAcc,
+ erlang:min(Min, MinAcc),
+ erlang:max(Max, MaxAcc),
+ Count + CountAcc,
+ SumSqr + SumSqrAcc
+ } end, hd(Rs), tl(Rs)).
+
+
+collation_fun_test_() ->
+ Tree = #tree{collate_fun = fun collate_raw/2},
+ [
+ ?_test(?assertEqual(gt, collate(Tree, 4, 3))),
+ ?_test(?assertEqual(lt, collate(Tree, 3, 4))),
+ ?_test(?assertEqual(eq, collate(Tree, 3, 3)))
+ ].
+
+
+collate_validation_test() ->
+ Tree = #tree{collate_fun = fun(_A, _B) -> foo end},
+ ?assertError(invalid_collation_result, collate(Tree, 1, 2)).
+
+
+order_is_preserved_test() ->
+ Db = erlfdb_util:get_test_db([empty]),
+ open(Db, <<1,2,3>>, 4),
+ Tree = open(Db, <<1,2,3>>, 8),
+ ?assertEqual(4, Tree#tree.max).
+
+
+min_not_allowed_test() ->
+ Db = erlfdb_util:get_test_db([empty]),
+ Tree = open(Db, <<1,2,3>>, 4),
+ ?assertError(min_not_allowed, ebtree:insert(Db, Tree, ebtree:min(), foo)).
+
+
+max_not_allowed_test() ->
+ Db = erlfdb_util:get_test_db([empty]),
+ Tree = open(Db, <<1,2,3>>, 4),
+ ?assertError(max_not_allowed, ebtree:insert(Db, Tree, ebtree:max(), foo)).
+
+
+lookup_test() ->
+ Db = erlfdb_util:get_test_db([empty]),
+ Tree = open(Db, <<1,2,3>>, 4),
+ Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, 16)])],
+ lists:foreach(fun(Key) -> insert(Db, Tree, Key, Key + 1) end, Keys),
+ lists:foreach(fun(Key) -> ?assertEqual({Key, Key + 1}, lookup(Db, Tree, Key)) end, Keys),
+ ?assertEqual(false, lookup(Db, Tree, 101)).
+
+
+lookup_multi_test() ->
+ Db = erlfdb_util:get_test_db([empty]),
+ Tree = open(Db, <<1,2,3>>, 4),
+ Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, 16)])],
+ lists:foreach(fun(Key) -> insert(Db, Tree, Key, Key + 1) end, Keys),
+ validate_tree(Db, Tree),
+ ?assertEqual([{1, 2}], lookup_multi(Db, Tree, [1])),
+ ?assertEqual([{15, 16}, {2, 3}], lookup_multi(Db, Tree, [2, 15])),
+ ?assertEqual([{15, 16}, {4, 5}, {2, 3}], lookup_multi(Db, Tree, [2, 101, 15, 4, -3])).
+
+
+insert_multi_test() ->
+ Db = erlfdb_util:get_test_db([empty]),
+ Tree = open(Db, <<1, 2, 3>>, 4),
+ AllKVs = lists:foldl(fun(_Seq, Acc) ->
+ KVs = [{rand:uniform(), rand:uniform()} || _ <- lists:seq(1, 16)],
+ insert_multi(Db, Tree, KVs),
+ KVs ++ Acc
+ end, [], lists:seq(1, 16)),
+ lists:foreach(fun({K, V}) ->
+ ?assertEqual({K, V}, lookup(Db, Tree, K))
+ end, AllKVs),
+ validate_tree(Db, Tree).
+
+
+delete_test() ->
+ Db = erlfdb_util:get_test_db([empty]),
+ Tree = open(Db, <<1,2,3>>, 4),
+ Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, 16)])],
+ lists:foreach(fun(Key) -> insert(Db, Tree, Key, Key + 1) end, Keys),
+ lists:foreach(fun(Key) -> ?assertEqual({Key, Key + 1}, lookup(Db, Tree, Key)) end, Keys),
+ lists:foreach(fun(Key) -> delete(Db, Tree, Key) end, Keys),
+ lists:foreach(fun(Key) -> ?assertEqual(false, lookup(Db, Tree, Key)) end, Keys).
+
+
+range_after_delete_test() ->
+ Db = erlfdb_util:get_test_db([empty]),
+ Tree = open(Db, <<1,2,3>>, 4),
+ Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, 16)])],
+ lists:foreach(fun(Key) -> insert(Db, Tree, Key, Key + 1) end, Keys),
+ lists:foreach(fun(Key) -> ?assertEqual({Key, Key + 1}, lookup(Db, Tree, Key)) end, Keys),
+ lists:foreach(fun(Key) -> delete(Db, Tree, Key) end, lists:seq(1, 16, 2)),
+ ?assertEqual(8, range(Db, Tree, 1, 16, fun(E, A) -> length(E) + A end, 0)),
+ ?assertEqual(8, reverse_range(Db, Tree, 1, 16, fun(E, A) -> length(E) + A end, 0)).
+
+
+full_reduce_empty_test() ->
+ Db = erlfdb_util:get_test_db([empty]),
+ Tree = open(Db, <<1,2,3>>, 4, [{reduce_fun, fun reduce_sum/2}]),
+ ?assertEqual(0, full_reduce(Db, Tree)).
+
+
+full_reduce_test_() ->
+ Db = erlfdb_util:get_test_db([empty]),
+ Tree = open(Db, <<1,2,3>>, 4, [{reduce_fun, fun reduce_sum/2}]),
+ TestFun = fun(Max) ->
+ Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, Max)])],
+ lists:foreach(fun(Key) -> insert(Db, Tree, Key, Key) end, Keys),
+ ?assertEqual(round(Max * ((1 + Max) / 2)), full_reduce(Db, Tree))
+ end,
+ [
+ ?_test(TestFun(4)),
+ ?_test(TestFun(8))
+ ].
+
+
+full_reduce_after_delete_test() ->
+ Db = erlfdb_util:get_test_db([empty]),
+ Tree = open(Db, <<1,2,3>>, 4, [{reduce_fun, fun reduce_sum/2}]),
+ Max = 16,
+ Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, Max)])],
+ lists:foreach(fun(Key) -> insert(Db, Tree, Key, Key) end, Keys),
+ ?assertEqual(round(Max * ((1 + Max) / 2)), full_reduce(Db, Tree)),
+ lists:foreach(fun(Key) -> delete(Db, Tree, Key) end, Keys),
+ ?assertEqual(0, full_reduce(Db, Tree)).
+
+
+count_reduce_test_() ->
+ Db = erlfdb_util:get_test_db([empty]),
+ Tree = open(Db, <<1,2,3>>, 4, [{reduce_fun, fun reduce_count/2}]),
+ Max = 100,
+ Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, Max)])],
+ lists:foreach(fun(Key) -> insert(Db, Tree, Key, Key) end, Keys),
+ Expected = fun(S, E) -> E - S + 1 end,
+ [
+ ?_test(?assertEqual(Expected(1, 5), reduce(Db, Tree, 1, 5))),
+ ?_test(?assertEqual(Expected(50, 60), reduce(Db, Tree, 50, 60))),
+ ?_test(?assertEqual(Expected(21, 83), reduce(Db, Tree, 21, 83))),
+ ?_test(?assertEqual(Expected(1, 1), reduce(Db, Tree, 1, 1))),
+ ?_test(?assertEqual(Expected(1, 100), reduce(Db, Tree, 0, 200))),
+ ?_test(?assertEqual(Expected(5, 7), reduce(Db, Tree, 5, 7))),
+ ?_test(?assertEqual(Expected(6, 7), reduce(Db, Tree, 5, 7,
+ [{inclusive_start, false}]))),
+ ?_test(?assertEqual(Expected(5, 6), reduce(Db, Tree, 5, 7,
+ [{inclusive_end, false}]))),
+ ?_test(?assertEqual(Expected(6, 6), reduce(Db, Tree, 5, 7,
+ [{inclusive_start, false}, {inclusive_end, false}])))
+ ].
+
+sum_reduce_test_() ->
+ Db = erlfdb_util:get_test_db([empty]),
+ Tree = open(Db, <<1,2,3>>, 4, [{reduce_fun, fun reduce_sum/2}]),
+ Max = 100,
+ Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, Max)])],
+ lists:foreach(fun(Key) -> insert(Db, Tree, Key, Key) end, Keys),
+ Expected = fun(S, E) -> lists:sum(lists:seq(S, E)) end,
+ [
+ ?_test(?assertEqual(Expected(1, 5), reduce(Db, Tree, 1, 5))),
+ ?_test(?assertEqual(Expected(50, 60), reduce(Db, Tree, 50, 60))),
+ ?_test(?assertEqual(Expected(21, 83), reduce(Db, Tree, 21, 83))),
+ ?_test(?assertEqual(Expected(1, 1), reduce(Db, Tree, 1, 1))),
+ ?_test(?assertEqual(Expected(1, 100), reduce(Db, Tree, 0, 200))),
+ ?_test(?assertEqual(Expected(5, 7), reduce(Db, Tree, 5, 7)))
+ ].
+
+
+stats_reduce_test_() ->
+ Db = erlfdb_util:get_test_db([empty]),
+ Tree = open(Db, <<1,2,3>>, 4, [{reduce_fun, fun reduce_stats/2}]),
+ Max = 100,
+ Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, Max)])],
+ lists:foreach(fun(Key) -> insert(Db, Tree, Key, Key) end, Keys),
+ [
+ ?_test(?assertEqual({15,1,5,5,55}, reduce(Db, Tree, 1, 5))),
+ ?_test(?assertEqual({605,50,60,11,33385}, reduce(Db, Tree, 50, 60))),
+ ?_test(?assertEqual({3276,21,83,63,191184}, reduce(Db, Tree, 21, 83))),
+ ?_test(?assertEqual({1,1,1,1,1}, reduce(Db, Tree, 1, 1))),
+ ?_test(?assertEqual({5050,1,100,100,338350}, reduce(Db, Tree, 0, 200))),
+ ?_test(?assertEqual({18,5,7,3,110}, reduce(Db, Tree, 5, 7)))
+ ].
+
+
+group_reduce_level_test_() ->
+ Db = erlfdb_util:get_test_db([empty]),
+ Tree = open(Db, <<1,2,3>>, 4, [{reduce_fun, fun reduce_sum/2}]),
+ Max = 100,
+ Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, Max)])],
+ GroupKeyFun = fun(Key) -> lists:sublist(Key, 2) end,
+ UserAccFun = fun({K,V}, Acc) -> Acc ++ [{K, V}] end,
+ lists:foreach(fun(Key) -> insert(Db, Tree, [Key rem 4, Key rem 3, Key], Key) end, Keys),
+ [
+ ?_test(?assertEqual([{[1, 0], 408}, {[1, 1], 441}, {[1, 2], 376}],
+ group_reduce(Db, Tree, [1], [2], GroupKeyFun, UserAccFun, []))),
+
+ ?_test(?assertEqual([{[1, 0], 408}, {[1, 1], 441}, {[1, 2], 376}],
+ group_reduce(Db, Tree, [1], [2], GroupKeyFun, UserAccFun, [], [{dir, fwd}]))),
+
+ ?_test(?assertEqual([{[1, 2], 376}, {[1, 1], 441}, {[1, 0], 408}],
+ group_reduce(Db, Tree, [1], [2], GroupKeyFun, UserAccFun, [], [{dir, rev}]))),
+
+ ?_test(?assertEqual([{[0,0],432}, {[0,1],468}, {[0,2],400}, {[1,0],408}, {[1,1],441}, {[1,2],376},
+ {[2,0],384}, {[2,1],416}, {[2,2],450}, {[3,0],459}, {[3,1],392}, {[3,2],424}],
+ group_reduce(Db, Tree, ebtree:min(), ebtree:max(), GroupKeyFun, UserAccFun, [])))
+ ].
+
+
+group_reduce_int_test_() ->
+ Db = erlfdb_util:get_test_db([empty]),
+ Tree = open(Db, <<1,2,3>>, 4, [{reduce_fun, fun reduce_count/2}]),
+ Max = 100,
+ Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, Max)])],
+ GroupKeyFun = fun(_Key) -> null end,
+ UserAccFun = fun({K,V}, Acc) -> Acc ++ [{K, V}] end,
+ lists:foreach(fun(Key) -> insert(Db, Tree, Key, Key) end, Keys),
+ [
+ ?_test(?assertEqual([{null, 100}], group_reduce(Db, Tree,
+ ebtree:min(), ebtree:max(), GroupKeyFun, UserAccFun, []))),
+ ?_test(?assertEqual([{null, 99}], group_reduce(Db, Tree, 2, ebtree:max(), GroupKeyFun, UserAccFun, []))),
+ ?_test(?assertEqual([{null, 96}], group_reduce(Db, Tree, 3, 98, GroupKeyFun, UserAccFun, []))),
+ ?_test(?assertEqual([{null, 95}], group_reduce(Db, Tree, 3, 98, GroupKeyFun, UserAccFun, [], [{inclusive_start, false}]))),
+ ?_test(?assertEqual([{null, 95}], group_reduce(Db, Tree, 3, 98, GroupKeyFun, UserAccFun, [], [{inclusive_end, false}]))),
+ ?_test(?assertEqual([{null, 94}], group_reduce(Db, Tree, 3, 98, GroupKeyFun, UserAccFun, [],
+ [{inclusive_start, false}, {inclusive_end, false}])))
+ ].
+
+
+raw_collation_test() ->
+ Db = erlfdb_util:get_test_db([empty]),
+ Tree = open(Db, <<1,2,3>>, 4),
+ insert(Db, Tree, null, null),
+ insert(Db, Tree, 1, 1),
+ ?assertEqual([{1, 1}, {null, null}], range(Db, Tree, 1, null, fun(E, A) -> A ++ E end, [])).
+
+
+custom_collation_test() ->
+ Db = erlfdb_util:get_test_db([empty]),
+ CollateFun = fun(A, B) -> collate_raw(B, A) end,
+ Tree = open(Db, <<1,2,3>>, 4, [{collate_fun, CollateFun}]),
+ insert(Db, Tree, 1, 1),
+ insert(Db, Tree, 2, 2),
+ ?assertEqual([{2, 2}, {1, 1}], range(Db, Tree, 3, 0, fun(E, A) -> A ++ E end, [])).
+
+
+empty_range_test() ->
+ Db = erlfdb_util:get_test_db([empty]),
+ Tree = open(Db, <<1, 2, 3>>, 10),
+ ?assertEqual(
+ blah,
+ range(Db, Tree, min(), max(), fun(_, A) -> A end, blah)
+ ).
+
+
+range_test_() ->
+ {timeout, 1000, fun() ->
+ Db = erlfdb_util:get_test_db([empty]),
+ Max = 100,
+ Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, Max)])],
+ Tree = lists:foldl(fun(Key, T) -> insert(Db, T, Key, Key + 1) end, open(Db, <<1,2,3>>, 10), Keys),
+ lists:foreach(
+ fun(_) ->
+ [StartKey, EndKey] = lists:sort([rand:uniform(Max), rand:uniform(Max)]),
+ ?assertEqual([{K, K + 1} || K <- lists:seq(StartKey, EndKey)],
+ range(Db, Tree, StartKey, EndKey, fun(E, A) -> A ++ E end, [])
+ ) end,
+ lists:seq(1, 100))
+ end}.
+
+
+empty_reverse_range_test() ->
+ Db = erlfdb_util:get_test_db([empty]),
+ Tree = open(Db, <<1, 2, 3>>, 10),
+ ?assertEqual(
+ blah,
+ reverse_range(Db, Tree, min(), max(), fun(_, A) -> A end, blah)
+ ).
+
+
+reverse_range_test_() ->
+ {timeout, 1000, fun() ->
+ Db = erlfdb_util:get_test_db([empty]),
+ Max = 100,
+ Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, Max)])],
+ Tree = lists:foldl(fun(Key, T) -> insert(Db, T, Key, Key + 1) end, open(Db, <<1,2,3>>, 8), Keys),
+ lists:foreach(
+ fun(_) ->
+ [StartKey, EndKey] = lists:sort([rand:uniform(Max), rand:uniform(Max)]),
+ ?assertEqual([{K, K + 1} || K <- lists:seq(EndKey, StartKey, -1)],
+ reverse_range(Db, Tree, StartKey, EndKey, fun(E, A) -> A ++ E end, [])
+ ) end,
+ lists:seq(1, 100))
+ end}.
+
+
+custom_collation_range_test_() ->
+ {timeout, 1000, fun() ->
+ Db = erlfdb_util:get_test_db([empty]),
+ Max = 100,
+ Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, Max)])],
+ CollateFun = fun(A, B) -> collate_raw(B, A) end,
+ Tree = open(Db, <<1,2,3>>, 6, [{collate_fun, CollateFun}]),
+ lists:foldl(fun(Key, T) -> insert(Db, T, Key, Key + 1) end, Tree, Keys),
+ lists:foreach(
+ fun(_) ->
+ [StartKey, EndKey] = sort_keys(Tree, [rand:uniform(Max), rand:uniform(Max)]),
+ Seq = if
+ StartKey < EndKey ->
+ lists:seq(StartKey, EndKey);
+ true ->
+ lists:seq(StartKey, EndKey, -1)
+ end,
+ ?assertEqual([{K, K + 1} || K <- Seq],
+ range(Db, Tree, StartKey, EndKey, fun(E, A) -> A ++ E end, [])
+ ) end,
+ lists:seq(1, 100))
+ end}.
+
+
+custom_collation_reverse_range_test_() ->
+ {timeout, 1000, fun() ->
+ Db = erlfdb_util:get_test_db([empty]),
+ Max = 100,
+ Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, Max)])],
+ CollateFun = fun(A, B) -> collate_raw(B, A) end,
+ Tree = open(Db, <<1,2,3>>, 6, [{collate_fun, CollateFun}]),
+ lists:foldl(fun(Key, T) -> insert(Db, T, Key, Key + 1) end, Tree, Keys),
+ lists:foreach(
+ fun(_) ->
+ [StartKey, EndKey] = sort_keys(Tree, [rand:uniform(Max), rand:uniform(Max)]),
+ Seq = if
+ StartKey < EndKey ->
+ lists:seq(StartKey, EndKey);
+ true ->
+ lists:seq(StartKey, EndKey, -1)
+ end,
+ ?assertEqual([{K, K + 1} || K <- lists:reverse(Seq)],
+ reverse_range(Db, Tree, StartKey, EndKey, fun(E, A) -> A ++ E end, [])
+ ) end,
+ lists:seq(1, 100))
+ end}.
+
+
+validate_tree_test() ->
+ Db = erlfdb_util:get_test_db([empty]),
+ Tree = open(Db, <<1,2,3>>, 4),
+ [ebtree:insert(Db, Tree, I, I) || I <- lists:seq(1, 16)],
+ validate_tree(Db, Tree).
+
+
+validate_node_test_() ->
+ [
+ ?_test(?assertError({node_without_id, _}, validate_node(
+ #tree{}, #node{id = undefined}))),
+ ?_test(?assertError({too_few_keys, _}, validate_node(
+ #tree{collate_fun = fun collate_raw/2, min = 2},
+ #node{id = 1, members = [{1, 1}]}))),
+ ?_test(?assertError({too_many_keys, _}, validate_node(
+ #tree{collate_fun = fun collate_raw/2, min = 2, max = 2},
+ #node{id = 1, members = [{1, 1}, {2, 2}, {3, 3}]}))),
+ ?_test(?assertError({non_leaf_with_prev, _}, validate_node(
+ #tree{min = 0}, #node{id = 1, level = 1, prev = 1}))),
+ ?_test(?assertError({non_leaf_with_next, _}, validate_node(
+ #tree{min = 0}, #node{id = 1, level = 1, next = 1}))),
+ ?_test(?assertError({out_of_order, _}, validate_node(
+ #tree{min = 0, collate_fun = fun collate_raw/2},
+ #node{id = 1, members = [{2, 2}, {1, 1}]}))),
+ ?_test(?assertError({duplicates, _}, validate_node(
+ #tree{min = 0, collate_fun = fun collate_raw/2},
+ #node{id = 1, members = [{1, 1}, {1, 1}]})))
+ ].
+
+
+cache_test_() ->
+ {spawn, [fun() ->
+ Db = erlfdb_util:get_test_db([empty]),
+ CacheFun = fun
+ (set, [Id, Node]) ->
+ erlang:put(Id, Node);
+ (clear, Id) ->
+ erlang:erase(Id);
+ (get, Id) ->
+ erlang:get(Id)
+ end,
+ Tree = open(Db, <<1,2,3>>, 4, [{cache_fun, CacheFun}]),
+ [ebtree:insert(Db, Tree, I, I) || I <- lists:seq(1, 16)],
+ ?assertEqual({1, 1}, ebtree:lookup(Db, Tree, 1)),
+ NodeCache = [V || {_K, V} <- erlang:get(), is_record(V, node)],
+ ?assertEqual(3, length(NodeCache))
+ end]}.
+
+-endif.
diff --git a/src/fabric/include/fabric2.hrl b/src/fabric/include/fabric2.hrl
new file mode 100644
index 000000000..2eba4d5eb
--- /dev/null
+++ b/src/fabric/include/fabric2.hrl
@@ -0,0 +1,87 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+-define(uint2bin(I), binary:encode_unsigned(I, little)).
+-define(bin2uint(I), binary:decode_unsigned(I, little)).
+-define(bin2int(V), binary_to_integer(V)).
+-define(METADATA_VERSION_KEY, <<16#FF, "/metadataVersion">>).
+
+% Prefix Definitions
+
+% Layer Level: (LayerPrefix, X, ...)
+
+-define(CLUSTER_CONFIG, 0).
+-define(ALL_DBS, 1).
+-define(DB_HCA, 2).
+-define(DELETED_DBS, 3).
+-define(DBS, 15).
+-define(EXPIRING_CACHE, 53).
+-define(TX_IDS, 255).
+
+% Cluster Level: (LayerPrefix, ?CLUSTER_CONFIG, X, ...)
+
+-define(AEGIS, 0).
+
+% Database Level: (LayerPrefix, ?DBS, DbPrefix, X, ...)
+
+-define(DB_VERSION, 0).
+-define(DB_CONFIG, 16).
+-define(DB_STATS, 17).
+-define(DB_ALL_DOCS, 18).
+-define(DB_CHANGES, 19).
+-define(DB_REVS, 20).
+-define(DB_DOCS, 21).
+-define(DB_LOCAL_DOCS, 22).
+-define(DB_ATTS, 23).
+-define(DB_VIEWS, 24).
+-define(DB_LOCAL_DOC_BODIES, 25).
+-define(DB_ATT_NAMES, 26).
+-define(DB_SEARCH, 27).
+-define(DB_AEGIS, 28).
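+
+% Illustrative sketch (not part of this header): these constants become the
+% leading element of erlfdb_tuple keys, e.g. a per-database key is assumed to
+% look like erlfdb_tuple:pack({?DB_CHANGES, Seq}, DbPrefix), where DbPrefix
+% itself embeds (LayerPrefix, ?DBS, DbId).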
+
+
+% Versions
+
+% 0 - Initial implementation
+% 1 - Added attachment hash
+% 2 - Added size information
+
+-define(CURR_REV_FORMAT, 2).
+
+% 0 - Adding local doc versions
+
+-define(CURR_LDOC_FORMAT, 0).
+
+% 0 - Attachment storage version
+
+-define(CURR_ATT_STORAGE_VER, 0).
+
+% Misc constants
+
+-define(PDICT_DB_KEY, '$fabric_db_handle').
+-define(PDICT_LAYER_CACHE, '$fabric_layer_id').
+-define(PDICT_CHECKED_DB_IS_CURRENT, '$fabric_checked_db_is_current').
+-define(PDICT_CHECKED_MD_IS_CURRENT, '$fabric_checked_md_is_current').
+-define(PDICT_TX_ID_KEY, '$fabric_tx_id').
+-define(PDICT_TX_RES_KEY, '$fabric_tx_result').
+-define(PDICT_FOLD_ACC_STATE, '$fabric_fold_acc_state').
+
+% Let's keep these in ascending order
+-define(TRANSACTION_TOO_OLD, 1007).
+-define(FUTURE_VERSION, 1009).
+-define(COMMIT_UNKNOWN_RESULT, 1021).
+-define(TRANSACTION_CANCELLED, 1025).
+-define(TRANSACTION_TOO_LARGE, 2101).
+
+
+-define(DEFAULT_BINARY_CHUNK_SIZE, 100000).
diff --git a/src/fabric/src/fabric.app.src b/src/fabric/src/fabric.app.src
index d7686ca1a..a7059fd10 100644
--- a/src/fabric/src/fabric.app.src
+++ b/src/fabric/src/fabric.app.src
@@ -13,15 +13,22 @@
{application, fabric, [
{description, "Routing and proxying layer for CouchDB cluster"},
{vsn, git},
- {registered, []},
+ {mod, {fabric2_app, []}},
+ {registered, [
+ fabric_server
+ ]},
{applications, [
kernel,
stdlib,
config,
+ couch_epi,
couch,
+ ctrace,
rexi,
mem3,
couch_log,
- couch_stats
+ couch_stats,
+ erlfdb,
+ aegis
]}
]}.
diff --git a/src/fabric/src/fabric.erl b/src/fabric/src/fabric.erl
index 27fa8c045..bb538e2db 100644
--- a/src/fabric/src/fabric.erl
+++ b/src/fabric/src/fabric.erl
@@ -668,53 +668,53 @@ set_namespace(NS, #mrargs{extra = Extra} = Args) ->
get_view_sig_from_filename(FilePath) ->
filename:basename(filename:basename(FilePath, ".view"), ".compact").
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-update_doc_test_() ->
- {
- "Update doc tests", {
- setup, fun setup/0, fun teardown/1,
- fun(Ctx) -> [
- should_throw_conflict(Ctx)
- ] end
- }
- }.
-
-should_throw_conflict(Doc) ->
- ?_test(begin
- ?assertThrow(conflict, update_doc(<<"test-db">>, Doc, []))
- end).
-
-
-setup() ->
- Doc = #doc{
- id = <<"test_doc">>,
- revs = {3, [<<5,68,252,180,43,161,216,223,26,119,71,219,212,229,
- 159,113>>]},
- body = {[{<<"foo">>,<<"asdf">>},{<<"author">>,<<"tom">>}]},
- atts = [], deleted = false, meta = []
- },
- ok = application:ensure_started(config),
- ok = meck:expect(mem3, shards, fun(_, _) -> [] end),
- ok = meck:expect(mem3, quorum, fun(_) -> 1 end),
- ok = meck:expect(rexi, cast, fun(_, _) -> ok end),
- ok = meck:expect(rexi_utils, recv,
- fun(_, _, _, _, _, _) ->
- {ok, {error, [{Doc, conflict}]}}
- end),
- ok = meck:expect(couch_util, reorder_results,
- fun(_, [{_, Res}]) ->
- [Res]
- end),
- ok = meck:expect(fabric_util, create_monitors, fun(_) -> ok end),
- ok = meck:expect(rexi_monitor, stop, fun(_) -> ok end),
- Doc.
-
-
-teardown(_) ->
- meck:unload(),
- ok = application:stop(config).
-
-
--endif.
+%% -ifdef(TEST).
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%% update_doc_test_() ->
+%% {
+%% "Update doc tests", {
+%% setup, fun setup/0, fun teardown/1,
+%% fun(Ctx) -> [
+%% should_throw_conflict(Ctx)
+%% ] end
+%% }
+%% }.
+%%
+%% should_throw_conflict(Doc) ->
+%% ?_test(begin
+%% ?assertThrow(conflict, update_doc(<<"test-db">>, Doc, []))
+%% end).
+%%
+%%
+%% setup() ->
+%% Doc = #doc{
+%% id = <<"test_doc">>,
+%% revs = {3, [<<5,68,252,180,43,161,216,223,26,119,71,219,212,229,
+%% 159,113>>]},
+%% body = {[{<<"foo">>,<<"asdf">>},{<<"author">>,<<"tom">>}]},
+%% atts = [], deleted = false, meta = []
+%% },
+%% ok = application:ensure_started(config),
+%% ok = meck:expect(mem3, shards, fun(_, _) -> [] end),
+%% ok = meck:expect(mem3, quorum, fun(_) -> 1 end),
+%% ok = meck:expect(rexi, cast, fun(_, _) -> ok end),
+%% ok = meck:expect(rexi_utils, recv,
+%% fun(_, _, _, _, _, _) ->
+%% {ok, {error, [{Doc, conflict}]}}
+%% end),
+%% ok = meck:expect(couch_util, reorder_results,
+%% fun(_, [{_, Res}]) ->
+%% [Res]
+%% end),
+%% ok = meck:expect(fabric_util, create_monitors, fun(_) -> ok end),
+%% ok = meck:expect(rexi_monitor, stop, fun(_) -> ok end),
+%% Doc.
+%%
+%%
+%% teardown(_) ->
+%% meck:unload(),
+%% ok = application:stop(config).
+%%
+%%
+%% -endif.
diff --git a/src/fabric/src/fabric2_active_tasks.erl b/src/fabric/src/fabric2_active_tasks.erl
new file mode 100644
index 000000000..e706ebaa4
--- /dev/null
+++ b/src/fabric/src/fabric2_active_tasks.erl
@@ -0,0 +1,52 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+-module(fabric2_active_tasks).
+
+
+-export([
+ get_active_tasks/0,
+ get_active_task_info/1,
+
+ update_active_task_info/2
+]).
+
+
+-define(ACTIVE_TASK_INFO, <<"active_task_info">>).
+
+
+get_active_tasks() ->
+ couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(undefined), fun(JTx) ->
+ Types = couch_jobs:get_types(JTx),
+ lists:foldl(fun(Type, TaskAcc) ->
+ JobIds = couch_jobs:get_active_jobs_ids(JTx, Type),
+ Tasks = lists:filtermap(fun(JobId) ->
+ {ok, Data} = couch_jobs:get_job_data(JTx, Type, JobId),
+ case maps:get(?ACTIVE_TASK_INFO, Data, not_found) of
+ not_found -> false;
+ #{} = Map when map_size(Map) == 0 -> false;
+ #{} = Info -> {true, Info}
+ end
+ end, JobIds),
+ TaskAcc ++ Tasks
+ end, [], Types)
+ end).
+
+
+get_active_task_info(JobData) ->
+    #{?ACTIVE_TASK_INFO := ActiveTaskInfo} = JobData,
+ ActiveTaskInfo.
+
+
+update_active_task_info(JobData, ActiveTaskInfo) ->
+ JobData#{?ACTIVE_TASK_INFO => ActiveTaskInfo}.
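+
+
+% Illustrative usage: a job type exposes progress by storing a map under the
+% <<"active_task_info">> key of its job data, e.g.
+%
+%   Data1 = fabric2_active_tasks:update_active_task_info(Data0,
+%       #{<<"changes_done">> => 42, <<"total_changes">> => 1000}),
+%
+% get_active_tasks/0 then includes that map in its result, skipping jobs
+% whose info map is missing or empty.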
diff --git a/src/fabric/src/fabric2_app.erl b/src/fabric/src/fabric2_app.erl
new file mode 100644
index 000000000..da95acb53
--- /dev/null
+++ b/src/fabric/src/fabric2_app.erl
@@ -0,0 +1,32 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_app).
+-behaviour(application).
+
+
+-export([
+ start/2,
+ stop/1
+]).
+
+
+start(_Type, StartArgs) ->
+ fabric2_sup:start_link(StartArgs).
+
+
+stop(_State) ->
+ case application:get_env(erlfdb, test_cluster_pid) of
+ {ok, Pid} -> Pid ! close;
+ _ -> ok
+ end,
+ ok.
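+
+% Note: test_cluster_pid appears to be set by erlfdb's test helpers when they
+% start a sandbox fdbserver; forwarding `close` lets that sandbox shut down
+% when the application stops, and is a no-op outside of test runs.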
diff --git a/src/fabric/src/fabric2_db.erl b/src/fabric/src/fabric2_db.erl
new file mode 100644
index 000000000..b62f26ec8
--- /dev/null
+++ b/src/fabric/src/fabric2_db.erl
@@ -0,0 +1,2354 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_db).
+
+
+-export([
+ create/2,
+ open/2,
+ delete/2,
+ undelete/4,
+
+ list_dbs/0,
+ list_dbs/1,
+ list_dbs/3,
+
+ list_dbs_info/0,
+ list_dbs_info/1,
+ list_dbs_info/3,
+
+ list_deleted_dbs_info/0,
+ list_deleted_dbs_info/1,
+ list_deleted_dbs_info/3,
+
+ check_is_admin/1,
+ check_is_member/1,
+
+ name/1,
+ get_after_doc_read_fun/1,
+ get_before_doc_update_fun/1,
+ get_committed_update_seq/1,
+ get_compacted_seq/1,
+ get_compactor_pid/1,
+ get_db_info/1,
+ %% get_partition_info/2,
+ get_del_doc_count/1,
+ get_doc_count/1,
+ get_doc_count/2,
+ %% get_epochs/1,
+ %% get_filepath/1,
+ get_instance_start_time/1,
+ get_pid/1,
+ get_revs_limit/1,
+ get_revs_limit/2,
+ get_security/1,
+ get_security/2,
+ get_update_seq/1,
+ get_user_ctx/1,
+ get_uuid/1,
+ %% get_purge_seq/1,
+ %% get_oldest_purge_seq/1,
+ %% get_purge_infos_limit/1,
+
+ is_clustered/1,
+ is_db/1,
+ is_partitioned/1,
+ is_system_db/1,
+ is_system_db_name/1,
+ is_replicator_db/1,
+ is_users_db/1,
+
+ set_revs_limit/2,
+ %% set_purge_infos_limit/2,
+ set_security/2,
+ set_user_ctx/2,
+
+ ensure_full_commit/1,
+ ensure_full_commit/2,
+
+ %% load_validation_funs/1,
+ %% reload_validation_funs/1,
+
+ open_doc/2,
+ open_doc/3,
+ open_doc_revs/4,
+ %% open_doc_int/3,
+ get_doc_info/2,
+ get_full_doc_info/2,
+ get_full_doc_infos/2,
+ get_missing_revs/2,
+ get_design_docs/1,
+ %% get_purge_infos/2,
+
+ %% get_minimum_purge_seq/1,
+ %% purge_client_exists/3,
+
+ validate_docid/1,
+ %% doc_from_json_obj_validate/2,
+
+ update_doc/2,
+ update_doc/3,
+ update_docs/2,
+ update_docs/3,
+ %% delete_doc/3,
+
+ %% purge_docs/2,
+ %% purge_docs/3,
+
+ read_attachment/3,
+ write_attachment/3,
+
+ fold_docs/3,
+ fold_docs/4,
+ fold_docs/5,
+ fold_design_docs/4,
+ fold_local_docs/4,
+ fold_changes/4,
+ fold_changes/5,
+ %% count_changes_since/2,
+ %% fold_purge_infos/4,
+ %% fold_purge_infos/5,
+
+ %% calculate_start_seq/3,
+ %% owner_of/2,
+
+ %% start_compact/1,
+ %% cancel_compact/1,
+ %% wait_for_compaction/1,
+ %% wait_for_compaction/2,
+
+ dbname_suffix/1,
+ normalize_dbname/1,
+ validate_dbname/1,
+
+ %% make_doc/5,
+ new_revid/2,
+
+ apply_open_doc_opts/3
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include("fabric2.hrl").
+
+
+% The default max database name length is chosen for compatibility with
+% CouchDB < 4.x. See the default.ini entry for additional information.
+-define(DEFAULT_MAX_DATABASE_NAME_LENGTH, 238).
+
+-define(DBNAME_REGEX,
+ "^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*" % use the stock CouchDB regex
+ "(\\.[0-9]{10,})?$" % but allow an optional shard timestamp at the end
+).
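+
+% For illustration: <<"movies-2021/backup">> matches the pattern above, while
+% <<"9abc">> is rejected for not starting with a lowercase letter; system
+% names such as <<"_users">> only pass validation through the
+% is_system_db_name/1 fallback in validate_dbname_pat/2 below.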
+
+-define(FIRST_DDOC_KEY, <<"_design/">>).
+-define(LAST_DDOC_KEY, <<"_design0">>).
+
+-define(RETURN(Term), throw({?MODULE, Term})).
+
+-define(DEFAULT_UPDATE_DOCS_BATCH_SIZE, 2500000).
+
+
+-record(bacc, {
+ db,
+ docs,
+ batch_size,
+ options,
+ rev_futures,
+ seen,
+ results
+}).
+
+
+create(DbName, Options) ->
+ case validate_dbname(DbName) of
+ ok ->
+ Result = fabric2_fdb:transactional(DbName, Options, fun(TxDb) ->
+ case fabric2_fdb:exists(TxDb) of
+ true ->
+ {error, file_exists};
+ false ->
+ fabric2_fdb:create(TxDb, Options)
+ end
+ end),
+ % We cache outside of the transaction so that we're sure
+ % that the transaction was committed.
+ case Result of
+ #{} = Db0 ->
+ Db1 = maybe_add_sys_db_callbacks(Db0),
+ ok = fabric2_server:store(Db1),
+ {ok, Db1#{tx := undefined}};
+ Error ->
+ Error
+ end;
+ Error ->
+ Error
+ end.
+
+
+open(DbName, Options) ->
+ UUID = fabric2_util:get_value(uuid, Options),
+ case fabric2_server:fetch(DbName, UUID) of
+ #{} = Db ->
+ Db1 = maybe_set_user_ctx(Db, Options),
+ Db2 = maybe_set_interactive(Db1, Options),
+ {ok, require_member_check(Db2)};
+ undefined ->
+ Result = fabric2_fdb:transactional(DbName, Options, fun(TxDb) ->
+ fabric2_fdb:open(TxDb, Options)
+ end),
+ % Cache outside the transaction retry loop
+ case Result of
+ #{} = Db0 ->
+ Db1 = maybe_add_sys_db_callbacks(Db0),
+ ok = fabric2_server:store(Db1),
+ Db2 = Db1#{tx := undefined},
+ {ok, require_member_check(Db2)};
+ Error ->
+ Error
+ end
+ end.
+
+
+delete(DbName, Options) ->
+    % Delete doesn't check user_ctx; that's done at the HTTP API level.
+    % Here we just want the `database_does_not_exist` error to be thrown
+    % if the database is missing.
+ Options1 = lists:keystore(user_ctx, 1, Options, ?ADMIN_CTX),
+ case lists:keyfind(deleted_at, 1, Options1) of
+ {deleted_at, TimeStamp} ->
+ fabric2_fdb:transactional(DbName, Options1, fun(TxDb) ->
+ fabric2_fdb:remove_deleted_db(TxDb, TimeStamp)
+ end);
+ false ->
+ {ok, Db} = open(DbName, Options1),
+ Resp = fabric2_fdb:transactional(Db, fun(TxDb) ->
+ fabric2_fdb:delete(TxDb)
+ end),
+ if Resp /= ok -> Resp; true ->
+ fabric2_server:remove(DbName)
+ end
+ end.
+
+
+undelete(DbName, TgtDbName, TimeStamp, Options) ->
+ case validate_dbname(TgtDbName) of
+ ok ->
+ fabric2_fdb:transactional(DbName, Options, fun(TxDb) ->
+ fabric2_fdb:undelete(TxDb, TgtDbName, TimeStamp)
+ end);
+ Error ->
+ Error
+ end.
+
+
+list_dbs() ->
+ list_dbs([]).
+
+
+list_dbs(Options) ->
+ Callback = fun(DbName, Acc) -> [DbName | Acc] end,
+ DbNames = fabric2_fdb:transactional(fun(Tx) ->
+ fabric2_fdb:list_dbs(Tx, Callback, [], Options)
+ end),
+ lists:reverse(DbNames).
+
+
+list_dbs(UserFun, UserAcc0, Options) ->
+ FoldFun = fun
+ (DbName, Acc) -> maybe_stop(UserFun({row, [{id, DbName}]}, Acc))
+ end,
+ fabric2_fdb:transactional(fun(Tx) ->
+ try
+ UserAcc1 = maybe_stop(UserFun({meta, []}, UserAcc0)),
+ UserAcc2 = fabric2_fdb:list_dbs(
+ Tx,
+ FoldFun,
+ UserAcc1,
+ Options
+ ),
+ {ok, maybe_stop(UserFun(complete, UserAcc2))}
+ catch throw:{stop, FinalUserAcc} ->
+ {ok, FinalUserAcc}
+ end
+ end).
+
+
+list_dbs_info() ->
+ list_dbs_info([]).
+
+
+list_dbs_info(Options) ->
+ Callback = fun(Value, Acc) ->
+ NewAcc = case Value of
+ {meta, _} -> Acc;
+ {row, DbInfo} -> [DbInfo | Acc];
+ complete -> Acc
+ end,
+ {ok, NewAcc}
+ end,
+ {ok, DbInfos} = list_dbs_info(Callback, [], Options),
+ {ok, lists:reverse(DbInfos)}.
+
+
+list_dbs_info(UserFun, UserAcc0, Options) ->
+ FoldFun = fun(DbName, InfoFuture, {FutureQ, Count, Acc}) ->
+ NewFutureQ = queue:in({DbName, InfoFuture}, FutureQ),
+ drain_info_futures(NewFutureQ, Count + 1, UserFun, Acc)
+ end,
+ fabric2_fdb:transactional(fun(Tx) ->
+ try
+ UserAcc1 = maybe_stop(UserFun({meta, []}, UserAcc0)),
+ InitAcc = {queue:new(), 0, UserAcc1},
+ {FinalFutureQ, _, UserAcc2} = fabric2_fdb:list_dbs_info(
+ Tx,
+ FoldFun,
+ InitAcc,
+ Options
+ ),
+ UserAcc3 = drain_all_info_futures(FinalFutureQ, UserFun, UserAcc2),
+ {ok, maybe_stop(UserFun(complete, UserAcc3))}
+ catch throw:{stop, FinalUserAcc} ->
+ {ok, FinalUserAcc}
+ end
+ end).
+
+
+list_deleted_dbs_info() ->
+ list_deleted_dbs_info([]).
+
+
+list_deleted_dbs_info(Options) ->
+ Callback = fun(Value, Acc) ->
+ NewAcc = case Value of
+ {meta, _} -> Acc;
+ {row, DbInfo} -> [DbInfo | Acc];
+ complete -> Acc
+ end,
+ {ok, NewAcc}
+ end,
+ {ok, DbInfos} = list_deleted_dbs_info(Callback, [], Options),
+ {ok, lists:reverse(DbInfos)}.
+
+
+list_deleted_dbs_info(UserFun, UserAcc0, Options0) ->
+ Dir = fabric2_util:get_value(dir, Options0, fwd),
+ StartKey0 = fabric2_util:get_value(start_key, Options0),
+ EndKey0 = fabric2_util:get_value(end_key, Options0),
+
+ {FirstBinary, LastBinary} = case Dir of
+ fwd -> {<<>>, <<255>>};
+ rev -> {<<255>>, <<>>}
+ end,
+
+ StartKey1 = case StartKey0 of
+ undefined ->
+ {FirstBinary};
+ DbName0 when is_binary(DbName0) ->
+ {DbName0, FirstBinary};
+ [DbName0, TimeStamp0] when is_binary(DbName0), is_binary(TimeStamp0) ->
+ {DbName0, TimeStamp0};
+ BadStartKey ->
+ erlang:error({invalid_start_key, BadStartKey})
+ end,
+ EndKey1 = case EndKey0 of
+ undefined ->
+ {LastBinary};
+ DbName1 when is_binary(DbName1) ->
+ {DbName1, LastBinary};
+ [DbName1, TimeStamp1] when is_binary(DbName1), is_binary(TimeStamp1) ->
+ {DbName1, TimeStamp1};
+ BadEndKey ->
+ erlang:error({invalid_end_key, BadEndKey})
+ end,
+
+ Options1 = Options0 -- [{start_key, StartKey0}, {end_key, EndKey0}],
+ Options2 = [
+ {start_key, StartKey1},
+ {end_key, EndKey1},
+ {wrap_keys, false}
+ ] ++ Options1,
+
+ FoldFun = fun(DbName, TimeStamp, InfoFuture, {FutureQ, Count, Acc}) ->
+ NewFutureQ = queue:in({DbName, TimeStamp, InfoFuture}, FutureQ),
+ drain_deleted_info_futures(NewFutureQ, Count + 1, UserFun, Acc)
+ end,
+ fabric2_fdb:transactional(fun(Tx) ->
+ try
+ UserAcc1 = maybe_stop(UserFun({meta, []}, UserAcc0)),
+ InitAcc = {queue:new(), 0, UserAcc1},
+ {FinalFutureQ, _, UserAcc2} = fabric2_fdb:list_deleted_dbs_info(
+ Tx,
+ FoldFun,
+ InitAcc,
+ Options2
+ ),
+ UserAcc3 = drain_all_deleted_info_futures(
+ FinalFutureQ,
+ UserFun,
+ UserAcc2
+ ),
+ {ok, maybe_stop(UserFun(complete, UserAcc3))}
+ catch throw:{stop, FinalUserAcc} ->
+ {ok, FinalUserAcc}
+ end
+ end).
+
+
+is_admin(Db, {SecProps}) when is_list(SecProps) ->
+ case fabric2_db_plugin:check_is_admin(Db) of
+ true ->
+ true;
+ false ->
+ UserCtx = get_user_ctx(Db),
+ {Admins} = get_admins(SecProps),
+ is_authorized(Admins, UserCtx)
+ end.
+
+
+check_is_admin(Db) ->
+ check_is_admin(Db, get_security(Db)).
+
+
+check_is_admin(Db, SecDoc) ->
+ case is_admin(Db, SecDoc) of
+ true ->
+ ok;
+ false ->
+ UserCtx = get_user_ctx(Db),
+ Reason = <<"You are not a db or server admin.">>,
+ throw_security_error(UserCtx, Reason)
+ end.
+
+
+check_is_member(Db) ->
+ check_is_member(Db, get_security(Db)).
+
+
+check_is_member(Db, SecDoc) ->
+ case is_member(Db, SecDoc) of
+ true ->
+ ok;
+ false ->
+ UserCtx = get_user_ctx(Db),
+ throw_security_error(UserCtx)
+ end.
+
+
+require_admin_check(#{} = Db) ->
+ Db#{security_fun := fun check_is_admin/2}.
+
+
+require_member_check(#{} = Db) ->
+ Db#{security_fun := fun check_is_member/2}.
+
+
+name(#{name := DbName}) ->
+ DbName.
+
+
+get_after_doc_read_fun(#{after_doc_read := AfterDocRead}) ->
+ AfterDocRead.
+
+
+get_before_doc_update_fun(#{before_doc_update := BeforeDocUpdate}) ->
+ BeforeDocUpdate.
+
+get_committed_update_seq(#{} = Db) ->
+ get_update_seq(Db).
+
+
+get_compacted_seq(#{} = Db) ->
+ get_update_seq(Db).
+
+
+get_compactor_pid(#{} = _Db) ->
+ nil.
+
+
+get_db_info(#{} = Db) ->
+ DbProps = fabric2_fdb:transactional(Db, fun(TxDb) ->
+ fabric2_fdb:get_info(TxDb)
+ end),
+ {ok, make_db_info(name(Db), DbProps)}.
+
+
+get_del_doc_count(#{} = Db) ->
+ get_doc_count(Db, <<"doc_del_count">>).
+
+
+get_doc_count(Db) ->
+ get_doc_count(Db, <<"doc_count">>).
+
+
+get_doc_count(Db, undefined) ->
+ get_doc_count(Db, <<"doc_count">>);
+
+get_doc_count(Db, <<"_all_docs">>) ->
+ get_doc_count(Db, <<"doc_count">>);
+
+get_doc_count(DbName, <<"_design">>) ->
+ get_doc_count(DbName, <<"doc_design_count">>);
+
+get_doc_count(DbName, <<"_local">>) ->
+ get_doc_count(DbName, <<"doc_local_count">>);
+
+get_doc_count(Db, Key) ->
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ fabric2_fdb:get_stat(TxDb, Key)
+ end).
+
+
+get_instance_start_time(#{}) ->
+ 0.
+
+
+get_pid(#{}) ->
+ nil.
+
+
+get_revs_limit(#{} = Db) ->
+ get_revs_limit(Db, []).
+
+
+get_revs_limit(#{} = Db, Opts) ->
+ CurrentDb = get_cached_db(Db, Opts),
+ maps:get(revs_limit, CurrentDb).
+
+
+get_security(#{} = Db) ->
+ get_security(Db, []).
+
+
+get_security(#{} = Db, Opts) ->
+ CurrentDb = get_cached_db(Db, Opts),
+ maps:get(security_doc, CurrentDb).
+
+
+get_update_seq(#{} = Db) ->
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ fabric2_fdb:get_last_change(TxDb)
+ end).
+
+
+get_user_ctx(#{user_ctx := UserCtx}) ->
+ UserCtx.
+
+
+get_uuid(#{uuid := UUID}) ->
+ UUID.
+
+
+is_clustered(#{}) ->
+ false.
+
+
+is_db(#{name := _}) ->
+ true;
+is_db(_) ->
+ false.
+
+
+is_partitioned(#{}) ->
+ false.
+
+
+is_system_db(#{name := DbName}) ->
+ is_system_db_name(DbName).
+
+
+is_system_db_name(DbName) when is_list(DbName) ->
+ is_system_db_name(?l2b(DbName));
+is_system_db_name(DbName) when is_binary(DbName) ->
+ Suffix = filename:basename(DbName),
+ case {filename:dirname(DbName), lists:member(Suffix, ?SYSTEM_DATABASES)} of
+ {<<".">>, Result} -> Result;
+ {_Prefix, false} -> false;
+ {Prefix, true} ->
+ ReOpts = [{capture,none}, dollar_endonly],
+ re:run(Prefix, ?DBNAME_REGEX, ReOpts) == match
+ end.
+
+
+is_replicator_db(#{name := DbName}) ->
+ is_replicator_db(DbName);
+
+is_replicator_db(DbName) when is_binary(DbName) ->
+ fabric2_util:dbname_ends_with(DbName, <<"_replicator">>).
+
+
+is_users_db(#{name := DbName}) ->
+ is_users_db(DbName);
+
+is_users_db(DbName) when is_binary(DbName) ->
+ AuthenticationDb = config:get("chttpd_auth", "authentication_db"),
+ CfgUsersSuffix = config:get("couchdb", "users_db_suffix", "_users"),
+
+ IsAuthCache = if AuthenticationDb == undefined -> false; true ->
+ DbName == ?l2b(AuthenticationDb)
+ end,
+ IsCfgUsersDb = fabric2_util:dbname_ends_with(DbName, ?l2b(CfgUsersSuffix)),
+ IsGlobalUsersDb = fabric2_util:dbname_ends_with(DbName, <<"_users">>),
+
+ IsAuthCache orelse IsCfgUsersDb orelse IsGlobalUsersDb.
+
+
+set_revs_limit(#{} = Db0, RevsLimit) when is_integer(RevsLimit) ->
+ Db1 = require_admin_check(Db0),
+ Resp = fabric2_fdb:transactional(Db1, fun(TxDb) ->
+ fabric2_fdb:set_config(TxDb, revs_limit, RevsLimit)
+ end),
+ case Resp of
+ {ok, #{} = Db2} -> fabric2_server:store(Db2);
+ Err -> Err
+ end.
+
+
+set_security(#{} = Db0, Security) ->
+ Db1 = require_admin_check(Db0),
+ ok = fabric2_util:validate_security_object(Security),
+ Resp = fabric2_fdb:transactional(Db1, fun(TxDb) ->
+ fabric2_fdb:set_config(TxDb, security_doc, Security)
+ end),
+ case Resp of
+ {ok, #{} = Db2} -> fabric2_server:store(Db2);
+ Err -> Err
+ end.
+
+
+set_user_ctx(#{} = Db, UserCtx) ->
+ Db#{user_ctx := UserCtx}.
+
+
+ensure_full_commit(#{}) ->
+ {ok, 0}.
+
+
+ensure_full_commit(#{}, _Timeout) ->
+ {ok, 0}.
+
+
+open_doc(#{} = Db, DocId) ->
+ open_doc(Db, DocId, []).
+
+
+open_doc(#{} = Db, <<?LOCAL_DOC_PREFIX, _/binary>> = DocId, _Options) ->
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ case fabric2_fdb:get_local_doc(TxDb, DocId) of
+ #doc{} = Doc -> {ok, Doc};
+ Else -> Else
+ end
+ end);
+
+open_doc(#{} = Db, DocId, Options) ->
+ NeedsTreeOpts = [revs_info, conflicts, deleted_conflicts],
+ NeedsTree = (Options -- NeedsTreeOpts /= Options),
+ OpenDeleted = lists:member(deleted, Options),
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ Revs = case NeedsTree of
+ true -> fabric2_fdb:get_all_revs(TxDb, DocId);
+ false -> fabric2_fdb:get_winning_revs(TxDb, DocId, 1)
+ end,
+ if Revs == [] -> {not_found, missing}; true ->
+ #{winner := true} = RI = lists:last(Revs),
+ case fabric2_fdb:get_doc_body(TxDb, DocId, RI) of
+ #doc{deleted = true} when not OpenDeleted ->
+ {not_found, deleted};
+ #doc{} = Doc ->
+ apply_open_doc_opts(Doc, Revs, Options);
+ Else ->
+ Else
+ end
+ end
+ end).
+
+
+open_doc_revs(Db, DocId, Revs, Options) ->
+ Latest = lists:member(latest, Options),
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ AllRevInfos = fabric2_fdb:get_all_revs(TxDb, DocId),
+ RevTree = lists:foldl(fun(RI, TreeAcc) ->
+ RIPath = fabric2_util:revinfo_to_path(RI),
+ {Merged, _} = couch_key_tree:merge(TreeAcc, RIPath),
+ Merged
+ end, [], AllRevInfos),
+ {Found, Missing} = case Revs of
+ all ->
+ {couch_key_tree:get_all_leafs(RevTree), []};
+ _ when Latest ->
+ couch_key_tree:get_key_leafs(RevTree, Revs);
+ _ ->
+ couch_key_tree:get(RevTree, Revs)
+ end,
+ Docs = lists:map(fun({Value, {Pos, [Rev | RevPath]}}) ->
+ case Value of
+ ?REV_MISSING ->
+ % We have the rev in our list but know nothing about it
+ {{not_found, missing}, {Pos, Rev}};
+ _ ->
+ RevInfo = #{
+ rev_id => {Pos, Rev},
+ rev_path => RevPath
+ },
+ case fabric2_fdb:get_doc_body(TxDb, DocId, RevInfo) of
+ #doc{} = Doc ->
+ apply_open_doc_opts(Doc, AllRevInfos, Options);
+ Else ->
+ {Else, {Pos, Rev}}
+ end
+ end
+ end, Found),
+ MissingDocs = [{{not_found, missing}, MRev} || MRev <- Missing],
+ {ok, Docs ++ MissingDocs}
+ end).
+
+
+get_doc_info(Db, DocId) ->
+ case get_full_doc_info(Db, DocId) of
+ not_found -> not_found;
+ FDI -> couch_doc:to_doc_info(FDI)
+ end.
+
+
+get_full_doc_info(Db, DocId) ->
+ RevInfos = fabric2_fdb:transactional(Db, fun(TxDb) ->
+ fabric2_fdb:get_all_revs(TxDb, DocId)
+ end),
+ if RevInfos == [] -> not_found; true ->
+ #{winner := true} = Winner = lists:last(RevInfos),
+ RevTree = lists:foldl(fun(RI, TreeAcc) ->
+ RIPath = fabric2_util:revinfo_to_path(RI),
+ {Merged, _} = couch_key_tree:merge(TreeAcc, RIPath),
+ Merged
+ end, [], RevInfos),
+ #full_doc_info{
+ id = DocId,
+ update_seq = fabric2_fdb:vs_to_seq(maps:get(sequence, Winner)),
+ deleted = maps:get(deleted, Winner),
+ rev_tree = RevTree
+ }
+ end.
+
+
+get_full_doc_infos(Db, DocIds) ->
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ lists:map(fun(DocId) ->
+ get_full_doc_info(TxDb, DocId)
+ end, DocIds)
+ end).
+
+
+get_missing_revs(Db, JsonIdRevs) ->
+ IdRevs = [idrevs(IdR) || IdR <- JsonIdRevs],
+ AllRevInfos = fabric2_fdb:transactional(Db, fun(TxDb) ->
+ lists:foldl(fun({Id, _Revs}, Acc) ->
+ case maps:is_key(Id, Acc) of
+ true ->
+ Acc;
+ false ->
+ RevInfos = fabric2_fdb:get_all_revs(TxDb, Id),
+ Acc#{Id => RevInfos}
+ end
+ end, #{}, IdRevs)
+ end),
+ AllMissing = lists:flatmap(fun({Id, Revs}) ->
+ #{Id := RevInfos} = AllRevInfos,
+ Missing = try
+ lists:foldl(fun(RevInfo, RevAcc) ->
+ if RevAcc /= [] -> ok; true ->
+ throw(all_found)
+ end,
+ filter_found_revs(RevInfo, RevAcc)
+ end, Revs, RevInfos)
+ catch throw:all_found ->
+ []
+ end,
+ if Missing == [] -> []; true ->
+ PossibleAncestors = find_possible_ancestors(RevInfos, Missing),
+ [{Id, Missing, PossibleAncestors}]
+ end
+ end, IdRevs),
+ {ok, AllMissing}.
+
+
+get_design_docs(Db) ->
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ #{
+ db_prefix := DbPrefix
+ } = TxDb,
+
+ Prefix = erlfdb_tuple:pack({?DB_ALL_DOCS}, DbPrefix),
+ Options = set_design_doc_keys([]),
+ FoldFun = fun({Key, Val}, Acc) ->
+ {DocId} = erlfdb_tuple:unpack(Key, Prefix),
+ RevId = erlfdb_tuple:unpack(Val),
+ Rev = #{
+ rev_id => RevId,
+ rev_path => []
+ },
+ Future = fabric2_fdb:get_doc_body_future(TxDb, DocId, Rev),
+ [{DocId, Rev, Future} | Acc]
+ end,
+ Futures = fabric2_fdb:fold_range(TxDb, Prefix, FoldFun, [], Options),
+
+ % Using foldl instead of map means that the design
+ % docs come out in sorted order.
+ lists:foldl(fun({DocId, Rev, Future}, Acc) ->
+ [fabric2_fdb:get_doc_body_wait(TxDb, DocId, Rev, Future) | Acc]
+ end, [], Futures)
+ end).
+
+
+validate_docid(<<"">>) ->
+ throw({illegal_docid, <<"Document id must not be empty">>});
+validate_docid(<<"_design/">>) ->
+ throw({illegal_docid, <<"Illegal document id `_design/`">>});
+validate_docid(<<"_local/">>) ->
+ throw({illegal_docid, <<"Illegal document id `_local/`">>});
+validate_docid(Id) when is_binary(Id) ->
+ MaxLen = case config:get("couchdb", "max_document_id_length", "infinity") of
+ "infinity" -> infinity;
+ IntegerVal -> list_to_integer(IntegerVal)
+ end,
+ case MaxLen > 0 andalso byte_size(Id) > MaxLen of
+ true -> throw({illegal_docid, <<"Document id is too long">>});
+ false -> ok
+ end,
+ case couch_util:validate_utf8(Id) of
+ false -> throw({illegal_docid, <<"Document id must be valid UTF-8">>});
+ true -> ok
+ end,
+ case Id of
+ <<?DESIGN_DOC_PREFIX, _/binary>> -> ok;
+ <<?LOCAL_DOC_PREFIX, _/binary>> -> ok;
+ <<"_", _/binary>> ->
+ case fabric2_db_plugin:validate_docid(Id) of
+ true ->
+ ok;
+ false ->
+ throw(
+ {illegal_docid,
+ <<"Only reserved document ids may start with underscore.">>})
+ end;
+ _Else -> ok
+ end;
+validate_docid(Id) ->
+ couch_log:debug("Document id is not a string: ~p", [Id]),
+ throw({illegal_docid, <<"Document id must be a string">>}).
+
+
+update_doc(Db, Doc) ->
+ update_doc(Db, Doc, []).
+
+
+update_doc(Db, Doc, Options) ->
+ case update_docs(Db, [Doc], Options) of
+ {ok, [{ok, NewRev}]} ->
+ {ok, NewRev};
+ {ok, [{{_Id, _Rev}, Error}]} ->
+ throw(Error);
+ {error, [{{_Id, _Rev}, Error}]} ->
+ throw(Error);
+ {error, [Error]} ->
+ throw(Error);
+ {ok, []} ->
+ % replication success
+ {Pos, [RevId | _]} = Doc#doc.revs,
+ {ok, {Pos, RevId}}
+ end.
+
+
+update_docs(Db, Docs) ->
+ update_docs(Db, Docs, []).
+
+
+update_docs(Db, Docs0, Options) ->
+ Docs1 = apply_before_doc_update(Db, Docs0, Options),
+ try
+ validate_atomic_update(Docs0, lists:member(all_or_nothing, Options)),
+
+ Resps0 = batch_update_docs(Db, Docs1, Options),
+
+ % Notify index builder
+ fabric2_index:db_updated(name(Db)),
+
+ % Convert errors
+ Resps1 = lists:map(fun(Resp) ->
+ case Resp of
+ {#doc{} = Doc, Error} ->
+ #doc{
+ id = DocId,
+ revs = Revs
+ } = Doc,
+ RevId = case Revs of
+ {RevPos, [Rev | _]} -> {RevPos, Rev};
+ {0, []} -> {0, <<>>};
+ Else -> Else
+ end,
+ {{DocId, RevId}, Error};
+ Else ->
+ Else
+ end
+ end, Resps0),
+ case is_replicated(Options) of
+ true ->
+ {ok, lists:flatmap(fun(R) ->
+ case R of
+ {ok, []} -> [];
+ {{_, _}, {ok, []}} -> [];
+ Else -> [Else]
+ end
+ end, Resps1)};
+ false ->
+ {ok, Resps1}
+ end
+ catch throw:{aborted, Errors} ->
+ {aborted, Errors}
+ end.
+
+
+read_attachment(Db, DocId, AttId) ->
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ fabric2_fdb:read_attachment(TxDb, DocId, AttId)
+ end).
+
+
+write_attachment(Db, DocId, Att) ->
+ Data = couch_att:fetch(data, Att),
+ Encoding = couch_att:fetch(encoding, Att),
+ {ok, AttId} = fabric2_fdb:write_attachment(Db, DocId, Data, Encoding),
+ couch_att:store(data, {loc, Db, DocId, AttId}, Att).
+
+
+fold_docs(Db, UserFun, UserAcc) ->
+ fold_docs(Db, UserFun, UserAcc, []).
+
+
+fold_docs(Db, UserFun, UserAcc0, Options) ->
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ try
+ #{
+ db_prefix := DbPrefix
+ } = TxDb,
+
+ Prefix = erlfdb_tuple:pack({?DB_ALL_DOCS}, DbPrefix),
+ Meta = get_all_docs_meta(TxDb, Options),
+
+ UserAcc1 = maybe_stop(UserFun({meta, Meta}, UserAcc0)),
+
+ UserAcc2 = fabric2_fdb:fold_range(TxDb, Prefix, fun({K, V}, Acc) ->
+ {DocId} = erlfdb_tuple:unpack(K, Prefix),
+ RevId = erlfdb_tuple:unpack(V),
+ Row0 = [
+ {id, DocId},
+ {key, DocId},
+ {value, {[{rev, couch_doc:rev_to_str(RevId)}]}}
+ ],
+
+ DocOpts = couch_util:get_value(doc_opts, Options, []),
+ OpenOpts = [deleted | DocOpts],
+
+ Row1 = case lists:keyfind(include_docs, 1, Options) of
+ {include_docs, true} ->
+ Row0 ++ open_json_doc(TxDb, DocId, OpenOpts, DocOpts);
+ _ ->
+ Row0
+ end,
+
+ maybe_stop(UserFun({row, Row1}, Acc))
+ end, UserAcc1, Options),
+
+ {ok, maybe_stop(UserFun(complete, UserAcc2))}
+ catch throw:{stop, FinalUserAcc} ->
+ {ok, FinalUserAcc}
+ end
+ end).
+
+
+fold_docs(Db, DocIds, UserFun, UserAcc0, Options) ->
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ try
+ NeedsTreeOpts = [revs_info, conflicts, deleted_conflicts],
+ NeedsTree = (Options -- NeedsTreeOpts /= Options),
+
+ InitAcc = #{
+ revs_q => queue:new(),
+ revs_count => 0,
+ body_q => queue:new(),
+ body_count => 0,
+ doc_opts => Options,
+ user_acc => UserAcc0,
+ user_fun => UserFun
+ },
+
+ FinalAcc1 = lists:foldl(fun(DocId, Acc) ->
+ #{
+ revs_q := RevsQ,
+ revs_count := RevsCount
+ } = Acc,
+ Future = fold_docs_get_revs(TxDb, DocId, NeedsTree),
+ NewAcc = Acc#{
+ revs_q := queue:in({DocId, Future}, RevsQ),
+ revs_count := RevsCount + 1
+ },
+ drain_fold_docs_revs_futures(TxDb, NewAcc)
+ end, InitAcc, DocIds),
+
+ FinalAcc2 = drain_all_fold_docs_revs_futures(TxDb, FinalAcc1),
+ FinalAcc3 = drain_all_fold_docs_body_futures(TxDb, FinalAcc2),
+
+ #{
+ user_acc := FinalUserAcc
+ } = FinalAcc3,
+ {ok, FinalUserAcc}
+
+ catch throw:{stop, StopUserAcc} ->
+ {ok, StopUserAcc}
+ end
+ end).
+
+
+fold_design_docs(Db, UserFun, UserAcc0, Options1) ->
+ Options2 = set_design_doc_keys(Options1),
+ fold_docs(Db, UserFun, UserAcc0, Options2).
+
+
+fold_local_docs(Db, UserFun, UserAcc0, Options0) ->
+    % This is mostly for testing and sanity checking. When called from a test
+    % the namespace will be set automatically. We also assert that, when
+    % called from the API, the correct namespace was set.
+ Options = case lists:keyfind(namespace, 1, Options0) of
+ {namespace, <<"_local">>} -> Options0;
+ false -> [{namespace, <<"_local">>} | Options0]
+ end,
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ try
+ #{
+ db_prefix := DbPrefix
+ } = TxDb,
+
+ Prefix = erlfdb_tuple:pack({?DB_LOCAL_DOCS}, DbPrefix),
+ Meta = get_all_docs_meta(TxDb, Options),
+
+ UserAcc1 = maybe_stop(UserFun({meta, Meta}, UserAcc0)),
+
+ UserAcc2 = fabric2_fdb:fold_range(TxDb, Prefix, fun({K, V}, Acc) ->
+ {DocId} = erlfdb_tuple:unpack(K, Prefix),
+ Rev = fabric2_fdb:get_local_doc_rev(TxDb, DocId, V),
+ maybe_stop(UserFun({row, [
+ {id, DocId},
+ {key, DocId},
+ {value, {[{rev, couch_doc:rev_to_str({0, Rev})}]}}
+ ]}, Acc))
+ end, UserAcc1, Options),
+
+ {ok, maybe_stop(UserFun(complete, UserAcc2))}
+ catch throw:{stop, FinalUserAcc} ->
+ {ok, FinalUserAcc}
+ end
+ end).
+
+
+fold_changes(Db, SinceSeq, UserFun, UserAcc) ->
+ fold_changes(Db, SinceSeq, UserFun, UserAcc, []).
+
+
+fold_changes(Db, SinceSeq, UserFun, UserAcc, Options) ->
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ try
+ #{
+ db_prefix := DbPrefix
+ } = TxDb,
+
+ Prefix = erlfdb_tuple:pack({?DB_CHANGES}, DbPrefix),
+
+ Dir = case fabric2_util:get_value(dir, Options, fwd) of
+ rev -> rev;
+ _ -> fwd
+ end,
+
+ RestartTx = case fabric2_util:get_value(restart_tx, Options) of
+ undefined -> [{restart_tx, true}];
+ _AlreadySet -> []
+ end,
+
+ StartKey = get_since_seq(TxDb, Dir, SinceSeq),
+ EndKey = case Dir of
+ rev -> fabric2_util:seq_zero_vs();
+ _ -> fabric2_util:seq_max_vs()
+ end,
+ FoldOpts = [
+ {start_key, StartKey},
+ {end_key, EndKey}
+ ] ++ RestartTx ++ Options,
+
+ {ok, fabric2_fdb:fold_range(TxDb, Prefix, fun({K, V}, Acc) ->
+ {SeqVS} = erlfdb_tuple:unpack(K, Prefix),
+ {DocId, Deleted, RevId} = erlfdb_tuple:unpack(V),
+
+ Change = #{
+ id => DocId,
+ sequence => fabric2_fdb:vs_to_seq(SeqVS),
+ rev_id => RevId,
+ deleted => Deleted
+ },
+
+ maybe_stop(UserFun(Change, Acc))
+ end, UserAcc, FoldOpts)}
+ catch throw:{stop, FinalUserAcc} ->
+ {ok, FinalUserAcc}
+ end
+ end).
+
+
+dbname_suffix(DbName) ->
+ filename:basename(normalize_dbname(DbName)).
+
+
+normalize_dbname(DbName) ->
+    % Remove in the final cleanup. We don't need to handle the shards/ prefix
+    % or strip .couch suffixes anymore. Keep it for now to pass all the
+    % existing tests.
+ couch_db:normalize_dbname(DbName).
+
+
+validate_dbname(DbName) when is_list(DbName) ->
+ validate_dbname(?l2b(DbName));
+
+validate_dbname(DbName) when is_binary(DbName) ->
+ Normalized = normalize_dbname(DbName),
+ fabric2_db_plugin:validate_dbname(
+ DbName, Normalized, fun validate_dbname_int/2).
+
+validate_dbname_int(DbName, Normalized) when is_binary(DbName) ->
+ case validate_dbname_length(DbName) of
+ ok -> validate_dbname_pat(DbName, Normalized);
+ {error, _} = Error -> Error
+ end.
+
+
+validate_dbname_length(DbName) ->
+ MaxLength = config:get_integer("couchdb", "max_database_name_length",
+ ?DEFAULT_MAX_DATABASE_NAME_LENGTH),
+ case byte_size(DbName) =< MaxLength of
+ true -> ok;
+ false -> {error, {database_name_too_long, DbName}}
+ end.
+
+
+validate_dbname_pat(DbName, Normalized) ->
+ DbNoExt = couch_util:drop_dot_couch_ext(DbName),
+ case re:run(DbNoExt, ?DBNAME_REGEX, [{capture,none}, dollar_endonly]) of
+ match ->
+ ok;
+ nomatch ->
+ case is_system_db_name(Normalized) of
+ true -> ok;
+ false -> {error, {illegal_database_name, DbName}}
+ end
+ end.
+
+
+maybe_add_sys_db_callbacks(Db) ->
+ IsReplicatorDb = is_replicator_db(Db),
+ IsUsersDb = is_users_db(Db),
+
+ {BDU, ADR} = if
+ IsReplicatorDb ->
+ {
+ fun couch_replicator_docs:before_doc_update/3,
+ fun couch_replicator_docs:after_doc_read/2
+ };
+ IsUsersDb ->
+ {
+ fun fabric2_users_db:before_doc_update/3,
+ fun fabric2_users_db:after_doc_read/2
+ };
+ true ->
+ {undefined, undefined}
+ end,
+
+ Db#{
+ before_doc_update := BDU,
+ after_doc_read := ADR
+ }.
+
+
+make_db_info(DbName, Props) ->
+ BaseProps = [
+ {cluster, {[{n, 0}, {q, 0}, {r, 0}, {w, 0}]}},
+ {compact_running, false},
+ {data_size, 0},
+ {db_name, DbName},
+ {disk_format_version, 0},
+ {disk_size, 0},
+ {instance_start_time, <<"0">>},
+ {purge_seq, 0}
+ ],
+
+ lists:foldl(fun({Key, Val}, Acc) ->
+ lists:keystore(Key, 1, Acc, {Key, Val})
+ end, BaseProps, Props).
+
+
+drain_info_futures(FutureQ, Count, _UserFun, Acc) when Count < 100 ->
+ {FutureQ, Count, Acc};
+
+drain_info_futures(FutureQ, Count, UserFun, Acc) when Count >= 100 ->
+ {{value, {DbName, Future}}, RestQ} = queue:out(FutureQ),
+ InfoProps = fabric2_fdb:get_info_wait(Future),
+ DbInfo = make_db_info(DbName, InfoProps),
+ NewAcc = maybe_stop(UserFun({row, DbInfo}, Acc)),
+ {RestQ, Count - 1, NewAcc}.
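+
+% drain_info_futures/4 caps the number of in-flight get_info futures at 100:
+% once the queue reaches that size the oldest future is awaited and emitted
+% as a row, so listing many databases pipelines FDB reads without buffering
+% an unbounded number of futures.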
+
+
+drain_all_info_futures(FutureQ, UserFun, Acc) ->
+ case queue:out(FutureQ) of
+ {{value, {DbName, Future}}, RestQ} ->
+ InfoProps = fabric2_fdb:get_info_wait(Future),
+ DbInfo = make_db_info(DbName, InfoProps),
+ NewAcc = maybe_stop(UserFun({row, DbInfo}, Acc)),
+ drain_all_info_futures(RestQ, UserFun, NewAcc);
+ {empty, _} ->
+ Acc
+ end.
+
+
+drain_deleted_info_futures(FutureQ, Count, _UserFun, Acc) when Count < 100 ->
+ {FutureQ, Count, Acc};
+
+drain_deleted_info_futures(FutureQ, Count, UserFun, Acc) when Count >= 100 ->
+ {{value, {DbName, TimeStamp, Future}}, RestQ} = queue:out(FutureQ),
+ BaseProps = fabric2_fdb:get_info_wait(Future),
+ DeletedProps = BaseProps ++ [
+ {deleted, true},
+ {timestamp, TimeStamp}
+ ],
+ DbInfo = make_db_info(DbName, DeletedProps),
+ NewAcc = maybe_stop(UserFun({row, DbInfo}, Acc)),
+ {RestQ, Count - 1, NewAcc}.
+
+
+drain_all_deleted_info_futures(FutureQ, UserFun, Acc) ->
+ case queue:out(FutureQ) of
+ {{value, {DbName, TimeStamp, Future}}, RestQ} ->
+ BaseProps = fabric2_fdb:get_info_wait(Future),
+ DeletedProps = BaseProps ++ [
+ {deleted, true},
+ {timestamp, TimeStamp}
+ ],
+ DbInfo = make_db_info(DbName, DeletedProps),
+ NewAcc = maybe_stop(UserFun({row, DbInfo}, Acc)),
+ drain_all_deleted_info_futures(RestQ, UserFun, NewAcc);
+ {empty, _} ->
+ Acc
+ end.
+
+
+fold_docs_get_revs(Db, <<?LOCAL_DOC_PREFIX, _/binary>> = DocId, _) ->
+ fabric2_fdb:get_local_doc_rev_future(Db, DocId);
+
+fold_docs_get_revs(Db, DocId, true) ->
+ fabric2_fdb:get_all_revs_future(Db, DocId);
+
+fold_docs_get_revs(Db, DocId, false) ->
+ fabric2_fdb:get_winning_revs_future(Db, DocId, 1).
+
+
+fold_docs_get_revs_wait(_Db, <<?LOCAL_DOC_PREFIX, _/binary>>, RevsFuture) ->
+ Rev = fabric2_fdb:get_local_doc_rev_wait(RevsFuture),
+ [Rev];
+
+fold_docs_get_revs_wait(Db, _DocId, RevsFuture) ->
+ fabric2_fdb:get_revs_wait(Db, RevsFuture).
+
+
+fold_docs_get_doc_body_future(Db, <<?LOCAL_DOC_PREFIX, _/binary>> = DocId,
+ [Rev]) ->
+ fabric2_fdb:get_local_doc_body_future(Db, DocId, Rev);
+
+fold_docs_get_doc_body_future(Db, DocId, Revs) ->
+ Winner = get_rev_winner(Revs),
+ fabric2_fdb:get_doc_body_future(Db, DocId, Winner).
+
+
+fold_docs_get_doc_body_wait(Db, <<?LOCAL_DOC_PREFIX, _/binary>> = DocId, [Rev],
+ _DocOpts, BodyFuture) ->
+ case fabric2_fdb:get_local_doc_body_wait(Db, DocId, Rev, BodyFuture) of
+ {not_found, missing} -> {not_found, missing};
+ Doc -> {ok, Doc}
+ end;
+
+fold_docs_get_doc_body_wait(Db, DocId, Revs, DocOpts, BodyFuture) ->
+ RevInfo = get_rev_winner(Revs),
+ Base = fabric2_fdb:get_doc_body_wait(Db, DocId, RevInfo,
+ BodyFuture),
+ apply_open_doc_opts(Base, Revs, DocOpts).
+
+
+drain_fold_docs_revs_futures(_TxDb, #{revs_count := C} = Acc) when C < 100 ->
+ Acc;
+drain_fold_docs_revs_futures(TxDb, Acc) ->
+ drain_one_fold_docs_revs_future(TxDb, Acc).
+
+
+drain_all_fold_docs_revs_futures(_TxDb, #{revs_count := C} = Acc) when C =< 0 ->
+ Acc;
+drain_all_fold_docs_revs_futures(TxDb, #{revs_count := C} = Acc) when C > 0 ->
+ NewAcc = drain_one_fold_docs_revs_future(TxDb, Acc),
+ drain_all_fold_docs_revs_futures(TxDb, NewAcc).
+
+
+drain_one_fold_docs_revs_future(TxDb, Acc) ->
+ #{
+ revs_q := RevsQ,
+ revs_count := RevsCount,
+ body_q := BodyQ,
+ body_count := BodyCount
+ } = Acc,
+ {{value, {DocId, RevsFuture}}, RestRevsQ} = queue:out(RevsQ),
+
+ Revs = fold_docs_get_revs_wait(TxDb, DocId, RevsFuture),
+ DocFuture = case Revs of
+ [] ->
+ {DocId, [], not_found};
+ [_ | _] ->
+ BodyFuture = fold_docs_get_doc_body_future(TxDb, DocId, Revs),
+ {DocId, Revs, BodyFuture}
+ end,
+ NewAcc = Acc#{
+ revs_q := RestRevsQ,
+ revs_count := RevsCount - 1,
+ body_q := queue:in(DocFuture, BodyQ),
+ body_count := BodyCount + 1
+ },
+ drain_fold_docs_body_futures(TxDb, NewAcc).
+
+
+drain_fold_docs_body_futures(_TxDb, #{body_count := C} = Acc) when C < 100 ->
+ Acc;
+drain_fold_docs_body_futures(TxDb, Acc) ->
+ drain_one_fold_docs_body_future(TxDb, Acc).
+
+
+drain_all_fold_docs_body_futures(_TxDb, #{body_count := C} = Acc) when C =< 0 ->
+ Acc;
+drain_all_fold_docs_body_futures(TxDb, #{body_count := C} = Acc) when C > 0 ->
+ NewAcc = drain_one_fold_docs_body_future(TxDb, Acc),
+ drain_all_fold_docs_body_futures(TxDb, NewAcc).
+
+
+drain_one_fold_docs_body_future(TxDb, Acc) ->
+ #{
+ body_q := BodyQ,
+ body_count := BodyCount,
+ doc_opts := DocOpts,
+ user_fun := UserFun,
+ user_acc := UserAcc
+ } = Acc,
+ {{value, {DocId, Revs, BodyFuture}}, RestBodyQ} = queue:out(BodyQ),
+ Doc = case BodyFuture of
+ not_found ->
+ {not_found, missing};
+ _ ->
+ fold_docs_get_doc_body_wait(TxDb, DocId, Revs, DocOpts, BodyFuture)
+ end,
+ NewUserAcc = maybe_stop(UserFun(DocId, Doc, UserAcc)),
+ Acc#{
+ body_q := RestBodyQ,
+ body_count := BodyCount - 1,
+ user_acc := NewUserAcc
+ }.
+
+
+get_rev_winner(Revs) ->
+ [Winner] = lists:filter(fun(Rev) ->
+ maps:get(winner, Rev)
+ end, Revs),
+ Winner.
+
+
+new_revid(Db, Doc) ->
+ #doc{
+ id = DocId,
+ body = Body,
+ revs = {OldStart, OldRevs},
+ atts = Atts,
+ deleted = Deleted
+ } = Doc,
+
+ {NewAtts, AttSigInfo} = lists:mapfoldl(fun(Att, Acc) ->
+ [Name, Type, Data, Md5] = couch_att:fetch([name, type, data, md5], Att),
+ case Data of
+ {loc, _, _, _} ->
+ {Att, [{Name, Type, Md5} | Acc]};
+ _ ->
+ Att1 = couch_att:flush(Db, DocId, Att),
+ Att2 = couch_att:store(revpos, OldStart + 1, Att1),
+ {Att2, [{Name, Type, couch_att:fetch(md5, Att2)} | Acc]}
+ end
+ end, [], Atts),
+
+ Rev = case length(Atts) == length(AttSigInfo) of
+ true ->
+ OldRev = case OldRevs of [] -> 0; [OldRev0 | _] -> OldRev0 end,
+ SigTerm = [Deleted, OldStart, OldRev, Body, AttSigInfo],
+ couch_hash:md5_hash(term_to_binary(SigTerm, [{minor_version, 1}]));
+ false ->
+ erlang:error(missing_att_info)
+ end,
+
+ Doc#doc{
+ revs = {OldStart + 1, [Rev | OldRevs]},
+ atts = NewAtts
+ }.
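+
+% Note: the new revision id computed above is the MD5 of
+% [Deleted, OldStart, OldRev, Body, AttSigInfo], so the same update applied
+% to the same parent revision deterministically yields the same rev id.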
+
+
+get_all_docs_meta(TxDb, Options) ->
+ NS = couch_util:get_value(namespace, Options),
+ DocCount = get_doc_count(TxDb, NS),
+ case lists:keyfind(update_seq, 1, Options) of
+ {_, true} ->
+ UpdateSeq = fabric2_db:get_update_seq(TxDb),
+ [{update_seq, UpdateSeq}];
+ _ ->
+ []
+ end ++ [{total, DocCount}, {offset, null}].
+
+
+maybe_set_interactive(#{} = Db, Options) ->
+ Interactive = fabric2_util:get_value(interactive, Options, false),
+ Db#{interactive := Interactive}.
+
+
+maybe_set_user_ctx(Db, Options) ->
+ case fabric2_util:get_value(user_ctx, Options) of
+ #user_ctx{} = UserCtx ->
+ set_user_ctx(Db, UserCtx);
+ undefined ->
+ Db
+ end.
+
+
+is_member(Db, {SecProps}) when is_list(SecProps) ->
+ case is_admin(Db, {SecProps}) of
+ true ->
+ true;
+ false ->
+ case is_public_db(SecProps) of
+ true ->
+ true;
+ false ->
+ {Members} = get_members(SecProps),
+ UserCtx = get_user_ctx(Db),
+ is_authorized(Members, UserCtx)
+ end
+ end.
+
+
+is_authorized(Group, UserCtx) ->
+ #user_ctx{
+ name = UserName,
+ roles = UserRoles
+ } = UserCtx,
+ Names = fabric2_util:get_value(<<"names">>, Group, []),
+ Roles = fabric2_util:get_value(<<"roles">>, Group, []),
+ case check_security(roles, UserRoles, [<<"_admin">> | Roles]) of
+ true ->
+ true;
+ false ->
+ check_security(names, UserName, Names)
+ end.
+
+
+check_security(roles, [], _) ->
+ false;
+check_security(roles, UserRoles, Roles) ->
+ UserRolesSet = ordsets:from_list(UserRoles),
+ RolesSet = ordsets:from_list(Roles),
+ not ordsets:is_disjoint(UserRolesSet, RolesSet);
+check_security(names, _, []) ->
+ false;
+check_security(names, null, _) ->
+ false;
+check_security(names, UserName, Names) ->
+ lists:member(UserName, Names).
+
+
+throw_security_error(#user_ctx{name = null} = UserCtx) ->
+ Reason = <<"You are not authorized to access this db.">>,
+ throw_security_error(UserCtx, Reason);
+throw_security_error(#user_ctx{name = _} = UserCtx) ->
+ Reason = <<"You are not allowed to access this db.">>,
+ throw_security_error(UserCtx, Reason).
+
+
+throw_security_error(#user_ctx{} = UserCtx, Reason) ->
+ Error = security_error_type(UserCtx),
+ throw({Error, Reason}).
+
+
+security_error_type(#user_ctx{name = null}) ->
+ unauthorized;
+security_error_type(#user_ctx{name = _}) ->
+ forbidden.
+
+
+is_public_db(SecProps) ->
+ {Members} = get_members(SecProps),
+ Names = fabric2_util:get_value(<<"names">>, Members, []),
+ Roles = fabric2_util:get_value(<<"roles">>, Members, []),
+ Names =:= [] andalso Roles =:= [].
+
+
+get_admins(SecProps) ->
+ fabric2_util:get_value(<<"admins">>, SecProps, {[]}).
+
+
+get_members(SecProps) ->
+ % we fallback to readers here for backwards compatibility
+ case fabric2_util:get_value(<<"members">>, SecProps) of
+ undefined ->
+ fabric2_util:get_value(<<"readers">>, SecProps, {[]});
+ Members ->
+ Members
+ end.
+
+
+apply_open_doc_opts(Doc0, Revs, Options) ->
+ IncludeRevsInfo = lists:member(revs_info, Options),
+ IncludeConflicts = lists:member(conflicts, Options),
+ IncludeDelConflicts = lists:member(deleted_conflicts, Options),
+ IncludeLocalSeq = lists:member(local_seq, Options),
+
+ % This revs_info becomes fairly useless now that we're
+ % not keeping old document bodies around...
+ Meta1 = if not IncludeRevsInfo -> []; true ->
+ {Pos, [Rev | RevPath]} = Doc0#doc.revs,
+ RevPathMissing = lists:map(fun(R) -> {R, missing} end, RevPath),
+ [{revs_info, Pos, [{Rev, available} | RevPathMissing]}]
+ end,
+
+ Meta2 = if not IncludeConflicts -> []; true ->
+ Conflicts = [RI || RI = #{winner := false, deleted := false} <- Revs],
+ if Conflicts == [] -> []; true ->
+ ConflictRevs = [maps:get(rev_id, RI) || RI <- Conflicts],
+ [{conflicts, ConflictRevs}]
+ end
+ end,
+
+ Meta3 = if not IncludeDelConflicts -> []; true ->
+ DelConflicts = [RI || RI = #{winner := false, deleted := true} <- Revs],
+ if DelConflicts == [] -> []; true ->
+ DelConflictRevs = [maps:get(rev_id, RI) || RI <- DelConflicts],
+ [{deleted_conflicts, DelConflictRevs}]
+ end
+ end,
+
+ Meta4 = if not IncludeLocalSeq -> []; true ->
+ #{winner := true, sequence := SeqVS} = lists:last(Revs),
+ [{local_seq, fabric2_fdb:vs_to_seq(SeqVS)}]
+ end,
+
+ Doc1 = case lists:keyfind(atts_since, 1, Options) of
+ {_, PossibleAncestors} ->
+ #doc{
+ revs = DocRevs,
+ atts = Atts0
+ } = Doc0,
+ RevPos = find_ancestor_rev_pos(DocRevs, PossibleAncestors),
+ Atts1 = lists:map(fun(Att) ->
+ [AttPos, Data] = couch_att:fetch([revpos, data], Att),
+ if AttPos > RevPos -> couch_att:store(data, Data, Att);
+ true -> couch_att:store(data, stub, Att)
+ end
+ end, Atts0),
+ Doc0#doc{atts = Atts1};
+ false ->
+ Doc0
+ end,
+
+ {ok, Doc1#doc{meta = Meta1 ++ Meta2 ++ Meta3 ++ Meta4}}.
+
+
+find_ancestor_rev_pos({_, []}, _PossibleAncestors) ->
+ 0;
+find_ancestor_rev_pos(_DocRevs, []) ->
+ 0;
+find_ancestor_rev_pos({RevPos, [RevId | Rest]}, AttsSinceRevs) ->
+ case lists:member({RevPos, RevId}, AttsSinceRevs) of
+ true -> RevPos;
+ false -> find_ancestor_rev_pos({RevPos - 1, Rest}, AttsSinceRevs)
+ end.
+
+
+filter_found_revs(RevInfo, Revs) ->
+ #{
+ rev_id := {Pos, Rev},
+ rev_path := RevPath
+ } = RevInfo,
+ FullRevPath = [Rev | RevPath],
+ lists:flatmap(fun({FindPos, FindRev} = RevIdToFind) ->
+ if FindPos > Pos -> [RevIdToFind]; true ->
+ % Add 1 because lists:nth is 1 based
+ Idx = Pos - FindPos + 1,
+ case Idx > length(FullRevPath) of
+ true ->
+ [RevIdToFind];
+ false ->
+ case lists:nth(Idx, FullRevPath) == FindRev of
+ true -> [];
+ false -> [RevIdToFind]
+ end
+ end
+ end
+ end, Revs).
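+
+% Worked example (illustrative): with RevInfo #{rev_id => {3, C}, rev_path =>
+% [B, A]} the FullRevPath is [C, B, A]; a requested {2, B} matches
+% lists:nth(2, FullRevPath) and is dropped as already known, while {4, D} has
+% FindPos > Pos and is kept in the missing list.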
+
+
+find_possible_ancestors(RevInfos, MissingRevs) ->
+ % Find any revinfos that are possible ancestors
+ % of the missing revs. A possible ancestor is
+ % any rev that has a start position less than
+ % any missing revision. Stated alternatively,
+    % find any revinfo that could theoretically be
+    % extended into one or more of the missing
+    % revisions.
+ %
+ % Since we are looking at any missing revision
+ % we can just compare against the maximum missing
+ % start position.
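+    %
+    % For example, if the missing revs are [{4, A}, {6, B}] then
+    % MaxMissingPos is 6 and every known rev with a start position
+    % below 6 is reported as a possible ancestor.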
+ MaxMissingPos = case MissingRevs of
+ [] -> 0;
+ [_ | _] -> lists:max([Start || {Start, _Rev} <- MissingRevs])
+ end,
+ lists:flatmap(fun(RevInfo) ->
+ #{rev_id := {RevPos, _} = RevId} = RevInfo,
+ case RevPos < MaxMissingPos of
+ true -> [RevId];
+ false -> []
+ end
+ end, RevInfos).
+
+
+apply_before_doc_update(Db, Docs, Options) ->
+ UpdateType = case lists:member(replicated_changes, Options) of
+ true -> replicated_changes;
+ false -> interactive_edit
+ end,
+ lists:map(fun(Doc) ->
+ fabric2_db_plugin:before_doc_update(Db, Doc, UpdateType)
+ end, Docs).
+
+
+update_doc_int(#{} = Db, #doc{} = Doc, Options) ->
+ IsLocal = case Doc#doc.id of
+ <<?LOCAL_DOC_PREFIX, _/binary>> -> true;
+ _ -> false
+ end,
+ try
+ case {IsLocal, is_replicated(Options)} of
+ {false, false} -> update_doc_interactive(Db, Doc, Options);
+ {false, true} -> update_doc_replicated(Db, Doc, Options);
+ {true, _} -> update_local_doc(Db, Doc, Options)
+ end
+ catch throw:{?MODULE, Return} ->
+ Return
+ end.
+
+
+batch_update_docs(Db, Docs, Options) ->
+ BAcc = #bacc{
+ db = Db,
+ docs = Docs,
+ batch_size = get_batch_size(Options),
+ options = Options,
+ rev_futures = #{},
+ seen = [],
+ results = []
+ },
+ #bacc{results = Res} = batch_update_docs(BAcc),
+ lists:reverse(Res).
+
+
+batch_update_docs(#bacc{docs = []} = BAcc) ->
+ BAcc;
+
+batch_update_docs(#bacc{db = Db} = BAcc) ->
+ #bacc{
+ db = Db,
+ docs = Docs,
+ options = Options
+ } = BAcc,
+
+ BAccTx2 = fabric2_fdb:transactional(Db, fun(TxDb) ->
+ BAccTx = BAcc#bacc{db = TxDb},
+ case is_replicated(Options) of
+ false ->
+ Tagged = tag_docs(Docs),
+ RevFutures = get_winning_rev_futures(TxDb, Tagged),
+ BAccTx1 = BAccTx#bacc{
+ docs = Tagged,
+ rev_futures = RevFutures
+ },
+ batch_update_interactive_tx(BAccTx1);
+ true ->
+ BAccTx1 = batch_update_replicated_tx(BAccTx),
+ % For replicated updates reset `seen` after every transaction
+ BAccTx1#bacc{seen = []}
+ end
+ end),
+
+ % Clean up after the transaction ends so we can recurse with a clean state
+ maps:map(fun(Tag, RangeFuture) when is_reference(Tag) ->
+ ok = erlfdb:cancel(RangeFuture, [flush])
+ end, BAccTx2#bacc.rev_futures),
+
+ BAcc1 = BAccTx2#bacc{
+ db = Db,
+ rev_futures = #{}
+ },
+
+ batch_update_docs(BAcc1).
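+
+% Batching note: the *_tx helpers below keep pulling docs into the current
+% transaction until fabric2_fdb:get_approximate_tx_size/1 exceeds the batch
+% size (?DEFAULT_UPDATE_DOCS_BATCH_SIZE = 2,500,000 bytes by default), which
+% keeps each write comfortably below FoundationDB's 10 MB transaction limit.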
+
+
+batch_update_interactive_tx(#bacc{docs = []} = BAcc) ->
+ BAcc;
+
+batch_update_interactive_tx(#bacc{} = BAcc) ->
+ #bacc{
+ db = TxDb,
+ docs = [Doc | Docs],
+ options = Options,
+ batch_size = MaxSize,
+ rev_futures = RevFutures,
+ seen = Seen,
+ results = Results
+ } = BAcc,
+ {Res, Seen1} = try
+ update_docs_interactive(TxDb, Doc, Options, RevFutures, Seen)
+ catch throw:{?MODULE, Return} ->
+ {Return, Seen}
+ end,
+ BAcc1 = BAcc#bacc{
+ docs = Docs,
+ results = [Res | Results],
+ seen = Seen1
+ },
+ case fabric2_fdb:get_approximate_tx_size(TxDb) > MaxSize of
+ true -> BAcc1;
+ false -> batch_update_interactive_tx(BAcc1)
+ end.
+
+
+batch_update_replicated_tx(#bacc{docs = []} = BAcc) ->
+ BAcc;
+
+batch_update_replicated_tx(#bacc{} = BAcc) ->
+ #bacc{
+ db = TxDb,
+ docs = [Doc | Docs],
+ options = Options,
+ batch_size = MaxSize,
+ seen = Seen,
+ results = Results
+ } = BAcc,
+ case lists:member(Doc#doc.id, Seen) of
+ true ->
+ % If we already updated this doc in the current transaction, wait
+ % till the next transaction to update it again.
+ BAcc;
+ false ->
+ Res = update_doc_int(TxDb, Doc, Options),
+ BAcc1 = BAcc#bacc{
+ docs = Docs,
+ results = [Res | Results],
+ seen = [Doc#doc.id | Seen]
+ },
+ case fabric2_fdb:get_approximate_tx_size(TxDb) > MaxSize of
+ true -> BAcc1;
+ false -> batch_update_replicated_tx(BAcc1)
+ end
+ end.
+
+
+update_docs_interactive(Db, #doc{id = <<?LOCAL_DOC_PREFIX, _/binary>>} = Doc,
+ Options, _Futures, SeenIds) ->
+ {update_local_doc(Db, Doc, Options), SeenIds};
+
+update_docs_interactive(Db, Doc, Options, Futures, SeenIds) ->
+ case lists:member(Doc#doc.id, SeenIds) of
+ true ->
+ {conflict, SeenIds};
+ false ->
+ Future = maps:get(doc_tag(Doc), Futures),
+ case update_doc_interactive(Db, Doc, Future, Options) of
+ {ok, _} = Resp ->
+ {Resp, [Doc#doc.id | SeenIds]};
+ _ = Resp ->
+ {Resp, SeenIds}
+ end
+ end.
+
+
+update_doc_interactive(Db, Doc0, Options) ->
+ % Get the current winning revision. This is needed
+ % regardless of which branch we're updating. The extra
+ % revision we're grabbing is an optimization to
+ % save us a round trip if we end up deleting
+ % the winning revision branch.
+ NumRevs = if Doc0#doc.deleted -> 2; true -> 1 end,
+ Future = fabric2_fdb:get_winning_revs_future(Db, Doc0#doc.id, NumRevs),
+ update_doc_interactive(Db, Doc0, Future, Options).
+
+
+update_doc_interactive(Db, Doc0, Future, _Options) ->
+ RevInfos = fabric2_fdb:get_revs_wait(Db, Future),
+ {Winner, SecondPlace} = case RevInfos of
+ [] -> {not_found, not_found};
+ [WRI] -> {WRI, not_found};
+ [WRI, SPRI] -> {WRI, SPRI}
+ end,
+ WinnerRevId = case Winner of
+ not_found ->
+ {0, <<>>};
+ _ ->
+ case maps:get(deleted, Winner) of
+ true -> {0, <<>>};
+ false -> maps:get(rev_id, Winner)
+ end
+ end,
+
+ % Check that a revision was specified if required
+ Doc0RevId = doc_to_revid(Doc0),
+ HasRev = Doc0RevId =/= {0, <<>>},
+ if HasRev orelse WinnerRevId == {0, <<>>} -> ok; true ->
+ ?RETURN({Doc0, conflict})
+ end,
+
+    % Allow inserting new deleted documents. This only works when the
+    % document has never existed, to match CouchDB 3.x behavior.
+ case not HasRev andalso Doc0#doc.deleted andalso is_map(Winner) of
+ true -> ?RETURN({Doc0, conflict});
+ false -> ok
+ end,
+
+ % Get the target revision to update
+ Target = case Doc0RevId == WinnerRevId of
+ true ->
+ Winner;
+ false ->
+ case fabric2_fdb:get_non_deleted_rev(Db, Doc0#doc.id, Doc0RevId) of
+ #{deleted := false} = Target0 ->
+ Target0;
+ not_found ->
+ % Either a missing revision or a deleted
+ % revision. Either way a conflict. Note
+ % that we get not_found for a deleted revision
+ % because we only check for the non-deleted
+ % key in fdb
+ ?RETURN({Doc0, conflict})
+ end
+ end,
+
+ Doc1 = case Winner of
+ #{deleted := true} when not Doc0#doc.deleted ->
+ % When recreating a deleted document we want to extend
+ % the winning revision branch rather than create a
+ % new branch. If we did not do this we could be
+ % recreating into a state that previously existed.
+ Doc0#doc{revs = fabric2_util:revinfo_to_revs(Winner)};
+ #{} ->
+ % Otherwise we're extending the target's revision
+ % history with this update
+ Doc0#doc{revs = fabric2_util:revinfo_to_revs(Target)};
+ not_found ->
+ % Creating a new doc means our revs start empty
+ Doc0
+ end,
+
+ % Validate the doc update and create the
+ % new revinfo map
+ Doc2 = prep_and_validate(Db, Doc1, Target),
+
+ Doc3 = new_revid(Db, Doc2),
+
+ #doc{
+ deleted = NewDeleted,
+ revs = {NewRevPos, [NewRev | NewRevPath]},
+ atts = Atts
+ } = Doc4 = stem_revisions(Db, Doc3),
+
+ NewRevInfo = #{
+ winner => undefined,
+ exists => false,
+ deleted => NewDeleted,
+ rev_id => {NewRevPos, NewRev},
+ rev_path => NewRevPath,
+ sequence => undefined,
+ branch_count => undefined,
+ att_hash => fabric2_util:hash_atts(Atts),
+ rev_size => fabric2_util:rev_size(Doc4)
+ },
+
+    % Gather the list of possible winning revisions
+ Possible = case Target == Winner of
+ true when not Doc4#doc.deleted ->
+ [NewRevInfo];
+ true when Doc4#doc.deleted ->
+ case SecondPlace of
+ #{} -> [NewRevInfo, SecondPlace];
+ not_found -> [NewRevInfo]
+ end;
+ false ->
+ [NewRevInfo, Winner]
+ end,
+
+ % Sort the rev infos such that the winner is first
+ {NewWinner0, NonWinner} = case fabric2_util:sort_revinfos(Possible) of
+ [W] -> {W, not_found};
+ [W, NW] -> {W, NW}
+ end,
+
+ BranchCount = case Winner of
+ not_found -> 1;
+ #{branch_count := BC} -> BC
+ end,
+ NewWinner = NewWinner0#{branch_count := BranchCount},
+ ToUpdate = if NonWinner == not_found -> []; true -> [NonWinner] end,
+ ToRemove = if Target == not_found -> []; true -> [Target] end,
+
+ ok = fabric2_fdb:write_doc(
+ Db,
+ Doc4,
+ NewWinner,
+ Winner,
+ ToUpdate,
+ ToRemove
+ ),
+
+ {ok, {NewRevPos, NewRev}}.
+
+
+update_doc_replicated(Db, Doc0, _Options) ->
+ #doc{
+ id = DocId,
+ deleted = Deleted,
+ revs = {RevPos, [Rev | RevPath]}
+ } = Doc0,
+
+ DocRevInfo0 = #{
+ winner => undefined,
+ exists => false,
+ deleted => Deleted,
+ rev_id => {RevPos, Rev},
+ rev_path => RevPath,
+ sequence => undefined,
+ branch_count => undefined,
+ att_hash => <<>>,
+ rev_size => null
+ },
+
+ AllRevInfos = fabric2_fdb:get_all_revs(Db, DocId),
+
+ RevTree = lists:foldl(fun(RI, TreeAcc) ->
+ RIPath = fabric2_util:revinfo_to_path(RI),
+ {Merged, _} = couch_key_tree:merge(TreeAcc, RIPath),
+ Merged
+ end, [], AllRevInfos),
+
+ DocRevPath = fabric2_util:revinfo_to_path(DocRevInfo0),
+
+ {NewTree, Status} = couch_key_tree:merge(RevTree, DocRevPath),
+ if Status /= internal_node -> ok; true ->
+ % We already know this revision so nothing
+ % left to do.
+ ?RETURN({Doc0, {ok, []}})
+ end,
+
+    % It's possible to have a replication with fewer than $revs_limit
+    % revisions which extends an existing branch. To avoid
+ % losing revision history we extract the new node from the
+ % tree and use the combined path after stemming.
+ {[{_, {RevPos, UnstemmedRevs}}], []}
+ = couch_key_tree:get(NewTree, [{RevPos, Rev}]),
+
+ Doc1 = stem_revisions(Db, Doc0#doc{revs = {RevPos, UnstemmedRevs}}),
+
+ {RevPos, [Rev | NewRevPath]} = Doc1#doc.revs,
+ DocRevInfo1 = DocRevInfo0#{rev_path := NewRevPath},
+
+ % Find any previous revision we knew about for
+ % validation and attachment handling.
+ AllLeafsFull = couch_key_tree:get_all_leafs_full(NewTree),
+ LeafPath = get_leaf_path(RevPos, Rev, AllLeafsFull),
+ PrevRevInfo = find_prev_revinfo(RevPos, LeafPath),
+ Doc2 = prep_and_validate(Db, Doc1, PrevRevInfo),
+ Doc3 = flush_doc_atts(Db, Doc2),
+ DocRevInfo2 = DocRevInfo1#{
+        att_hash => fabric2_util:hash_atts(Doc3#doc.atts),
+ rev_size => fabric2_util:rev_size(Doc3)
+ },
+
+ % Possible winners are the previous winner and
+ % the new DocRevInfo
+ Winner = case fabric2_util:sort_revinfos(AllRevInfos) of
+ [#{winner := true} = WRI | _] -> WRI;
+ [] -> not_found
+ end,
+ {NewWinner0, NonWinner} = case Winner == PrevRevInfo of
+ true ->
+ {DocRevInfo2, not_found};
+ false ->
+ [W, NW] = fabric2_util:sort_revinfos([Winner, DocRevInfo2]),
+ {W, NW}
+ end,
+
+ NewWinner = NewWinner0#{branch_count := length(AllLeafsFull)},
+ ToUpdate = if NonWinner == not_found -> []; true -> [NonWinner] end,
+ ToRemove = if PrevRevInfo == not_found -> []; true -> [PrevRevInfo] end,
+
+ ok = fabric2_fdb:write_doc(
+ Db,
+ Doc3,
+ NewWinner,
+ Winner,
+ ToUpdate,
+ ToRemove
+ ),
+
+ {ok, []}.
+
+
+update_local_doc(Db, Doc0, _Options) ->
+ Doc1 = case increment_local_doc_rev(Doc0) of
+ {ok, Updated} -> Updated;
+ {error, Error} -> ?RETURN({Doc0, Error})
+ end,
+
+ ok = fabric2_fdb:write_local_doc(Db, Doc1),
+
+ #doc{revs = {0, [Rev]}} = Doc1,
+ {ok, {0, integer_to_binary(Rev)}}.
+
+
+flush_doc_atts(Db, Doc) ->
+ #doc{
+ id = DocId,
+ atts = Atts
+ } = Doc,
+ NewAtts = lists:map(fun(Att) ->
+ case couch_att:fetch(data, Att) of
+ {loc, _, _, _} ->
+ Att;
+ _ ->
+ couch_att:flush(Db, DocId, Att)
+ end
+ end, Atts),
+ Doc#doc{atts = NewAtts}.
+
+
+get_winning_rev_futures(Db, Docs) ->
+ lists:foldl(fun(Doc, Acc) ->
+ #doc{
+ id = DocId,
+ deleted = Deleted
+ } = Doc,
+ IsLocal = case DocId of
+ <<?LOCAL_DOC_PREFIX, _/binary>> -> true;
+ _ -> false
+ end,
+ if IsLocal -> Acc; true ->
+ NumRevs = if Deleted -> 2; true -> 1 end,
+ Future = fabric2_fdb:get_winning_revs_future(Db, DocId, NumRevs),
+ DocTag = doc_tag(Doc),
+ Acc#{DocTag => Future}
+ end
+ end, #{}, Docs).
+
+
+prep_and_validate(Db, NewDoc, PrevRevInfo) ->
+ HasStubs = couch_doc:has_stubs(NewDoc),
+ HasVDUs = [] /= maps:get(validate_doc_update_funs, Db),
+ IsDDoc = case NewDoc#doc.id of
+ <<?DESIGN_DOC_PREFIX, _/binary>> -> true;
+ _ -> false
+ end,
+
+ WasDeleted = case PrevRevInfo of
+ not_found -> false;
+ #{deleted := D} -> D
+ end,
+
+ PrevDoc = case HasStubs orelse (HasVDUs and not IsDDoc) of
+ true when PrevRevInfo /= not_found, not WasDeleted ->
+ case fabric2_fdb:get_doc_body(Db, NewDoc#doc.id, PrevRevInfo) of
+ #doc{} = PDoc -> PDoc;
+ {not_found, _} -> nil
+ end;
+ _ ->
+ nil
+ end,
+
+ MergedDoc = if not HasStubs -> NewDoc; true ->
+ % This will throw an error if we have any
+ % attachment stubs missing data
+ couch_doc:merge_stubs(NewDoc, PrevDoc)
+ end,
+ check_duplicate_attachments(MergedDoc),
+ validate_doc_update(Db, MergedDoc, PrevDoc),
+ MergedDoc.
+
+
+validate_doc_update(Db, #doc{id = <<"_design/", _/binary>>} = Doc, _) ->
+ case catch check_is_admin(Db) of
+ ok -> validate_ddoc(Db, Doc);
+ Error -> ?RETURN({Doc, Error})
+ end;
+validate_doc_update(Db, Doc, PrevDoc) ->
+ #{
+ security_doc := Security,
+ validate_doc_update_funs := VDUs
+ } = Db,
+ Fun = fun() ->
+ JsonCtx = fabric2_util:user_ctx_to_json(Db),
+ lists:map(fun(VDU) ->
+ try
+ case VDU(Doc, PrevDoc, JsonCtx, Security) of
+ ok -> ok;
+ Error1 -> throw(Error1)
+ end
+ catch throw:Error2 ->
+ ?RETURN({Doc, Error2})
+ end
+ end, VDUs)
+ end,
+ Stat = [couchdb, query_server, vdu_process_time],
+ if VDUs == [] -> ok; true ->
+ couch_stats:update_histogram(Stat, Fun)
+ end.
+
+
+validate_ddoc(Db, DDoc) ->
+ try
+ ok = couch_index_server:validate(Db, couch_doc:with_ejson_body(DDoc))
+ catch
+ throw:{invalid_design_doc, Reason} ->
+ throw({bad_request, invalid_design_doc, Reason});
+ throw:{compilation_error, Reason} ->
+ throw({bad_request, compilation_error, Reason});
+ throw:Error ->
+ ?RETURN({DDoc, Error})
+ end.
+
+
+validate_atomic_update(_, false) ->
+ ok;
+validate_atomic_update(AllDocs, true) ->
+    % TODO: actually perform the validation. This requires some hackery: we
+    % would need to extract the prep_and_validate_updates function from
+    % couch_db and run only that, without actually writing on success.
+ Error = {not_implemented, <<"all_or_nothing is not supported">>},
+ PreCommitFailures = lists:map(fun(#doc{id=Id, revs = {Pos,Revs}}) ->
+ case Revs of [] -> RevId = <<>>; [RevId|_] -> ok end,
+ {{Id, {Pos, RevId}}, Error}
+ end, AllDocs),
+ throw({aborted, PreCommitFailures}).
+
+
+check_duplicate_attachments(#doc{atts = Atts}) ->
+ lists:foldl(fun(Att, Names) ->
+ Name = couch_att:fetch(name, Att),
+ case ordsets:is_element(Name, Names) of
+ true -> throw({bad_request, <<"Duplicate attachments">>});
+ false -> ordsets:add_element(Name, Names)
+ end
+ end, ordsets:new(), Atts).
+
+
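+% Normalize a since sequence: for forward folds <<>>, <<"0">> and 0 map to the
+% zero versionstamp (for rev folds an empty seq means `now`), `now` resolves to
+% the latest change in the db, and a 24-hex-character sequence is decoded to a
+% versionstamp and advanced by one so iteration starts after it.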
+get_since_seq(Db, rev, <<>>) ->
+ get_since_seq(Db, rev, now);
+
+get_since_seq(_Db, _Dir, Seq) when Seq == <<>>; Seq == <<"0">>; Seq == 0 ->
+ fabric2_util:seq_zero_vs();
+
+get_since_seq(Db, Dir, Seq) when Seq == now; Seq == <<"now">> ->
+ CurrSeq = fabric2_fdb:get_last_change(Db),
+ get_since_seq(Db, Dir, CurrSeq);
+
+get_since_seq(_Db, _Dir, Seq) when is_binary(Seq), size(Seq) == 24 ->
+ fabric2_fdb:next_vs(fabric2_fdb:seq_to_vs(Seq));
+
+get_since_seq(Db, Dir, List) when is_list(List) ->
+ get_since_seq(Db, Dir, list_to_binary(List));
+
+get_since_seq(_Db, _Dir, Seq) ->
+ erlang:error({invalid_since_seq, Seq}).
+
+
+get_leaf_path(Pos, Rev, [{Pos, [{Rev, _RevInfo} | LeafPath]} | _]) ->
+ LeafPath;
+get_leaf_path(Pos, Rev, [_WrongLeaf | RestLeafs]) ->
+ get_leaf_path(Pos, Rev, RestLeafs).
+
+
+find_prev_revinfo(_Pos, []) ->
+ not_found;
+find_prev_revinfo(Pos, [{_Rev, ?REV_MISSING} | RestPath]) ->
+ find_prev_revinfo(Pos - 1, RestPath);
+find_prev_revinfo(_Pos, [{_Rev, #{} = RevInfo} | _]) ->
+ RevInfo.
+
+
+increment_local_doc_rev(#doc{deleted = true} = Doc) ->
+ {ok, Doc#doc{revs = {0, [0]}}};
+increment_local_doc_rev(#doc{revs = {0, []}} = Doc) ->
+ {ok, Doc#doc{revs = {0, [1]}}};
+increment_local_doc_rev(#doc{revs = {0, [RevStr | _]}} = Doc) ->
+ try
+ PrevRev = binary_to_integer(RevStr),
+ {ok, Doc#doc{revs = {0, [PrevRev + 1]}}}
+ catch error:badarg ->
+ {error, <<"Invalid rev format">>}
+ end;
+increment_local_doc_rev(#doc{}) ->
+ {error, <<"Invalid rev format">>}.
+
+
+doc_to_revid(#doc{revs = Revs}) ->
+ case Revs of
+ {0, []} -> {0, <<>>};
+ {RevPos, [Rev | _]} -> {RevPos, Rev}
+ end.
+
+
+tag_docs([]) ->
+ [];
+tag_docs([#doc{meta = Meta} = Doc | Rest]) ->
+ Meta1 = lists:keystore(ref, 1, Meta, {ref, make_ref()}),
+ NewDoc = Doc#doc{meta = Meta1},
+ [NewDoc | tag_docs(Rest)].
+
+
+doc_tag(#doc{meta = Meta}) ->
+ fabric2_util:get_value(ref, Meta).
+
+
+idrevs({Id, Revs}) when is_list(Revs) ->
+ {docid(Id), [rev(R) || R <- Revs]}.
+
+
+docid(DocId) when is_list(DocId) ->
+ list_to_binary(DocId);
+docid(DocId) ->
+ DocId.
+
+
+rev(Rev) when is_list(Rev); is_binary(Rev) ->
+ couch_doc:parse_rev(Rev);
+rev({Seq, Hash} = Rev) when is_integer(Seq), is_binary(Hash) ->
+ Rev.
+
+
+maybe_stop({ok, Acc}) ->
+ Acc;
+maybe_stop({stop, Acc}) ->
+ throw({stop, Acc}).
+
+
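+% Clamp the user-supplied start/end keys to the design doc id range
+% (?FIRST_DDOC_KEY .. ?LAST_DDOC_KEY, presumably <<"_design/">> and
+% <<"_design0">>) so design-doc folds never leave that range.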
+set_design_doc_keys(Options1) ->
+ Dir = couch_util:get_value(dir, Options1, fwd),
+ Options2 = set_design_doc_start_key(Options1, Dir),
+ set_design_doc_end_key(Options2, Dir).
+
+
+set_design_doc_start_key(Options, fwd) ->
+ Key1 = couch_util:get_value(start_key, Options, ?FIRST_DDOC_KEY),
+ Key2 = max(Key1, ?FIRST_DDOC_KEY),
+ lists:keystore(start_key, 1, Options, {start_key, Key2});
+
+set_design_doc_start_key(Options, rev) ->
+ Key1 = couch_util:get_value(start_key, Options, ?LAST_DDOC_KEY),
+ Key2 = min(Key1, ?LAST_DDOC_KEY),
+ lists:keystore(start_key, 1, Options, {start_key, Key2}).
+
+
+set_design_doc_end_key(Options, fwd) ->
+ case couch_util:get_value(end_key_gt, Options) of
+ undefined ->
+ Key1 = couch_util:get_value(end_key, Options, ?LAST_DDOC_KEY),
+ Key2 = min(Key1, ?LAST_DDOC_KEY),
+ lists:keystore(end_key, 1, Options, {end_key, Key2});
+ EKeyGT ->
+ Key2 = min(EKeyGT, ?LAST_DDOC_KEY),
+ lists:keystore(end_key_gt, 1, Options, {end_key_gt, Key2})
+ end;
+
+set_design_doc_end_key(Options, rev) ->
+ case couch_util:get_value(end_key_gt, Options) of
+ undefined ->
+ Key1 = couch_util:get_value(end_key, Options, ?FIRST_DDOC_KEY),
+ Key2 = max(Key1, ?FIRST_DDOC_KEY),
+ lists:keystore(end_key, 1, Options, {end_key, Key2});
+ EKeyGT ->
+ Key2 = max(EKeyGT, ?FIRST_DDOC_KEY),
+ lists:keystore(end_key_gt, 1, Options, {end_key_gt, Key2})
+ end.
+
+
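+% Stem the revision path down to the db's revs_limit. For example, with
+% revs_limit = 1000 a document at rev position 1005 keeps only the newest
+% 1000 entries of its rev path.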
+stem_revisions(#{} = Db, #doc{} = Doc) ->
+ #{revs_limit := RevsLimit} = Db,
+ #doc{revs = {RevPos, Revs}} = Doc,
+ case RevPos >= RevsLimit of
+ true -> Doc#doc{revs = {RevPos, lists:sublist(Revs, RevsLimit)}};
+ false -> Doc
+ end.
+
+
+open_json_doc(Db, DocId, OpenOpts, DocOpts) ->
+ case fabric2_db:open_doc(Db, DocId, OpenOpts) of
+ {not_found, missing} ->
+ [];
+ {ok, #doc{deleted = true}} ->
+ [{doc, null}];
+ {ok, #doc{} = Doc} ->
+ [{doc, couch_doc:to_json_obj(Doc, DocOpts)}]
+ end.
+
+
+get_cached_db(#{} = Db, Opts) when is_list(Opts) ->
+ MaxAge = fabric2_util:get_value(max_age, Opts, 0),
+ Now = erlang:monotonic_time(millisecond),
+ Age = Now - maps:get(check_current_ts, Db),
+ case Age < MaxAge of
+ true ->
+ Db;
+ false ->
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ fabric2_fdb:ensure_current(TxDb)
+ end)
+ end.
+
+
+is_replicated(Options) when is_list(Options) ->
+ lists:member(replicated_changes, Options).
+
+
+get_batch_size(Options) ->
+ case fabric2_util:get_value(batch_size, Options) of
+ undefined ->
+ config:get_integer("fabric", "update_docs_batch_size",
+ ?DEFAULT_UPDATE_DOCS_BATCH_SIZE);
+ Val when is_integer(Val) ->
+ Val
+ end.
diff --git a/src/fabric/src/fabric2_db_expiration.erl b/src/fabric/src/fabric2_db_expiration.erl
new file mode 100644
index 000000000..92f22e749
--- /dev/null
+++ b/src/fabric/src/fabric2_db_expiration.erl
@@ -0,0 +1,246 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_db_expiration).
+
+
+-behaviour(gen_server).
+
+
+-export([
+ start_link/0,
+ cleanup/1,
+ process_expirations/2
+]).
+
+-export([
+ init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("fabric/include/fabric2.hrl").
+
+-define(JOB_TYPE, <<"db_expiration">>).
+-define(JOB_ID, <<"db_expiration_job">>).
+-define(DEFAULT_JOB_VERSION, 1).
+-define(DEFAULT_RETENTION_SEC, 172800). % 48 hours
+-define(DEFAULT_SCHEDULE_SEC, 3600). % 1 hour
+-define(ERROR_RESCHEDULE_SEC, 5).
+-define(CHECK_ENABLED_SEC, 2).
+-define(JOB_TIMEOUT_SEC, 30).
+
+
+-record(st, {
+ job
+}).
+
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
+init(_) ->
+ process_flag(trap_exit, true),
+ {ok, #st{job = undefined}, 0}.
+
+
+terminate(_M, _St) ->
+ ok.
+
+
+handle_call(Msg, _From, St) ->
+ {stop, {bad_call, Msg}, {bad_call, Msg}, St}.
+
+
+handle_cast(Msg, St) ->
+ {stop, {bad_cast, Msg}, St}.
+
+
+handle_info(timeout, #st{job = undefined} = St) ->
+ ok = wait_for_couch_jobs_app(),
+ ok = couch_jobs:set_type_timeout(?JOB_TYPE, ?JOB_TIMEOUT_SEC),
+ ok = maybe_add_job(),
+ Pid = spawn_link(?MODULE, cleanup, [is_enabled()]),
+ {noreply, St#st{job = Pid}};
+
+handle_info({'EXIT', Pid, Exit}, #st{job = Pid} = St) ->
+ case Exit of
+ normal -> ok;
+ Error -> couch_log:error("~p : job error ~p", [?MODULE, Error])
+ end,
+ NewPid = spawn_link(?MODULE, cleanup, [is_enabled()]),
+ {noreply, St#st{job = NewPid}};
+
+handle_info(Msg, St) ->
+ {stop, {bad_info, Msg}, St}.
+
+
+code_change(_OldVsn, St, _Extra) ->
+ {ok, St}.
+
+
+wait_for_couch_jobs_app() ->
+    % Because of a circular dependency between the couch_jobs and fabric apps,
+    % wait for couch_jobs to initialize before continuing. If the shared FDB
+    % utilities are refactored out, this bit of code can be removed.
+ case lists:keysearch(couch_jobs, 1, application:which_applications()) of
+ {value, {couch_jobs, _, _}} ->
+ ok;
+ false ->
+ timer:sleep(100),
+ wait_for_couch_jobs_app()
+ end.
+
+
+maybe_add_job() ->
+ case couch_jobs:get_job_data(undefined, ?JOB_TYPE, job_id()) of
+ {error, not_found} ->
+ Now = erlang:system_time(second),
+ ok = couch_jobs:add(undefined, ?JOB_TYPE, job_id(), #{}, Now);
+ {ok, _JobData} ->
+ ok
+ end.
+
+
+cleanup(false) ->
+ timer:sleep(?CHECK_ENABLED_SEC * 1000),
+ exit(normal);
+
+cleanup(true) ->
+ Now = erlang:system_time(second),
+ ScheduleSec = schedule_sec(),
+ Opts = #{max_sched_time => Now + min(ScheduleSec div 3, 15)},
+ case couch_jobs:accept(?JOB_TYPE, Opts) of
+ {ok, Job, Data} ->
+ try
+ {ok, Job1, Data1} = ?MODULE:process_expirations(Job, Data),
+ ok = resubmit_job(Job1, Data1, schedule_sec())
+ catch
+ _Tag:Error ->
+ Stack = erlang:get_stacktrace(),
+ couch_log:error("~p : processing error ~p ~p ~p",
+ [?MODULE, Job, Error, Stack]),
+ ok = resubmit_job(Job, Data, ?ERROR_RESCHEDULE_SEC),
+ exit({job_error, Error, Stack})
+ end;
+ {error, not_found} ->
+ timer:sleep(?CHECK_ENABLED_SEC * 1000),
+ ?MODULE:cleanup(is_enabled())
+ end.
+
+
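+% Finish the current job execution and resubmit it to run again `After`
+% seconds from now, both in a single couch_jobs transaction.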
+resubmit_job(Job, Data, After) ->
+ Now = erlang:system_time(second),
+ SchedTime = Now + After,
+ couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(JTx) ->
+ {ok, Job1} = couch_jobs:resubmit(JTx, Job, SchedTime),
+ ok = couch_jobs:finish(JTx, Job1, Data)
+ end),
+ ok.
+
+
+process_expirations(#{} = Job, #{} = Data) ->
+ Start = now_sec(),
+ Callback = fun(Value, LastUpdateAt) ->
+ case Value of
+ {meta, _} -> ok;
+ {row, DbInfo} -> process_row(DbInfo);
+ complete -> ok
+ end,
+ {ok, maybe_report_progress(Job, LastUpdateAt)}
+ end,
+ {ok, _Infos} = fabric2_db:list_deleted_dbs_info(
+ Callback,
+ Start,
+ [{restart_tx, true}]
+ ),
+ {ok, Job, Data}.
+
+
+process_row(DbInfo) ->
+ DbName = proplists:get_value(db_name, DbInfo),
+ TimeStamp = proplists:get_value(timestamp, DbInfo),
+ Now = now_sec(),
+ Retention = retention_sec(),
+ Since = Now - Retention,
+ case Since >= timestamp_to_sec(TimeStamp) of
+ true ->
+ couch_log:notice("Permanently deleting ~s database with"
+ " timestamp ~s", [DbName, TimeStamp]),
+ ok = fabric2_db:delete(DbName, [{deleted_at, TimeStamp}]);
+ false ->
+ ok
+ end.
+
+
+maybe_report_progress(Job, LastUpdateAt) ->
+    % Periodically update the job so it doesn't expire
+ Now = now_sec(),
+    Progress = #{
+        <<"processed_at">> => Now
+    },
+ case (Now - LastUpdateAt) > (?JOB_TIMEOUT_SEC div 2) of
+ true ->
+ couch_jobs:update(undefined, Job, Progress),
+ Now;
+ false ->
+ LastUpdateAt
+ end.
+
+
+job_id() ->
+ JobVersion = job_version(),
+ <<?JOB_ID/binary, "-", JobVersion:16/integer>>.
+
+
+now_sec() ->
+ Now = os:timestamp(),
+ Nowish = calendar:now_to_universal_time(Now),
+ calendar:datetime_to_gregorian_seconds(Nowish).
+
+
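+% Parse a deleted-db timestamp of the form <<"2020-01-31T12:00:00Z">>
+% (presumably the format produced by fabric2_util:iso8601_timestamp/0) into
+% gregorian seconds for comparison with now_sec/0.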
+timestamp_to_sec(TimeStamp) ->
+ <<Year:4/binary, "-", Month:2/binary, "-", Day:2/binary,
+ "T",
+ Hour:2/binary, ":", Minutes:2/binary, ":", Second:2/binary,
+ "Z">> = TimeStamp,
+
+ calendar:datetime_to_gregorian_seconds(
+ {{?bin2int(Year), ?bin2int(Month), ?bin2int(Day)},
+ {?bin2int(Hour), ?bin2int(Minutes), ?bin2int(Second)}}
+ ).
+
+
+is_enabled() ->
+ config:get_boolean("couchdb", "db_expiration_enabled", false).
+
+
+job_version() ->
+    config:get_integer("couchdb", "db_expiration_job_version",
+        ?DEFAULT_JOB_VERSION).
+
+
+retention_sec() ->
+ config:get_integer("couchdb", "db_expiration_retention_sec",
+ ?DEFAULT_RETENTION_SEC).
+
+
+schedule_sec() ->
+ config:get_integer("couchdb", "db_expiration_schedule_sec",
+ ?DEFAULT_SCHEDULE_SEC).
diff --git a/src/fabric/src/fabric2_db_plugin.erl b/src/fabric/src/fabric2_db_plugin.erl
new file mode 100644
index 000000000..1d923dd96
--- /dev/null
+++ b/src/fabric/src/fabric2_db_plugin.erl
@@ -0,0 +1,102 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_db_plugin).
+
+-export([
+ validate_dbname/3,
+ before_doc_update/3,
+ after_doc_write/6,
+ after_doc_read/2,
+ validate_docid/1,
+ check_is_admin/1,
+ is_valid_purge_client/2
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+
+
+-define(SERVICE_ID, fabric2_db).
+
+
+%% ------------------------------------------------------------------
+%% API Function Definitions
+%% ------------------------------------------------------------------
+
+validate_dbname(DbName, Normalized, Default) ->
+ maybe_handle(validate_dbname, [DbName, Normalized], Default).
+
+
+before_doc_update(_, #doc{id = <<?LOCAL_DOC_PREFIX, _/binary>>} = Doc, _) ->
+ Doc;
+
+before_doc_update(Db, Doc0, UpdateType) ->
+ Fun = fabric2_db:get_before_doc_update_fun(Db),
+ case with_pipe(before_doc_update, [Doc0, Db, UpdateType]) of
+ [Doc1, _Db, UpdateType1] when is_function(Fun) ->
+ Fun(Doc1, Db, UpdateType1);
+ [Doc1, _Db, _UpdateType] ->
+ Doc1
+ end.
+
+
+after_doc_write(Db, Doc, NewWinner, OldWinner, NewRevId, Seq)->
+ with_pipe(after_doc_write, [Db, Doc, NewWinner, OldWinner, NewRevId, Seq]).
+
+
+after_doc_read(Db, Doc0) ->
+ Fun = fabric2_db:get_after_doc_read_fun(Db),
+ case with_pipe(after_doc_read, [Doc0, Db]) of
+ [Doc1, _Db] when is_function(Fun) -> Fun(Doc1, Db);
+ [Doc1, _Db] -> Doc1
+ end.
+
+
+validate_docid(Id) ->
+ Handle = couch_epi:get_handle(?SERVICE_ID),
+    %% callbacks return true only if they specifically allow the given Id
+ couch_epi:any(Handle, ?SERVICE_ID, validate_docid, [Id], []).
+
+
+check_is_admin(Db) ->
+ Handle = couch_epi:get_handle(?SERVICE_ID),
+    %% callbacks return true only if they specifically allow the given Db
+ couch_epi:any(Handle, ?SERVICE_ID, check_is_admin, [Db], []).
+
+
+is_valid_purge_client(DbName, Props) ->
+ Handle = couch_epi:get_handle(?SERVICE_ID),
+    %% callbacks return true only if they specifically allow the given purge client
+ couch_epi:any(Handle, ?SERVICE_ID, is_valid_purge_client, [DbName, Props], []).
+
+%% ------------------------------------------------------------------
+%% Internal Function Definitions
+%% ------------------------------------------------------------------
+
+with_pipe(Func, Args) ->
+ do_apply(Func, Args, [pipe]).
+
+do_apply(Func, Args, Opts) ->
+ Handle = couch_epi:get_handle(?SERVICE_ID),
+ couch_epi:apply(Handle, ?SERVICE_ID, Func, Args, Opts).
+
+maybe_handle(Func, Args, Default) ->
+ Handle = couch_epi:get_handle(?SERVICE_ID),
+ case couch_epi:decide(Handle, ?SERVICE_ID, Func, Args, []) of
+ no_decision when is_function(Default) ->
+ apply(Default, Args);
+ no_decision ->
+ Default;
+ {decided, Result} ->
+ Result
+ end.
diff --git a/src/fabric/src/fabric2_epi.erl b/src/fabric/src/fabric2_epi.erl
new file mode 100644
index 000000000..f73eeb0d2
--- /dev/null
+++ b/src/fabric/src/fabric2_epi.erl
@@ -0,0 +1,48 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_epi).
+
+-behaviour(couch_epi_plugin).
+
+-export([
+ app/0,
+ providers/0,
+ services/0,
+ data_subscriptions/0,
+ data_providers/0,
+ processes/0,
+ notify/3
+]).
+
+app() ->
+ fabric.
+
+providers() ->
+ [].
+
+services() ->
+ [
+ {fabric2_db, fabric2_db_plugin}
+ ].
+
+data_subscriptions() ->
+ [].
+
+data_providers() ->
+ [].
+
+processes() ->
+ [].
+
+notify(_Key, _Old, _New) ->
+ ok.
diff --git a/src/fabric/src/fabric2_events.erl b/src/fabric/src/fabric2_events.erl
new file mode 100644
index 000000000..e1198243a
--- /dev/null
+++ b/src/fabric/src/fabric2_events.erl
@@ -0,0 +1,102 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_events).
+
+
+-export([
+ link_listener/4,
+ stop_listener/1
+]).
+
+-export([
+ init/2,
+ poll/1
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+
+
+link_listener(Mod, Fun, Acc, Options) ->
+ State = #{
+ dbname => fabric2_util:get_value(dbname, Options),
+ uuid => fabric2_util:get_value(uuid, Options, undefined),
+ timeout => fabric2_util:get_value(timeout, Options, 1000),
+ mod => Mod,
+ callback => Fun,
+ acc => Acc
+ },
+ Pid = spawn_link(?MODULE, init, [self(), State]),
+ receive
+ {Pid, initialized} -> ok
+ end,
+ {ok, Pid}.
+
+
+stop_listener(Pid) ->
+ Pid ! stop_listening.
+
+
+init(Parent, #{dbname := DbName} = State) ->
+ {ok, Db} = fabric2_db:open(DbName, [?ADMIN_CTX]),
+ Since = fabric2_db:get_update_seq(Db),
+ erlang:monitor(process, Parent),
+ Parent ! {self(), initialized},
+ poll(State#{since => Since}).
+
+
+poll(#{} = State) ->
+ #{
+ dbname := DbName,
+ uuid := DbUUID,
+ timeout := Timeout,
+ since := Since,
+ mod := Mod,
+ callback := Fun,
+ acc := Acc
+ } = State,
+ {Resp, NewSince} = try
+ Opts = [?ADMIN_CTX, {uuid, DbUUID}],
+ case fabric2_db:open(DbName, Opts) of
+ {ok, Db} ->
+ case fabric2_db:get_update_seq(Db) of
+ Since ->
+ {{ok, Acc}, Since};
+ Other ->
+ {Mod:Fun(DbName, updated, Acc), Other}
+ end;
+ Error ->
+ exit(Error)
+ end
+ catch error:database_does_not_exist ->
+ Mod:Fun(DbName, deleted, Acc),
+ {{stop, ok}, Since}
+ end,
+ receive
+ stop_listening ->
+ ok;
+ {'DOWN', _, _, _, _} ->
+ ok
+ after 0 ->
+ case Resp of
+ {ok, NewAcc} ->
+ timer:sleep(Timeout),
+ NewState = State#{
+ since := NewSince,
+ acc := NewAcc
+ },
+ ?MODULE:poll(NewState);
+ {stop, _} ->
+ ok
+ end
+ end.
diff --git a/src/fabric/src/fabric2_fdb.erl b/src/fabric/src/fabric2_fdb.erl
new file mode 100644
index 000000000..52303cef1
--- /dev/null
+++ b/src/fabric/src/fabric2_fdb.erl
@@ -0,0 +1,2082 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_fdb).
+
+
+-export([
+ transactional/1,
+ transactional/3,
+ transactional/2,
+
+ create/2,
+ open/2,
+ ensure_current/1,
+ delete/1,
+ undelete/3,
+ remove_deleted_db/2,
+ exists/1,
+
+ get_dir/1,
+
+ list_dbs/4,
+ list_dbs_info/4,
+ list_deleted_dbs_info/4,
+
+ get_info/1,
+ get_info_future/2,
+ get_info_wait/1,
+ set_config/3,
+
+ get_stat/2,
+ incr_stat/3,
+ incr_stat/4,
+
+ get_all_revs/2,
+ get_all_revs_future/2,
+ get_winning_revs/3,
+ get_winning_revs_future/3,
+ get_revs_wait/2,
+ get_non_deleted_rev/3,
+
+ get_doc_body/3,
+ get_doc_body_future/3,
+ get_doc_body_wait/4,
+
+ get_local_doc_rev_future/2,
+ get_local_doc_rev_wait/1,
+ get_local_doc_body_future/3,
+ get_local_doc_body_wait/4,
+ get_local_doc/2,
+ get_local_doc_rev/3,
+
+ write_doc/6,
+ write_local_doc/2,
+
+ read_attachment/3,
+ write_attachment/4,
+
+ get_last_change/1,
+
+ fold_range/5,
+
+ vs_to_seq/1,
+ seq_to_vs/1,
+ next_vs/1,
+
+ new_versionstamp/1,
+
+ get_approximate_tx_size/1,
+
+ debug_cluster/0,
+ debug_cluster/2
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include("fabric2.hrl").
+
+
+-define(MAX_FOLD_RANGE_RETRIES, 3).
+
+
+-record(fold_acc, {
+ db,
+ restart_tx,
+ start_key,
+ end_key,
+ limit,
+ skip,
+ retries,
+ base_opts,
+ user_fun,
+ user_acc
+}).
+
+-record(info_future, {
+ tx,
+ db_prefix,
+ changes_future,
+ meta_future,
+ uuid_future,
+ retries = 0
+}).
+
+
+transactional(Fun) ->
+ do_transaction(Fun, undefined).
+
+
+transactional(DbName, Options, Fun) when is_binary(DbName) ->
+ with_span(Fun, #{'db.name' => DbName}, fun() ->
+ transactional(fun(Tx) ->
+ Fun(init_db(Tx, DbName, Options))
+ end)
+ end).
+
+
+transactional(#{tx := undefined} = Db, Fun) ->
+ DbName = maps:get(name, Db, undefined),
+ try
+ Db1 = refresh(Db),
+ Reopen = maps:get(reopen, Db1, false),
+ Db2 = maps:remove(reopen, Db1),
+ LayerPrefix = case Reopen of
+ true -> undefined;
+ false -> maps:get(layer_prefix, Db2)
+ end,
+ with_span(Fun, #{'db.name' => DbName}, fun() ->
+ do_transaction(fun(Tx) ->
+ case Reopen of
+ true -> Fun(reopen(Db2#{tx => Tx}));
+ false -> Fun(Db2#{tx => Tx})
+ end
+ end, LayerPrefix)
+ end)
+ catch throw:{?MODULE, reopen} ->
+ with_span('db.reopen', #{'db.name' => DbName}, fun() ->
+ transactional(Db#{reopen => true}, Fun)
+ end)
+ end;
+
+transactional(#{tx := {erlfdb_transaction, _}} = Db, Fun) ->
+ DbName = maps:get(name, Db, undefined),
+ with_span(Fun, #{'db.name' => DbName}, fun() ->
+ Fun(Db)
+ end).
+
+
+do_transaction(Fun, LayerPrefix) when is_function(Fun, 1) ->
+ Db = get_db_handle(),
+ try
+ erlfdb:transactional(Db, fun(Tx) ->
+ case get(erlfdb_trace) of
+ Name when is_binary(Name) ->
+ UId = erlang:unique_integer([positive]),
+ UIdBin = integer_to_binary(UId, 36),
+ TxId = <<Name/binary, "_", UIdBin/binary>>,
+ erlfdb:set_option(Tx, transaction_logging_enable, TxId);
+ _ ->
+ ok
+ end,
+ case is_transaction_applied(Tx) of
+ true ->
+ get_previous_transaction_result();
+ false ->
+ execute_transaction(Tx, Fun, LayerPrefix)
+ end
+ end)
+ after
+ clear_transaction()
+ end.
+
+
+create(#{} = Db0, Options) ->
+ #{
+ name := DbName,
+ tx := Tx,
+ layer_prefix := LayerPrefix
+ } = Db1 = ensure_current(Db0, false),
+
+ DbKey = erlfdb_tuple:pack({?ALL_DBS, DbName}, LayerPrefix),
+ HCA = erlfdb_hca:create(erlfdb_tuple:pack({?DB_HCA}, LayerPrefix)),
+ AllocPrefix = erlfdb_hca:allocate(HCA, Tx),
+ DbPrefix = erlfdb_tuple:pack({?DBS, AllocPrefix}, LayerPrefix),
+ erlfdb:set(Tx, DbKey, DbPrefix),
+
+ % This key is responsible for telling us when something in
+ % the database cache (i.e., fabric2_server's ets table) has
+ % changed and requires re-loading. This currently includes
+    % revs_limit and validate_doc_update functions. There's no
+    % ordering to the versioning here; it's just a value that changes
+    % and is used in the ensure_current check.
+ DbVersionKey = erlfdb_tuple:pack({?DB_VERSION}, DbPrefix),
+ DbVersion = fabric2_util:uuid(),
+ erlfdb:set(Tx, DbVersionKey, DbVersion),
+
+ UUID = fabric2_util:uuid(),
+
+ Defaults = [
+ {?DB_CONFIG, <<"uuid">>, UUID},
+ {?DB_CONFIG, <<"revs_limit">>, ?uint2bin(1000)},
+ {?DB_CONFIG, <<"security_doc">>, <<"{}">>},
+ {?DB_STATS, <<"doc_count">>, ?uint2bin(0)},
+ {?DB_STATS, <<"doc_del_count">>, ?uint2bin(0)},
+ {?DB_STATS, <<"doc_design_count">>, ?uint2bin(0)},
+ {?DB_STATS, <<"doc_local_count">>, ?uint2bin(0)},
+ {?DB_STATS, <<"sizes">>, <<"external">>, ?uint2bin(2)},
+ {?DB_STATS, <<"sizes">>, <<"views">>, ?uint2bin(0)}
+ ],
+ lists:foreach(fun
+ ({P, K, V}) ->
+ Key = erlfdb_tuple:pack({P, K}, DbPrefix),
+ erlfdb:set(Tx, Key, V);
+ ({P, S, K, V}) ->
+ Key = erlfdb_tuple:pack({P, S, K}, DbPrefix),
+ erlfdb:set(Tx, Key, V)
+ end, Defaults),
+
+ UserCtx = fabric2_util:get_value(user_ctx, Options, #user_ctx{}),
+ Options1 = lists:keydelete(user_ctx, 1, Options),
+
+ Db2 = Db1#{
+ uuid => UUID,
+ db_prefix => DbPrefix,
+ db_version => DbVersion,
+
+ revs_limit => 1000,
+ security_doc => {[]},
+ user_ctx => UserCtx,
+ check_current_ts => erlang:monotonic_time(millisecond),
+
+ validate_doc_update_funs => [],
+ before_doc_update => undefined,
+ after_doc_read => undefined,
+ % All other db things as we add features,
+
+ db_options => Options1,
+ interactive => false
+ },
+ aegis:init_db(Db2, Options).
+
+
+open(#{} = Db0, Options) ->
+ #{
+ name := DbName,
+ tx := Tx,
+ layer_prefix := LayerPrefix
+ } = Db1 = ensure_current(Db0, false),
+
+ DbKey = erlfdb_tuple:pack({?ALL_DBS, DbName}, LayerPrefix),
+ DbPrefix = case erlfdb:wait(erlfdb:get(Tx, DbKey)) of
+ Bin when is_binary(Bin) -> Bin;
+ not_found -> erlang:error(database_does_not_exist)
+ end,
+
+ DbVersionKey = erlfdb_tuple:pack({?DB_VERSION}, DbPrefix),
+ DbVersion = erlfdb:wait(erlfdb:get(Tx, DbVersionKey)),
+
+ UserCtx = fabric2_util:get_value(user_ctx, Options, #user_ctx{}),
+ Options1 = lists:keydelete(user_ctx, 1, Options),
+
+ UUID = fabric2_util:get_value(uuid, Options1),
+ Options2 = lists:keydelete(uuid, 1, Options1),
+
+ Interactive = fabric2_util:get_value(interactive, Options2, false),
+ Options3 = lists:keydelete(interactive, 1, Options2),
+
+ Db2 = Db1#{
+ db_prefix => DbPrefix,
+ db_version => DbVersion,
+
+ uuid => <<>>,
+ revs_limit => 1000,
+ security_doc => {[]},
+
+ user_ctx => UserCtx,
+ check_current_ts => erlang:monotonic_time(millisecond),
+
+    % Placeholders until we implement these bits.
+ validate_doc_update_funs => [],
+ before_doc_update => undefined,
+ after_doc_read => undefined,
+
+ db_options => Options3,
+ interactive => Interactive
+ },
+
+ Db3 = load_config(Db2),
+ Db4 = aegis:open_db(Db3),
+
+ case {UUID, Db4} of
+ {undefined, _} -> ok;
+ {<<_/binary>>, #{uuid := UUID}} -> ok;
+ {<<_/binary>>, #{uuid := _}} -> erlang:error(database_does_not_exist)
+ end,
+
+ load_validate_doc_funs(Db4).
+
+
+% Match on `name` in the function head since some non-fabric2 db
+% objects might not have names and so they don't get cached
+refresh(#{tx := undefined, name := DbName} = Db) ->
+ #{
+ uuid := UUID,
+ md_version := OldVer
+ } = Db,
+
+ case fabric2_server:fetch(DbName, UUID) of
+ % Relying on these assumptions about the `md_version` value:
+ % - It is bumped every time `db_version` is bumped
+    % - It is a versionstamp, so we can check which one is newer
+    % - If it is `not_found`, it sorts lower than any binary value
+ #{md_version := Ver} = Db1 when Ver > OldVer ->
+ Db1#{
+ user_ctx := maps:get(user_ctx, Db),
+ security_fun := maps:get(security_fun, Db),
+ interactive := maps:get(interactive, Db)
+ };
+ _ ->
+ Db
+ end;
+
+refresh(#{} = Db) ->
+ Db.
+
+
+reopen(#{} = OldDb) ->
+ require_transaction(OldDb),
+ #{
+ tx := Tx,
+ name := DbName,
+ uuid := UUID,
+ db_options := Options,
+ user_ctx := UserCtx,
+ security_fun := SecurityFun,
+ interactive := Interactive
+ } = OldDb,
+ Options1 = lists:keystore(user_ctx, 1, Options, {user_ctx, UserCtx}),
+ NewDb = open(init_db(Tx, DbName, Options1), Options1),
+
+ % Check if database was re-created
+ case {Interactive, maps:get(uuid, NewDb)} of
+ {true, _} -> ok;
+ {false, UUID} -> ok;
+ {false, _OtherUUID} -> error(database_does_not_exist)
+ end,
+
+ NewDb#{security_fun := SecurityFun, interactive := Interactive}.
+
+
+delete(#{} = Db) ->
+ DoRecovery = fabric2_util:do_recovery(),
+ case DoRecovery of
+ true -> soft_delete_db(Db);
+ false -> hard_delete_db(Db)
+ end.
+
+
+undelete(#{} = Db0, TgtDbName, TimeStamp) ->
+ #{
+ name := DbName,
+ tx := Tx,
+ layer_prefix := LayerPrefix
+ } = ensure_current(Db0, false),
+ DbKey = erlfdb_tuple:pack({?ALL_DBS, TgtDbName}, LayerPrefix),
+ case erlfdb:wait(erlfdb:get(Tx, DbKey)) of
+ Bin when is_binary(Bin) ->
+ file_exists;
+ not_found ->
+ DeletedDbTupleKey = {
+ ?DELETED_DBS,
+ DbName,
+ TimeStamp
+ },
+ DeleteDbKey = erlfdb_tuple:pack(DeletedDbTupleKey, LayerPrefix),
+ case erlfdb:wait(erlfdb:get(Tx, DeleteDbKey)) of
+ not_found ->
+ not_found;
+ DbPrefix ->
+ erlfdb:set(Tx, DbKey, DbPrefix),
+ erlfdb:clear(Tx, DeleteDbKey),
+ bump_db_version(#{
+ tx => Tx,
+ db_prefix => DbPrefix
+ }),
+ ok
+ end
+ end.
+
+
+remove_deleted_db(#{} = Db0, TimeStamp) ->
+ #{
+ name := DbName,
+ tx := Tx,
+ layer_prefix := LayerPrefix
+ } = ensure_current(Db0, false),
+
+ DeletedDbTupleKey = {
+ ?DELETED_DBS,
+ DbName,
+ TimeStamp
+ },
+ DeletedDbKey = erlfdb_tuple:pack(DeletedDbTupleKey, LayerPrefix),
+ case erlfdb:wait(erlfdb:get(Tx, DeletedDbKey)) of
+ not_found ->
+ not_found;
+ DbPrefix ->
+ erlfdb:clear(Tx, DeletedDbKey),
+ erlfdb:clear_range_startswith(Tx, DbPrefix),
+ bump_db_version(#{
+ tx => Tx,
+ db_prefix => DbPrefix
+ }),
+ ok
+ end.
+
+
+exists(#{name := DbName} = Db) when is_binary(DbName) ->
+ #{
+ tx := Tx,
+ layer_prefix := LayerPrefix
+ } = ensure_current(Db, false),
+
+ DbKey = erlfdb_tuple:pack({?ALL_DBS, DbName}, LayerPrefix),
+ case erlfdb:wait(erlfdb:get(Tx, DbKey)) of
+ Bin when is_binary(Bin) -> true;
+ not_found -> false
+ end.
+
+
+get_dir(Tx) ->
+ Root = erlfdb_directory:root(),
+ Dir = fabric2_server:fdb_directory(),
+ CouchDB = erlfdb_directory:create_or_open(Tx, Root, Dir),
+ erlfdb_directory:get_name(CouchDB).
+
+
+list_dbs(Tx, Callback, AccIn, Options0) ->
+ Options = case fabric2_util:get_value(restart_tx, Options0) of
+ undefined -> [{restart_tx, true} | Options0];
+ _AlreadySet -> Options0
+ end,
+ LayerPrefix = get_dir(Tx),
+ Prefix = erlfdb_tuple:pack({?ALL_DBS}, LayerPrefix),
+ fold_range({tx, Tx}, Prefix, fun({K, _V}, Acc) ->
+ {DbName} = erlfdb_tuple:unpack(K, Prefix),
+ Callback(DbName, Acc)
+ end, AccIn, Options).
+
+
+list_dbs_info(Tx, Callback, AccIn, Options0) ->
+ Options = case fabric2_util:get_value(restart_tx, Options0) of
+ undefined -> [{restart_tx, true} | Options0];
+ _AlreadySet -> Options0
+ end,
+ LayerPrefix = get_dir(Tx),
+ Prefix = erlfdb_tuple:pack({?ALL_DBS}, LayerPrefix),
+ fold_range({tx, Tx}, Prefix, fun({DbNameKey, DbPrefix}, Acc) ->
+ {DbName} = erlfdb_tuple:unpack(DbNameKey, Prefix),
+ InfoFuture = get_info_future(Tx, DbPrefix),
+ Callback(DbName, InfoFuture, Acc)
+ end, AccIn, Options).
+
+
+list_deleted_dbs_info(Tx, Callback, AccIn, Options0) ->
+ Options = case fabric2_util:get_value(restart_tx, Options0) of
+ undefined -> [{restart_tx, true} | Options0];
+ _AlreadySet -> Options0
+ end,
+ LayerPrefix = get_dir(Tx),
+ Prefix = erlfdb_tuple:pack({?DELETED_DBS}, LayerPrefix),
+ fold_range({tx, Tx}, Prefix, fun({DbKey, DbPrefix}, Acc) ->
+ {DbName, TimeStamp} = erlfdb_tuple:unpack(DbKey, Prefix),
+ InfoFuture = get_info_future(Tx, DbPrefix),
+ Callback(DbName, TimeStamp, InfoFuture, Acc)
+ end, AccIn, Options).
+
+
+get_info(#{} = Db) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = ensure_current(Db),
+ get_info_wait(get_info_future(Tx, DbPrefix)).
+
+
+get_info_future(Tx, DbPrefix) ->
+ {CStart, CEnd} = erlfdb_tuple:range({?DB_CHANGES}, DbPrefix),
+ ChangesFuture = erlfdb:get_range(Tx, CStart, CEnd, [
+ {streaming_mode, exact},
+ {limit, 1},
+ {reverse, true}
+ ]),
+
+ UUIDKey = erlfdb_tuple:pack({?DB_CONFIG, <<"uuid">>}, DbPrefix),
+ UUIDFuture = erlfdb:get(Tx, UUIDKey),
+
+ StatsPrefix = erlfdb_tuple:pack({?DB_STATS}, DbPrefix),
+ MetaFuture = erlfdb:get_range_startswith(Tx, StatsPrefix),
+
+    % Save the tx object only if it's read-only, as we might need to retry
+    % getting the future after the tx has been reset
+ SaveTx = case erlfdb:get_writes_allowed(Tx) of
+ true -> undefined;
+ false -> Tx
+ end,
+
+ #info_future{
+ tx = SaveTx,
+ db_prefix = DbPrefix,
+ changes_future = ChangesFuture,
+ meta_future = MetaFuture,
+ uuid_future = UUIDFuture
+ }.
+
+
+get_info_wait(#info_future{tx = Tx, retries = Retries} = Future)
+ when Tx =:= undefined orelse Retries >= 2 ->
+ get_info_wait_int(Future);
+
+get_info_wait(#info_future{tx = Tx, retries = Retries} = Future) ->
+ try
+ get_info_wait_int(Future)
+ catch
+ error:{erlfdb_error, ?TRANSACTION_CANCELLED} ->
+ Future1 = get_info_future(Tx, Future#info_future.db_prefix),
+ get_info_wait(Future1#info_future{retries = Retries + 1});
+ error:{erlfdb_error, ?TRANSACTION_TOO_OLD} ->
+ ok = erlfdb:reset(Tx),
+ Future1 = get_info_future(Tx, Future#info_future.db_prefix),
+ get_info_wait(Future1#info_future{retries = Retries + 1})
+ end.
+
+
+load_config(#{} = Db) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = Db,
+
+ {Start, End} = erlfdb_tuple:range({?DB_CONFIG}, DbPrefix),
+ Future = erlfdb:get_range(Tx, Start, End),
+
+ lists:foldl(fun({K, V}, DbAcc) ->
+ {?DB_CONFIG, Key} = erlfdb_tuple:unpack(K, DbPrefix),
+ case Key of
+ <<"uuid">> -> DbAcc#{uuid := V};
+ <<"revs_limit">> -> DbAcc#{revs_limit := ?bin2uint(V)};
+ <<"security_doc">> -> DbAcc#{security_doc := ?JSON_DECODE(V)}
+ end
+ end, Db, erlfdb:wait(Future)).
+
+
+set_config(#{} = Db0, Key, Val) when is_atom(Key) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = Db = ensure_current(Db0),
+ {BinKey, BinVal} = case Key of
+ uuid -> {<<"uuid">>, Val};
+ revs_limit -> {<<"revs_limit">>, ?uint2bin(max(1, Val))};
+ security_doc -> {<<"security_doc">>, ?JSON_ENCODE(Val)}
+ end,
+ DbKey = erlfdb_tuple:pack({?DB_CONFIG, BinKey}, DbPrefix),
+ erlfdb:set(Tx, DbKey, BinVal),
+ {ok, DbVersion} = bump_db_version(Db),
+ {ok, Db#{db_version := DbVersion, Key := Val}}.
+
+
+get_stat(#{} = Db, StatKey) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = ensure_current(Db),
+
+ Key = erlfdb_tuple:pack({?DB_STATS, StatKey}, DbPrefix),
+
+ % Might need to figure out some sort of type
+ % system here. Uints are because stats are all
+ % atomic op adds for the moment.
+ ?bin2uint(erlfdb:wait(erlfdb:get(Tx, Key))).
+
+
+incr_stat(_Db, _StatKey, 0) ->
+ ok;
+
+incr_stat(#{} = Db, StatKey, Increment) when is_integer(Increment) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = ensure_current(Db),
+
+ Key = erlfdb_tuple:pack({?DB_STATS, StatKey}, DbPrefix),
+ erlfdb:add(Tx, Key, Increment).
+
+
+incr_stat(_Db, _Section, _Key, 0) ->
+ ok;
+
+incr_stat(#{} = Db, Section, Key, Increment) when is_integer(Increment) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = ensure_current(Db),
+
+ BinKey = erlfdb_tuple:pack({?DB_STATS, Section, Key}, DbPrefix),
+ erlfdb:add(Tx, BinKey, Increment).
+
+
+get_all_revs(#{} = Db, DocId) ->
+ DbName = maps:get(name, Db, undefined),
+ with_span('db.get_all_revs', #{'db.name' => DbName, 'doc.id' => DocId}, fun() ->
+ Future = get_all_revs_future(Db, DocId),
+ get_revs_wait(Db, Future)
+ end).
+
+
+get_all_revs_future(#{} = Db, DocId) ->
+ Options = [{streaming_mode, want_all}],
+ get_revs_future(Db, DocId, Options).
+
+
+get_winning_revs(Db, DocId, NumRevs) ->
+ DbName = maps:get(name, Db, undefined),
+ with_span('db.get_winning_revs', #{'db.name' => DbName, 'doc.id' => DocId}, fun() ->
+ Future = get_winning_revs_future(Db, DocId, NumRevs),
+ get_revs_wait(Db, Future)
+ end).
+
+
+get_winning_revs_future(#{} = Db, DocId, NumRevs) ->
+ Options = [{reverse, true}, {limit, NumRevs}],
+ get_revs_future(Db, DocId, Options).
+
+
+get_revs_future(#{} = Db, DocId, Options) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = ensure_current(Db),
+
+ {StartKey, EndKey} = erlfdb_tuple:range({?DB_REVS, DocId}, DbPrefix),
+ erlfdb:fold_range_future(Tx, StartKey, EndKey, Options).
+
+
+get_revs_wait(#{} = Db, RangeFuture) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = ensure_current(Db),
+
+ RevRows = erlfdb:fold_range_wait(Tx, RangeFuture, fun({K, V}, Acc) ->
+ Key = erlfdb_tuple:unpack(K, DbPrefix),
+ Val = erlfdb_tuple:unpack(V),
+ [fdb_to_revinfo(Key, Val) | Acc]
+ end, []),
+ lists:reverse(RevRows).
+
+
+get_non_deleted_rev(#{} = Db, DocId, RevId) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = ensure_current(Db),
+
+ {RevPos, Rev} = RevId,
+
+ BaseKey = {?DB_REVS, DocId, true, RevPos, Rev},
+ Key = erlfdb_tuple:pack(BaseKey, DbPrefix),
+ case erlfdb:wait(erlfdb:get(Tx, Key)) of
+ not_found ->
+ not_found;
+ Val ->
+ fdb_to_revinfo(BaseKey, erlfdb_tuple:unpack(Val))
+ end.
+
+
+get_doc_body(Db, DocId, RevInfo) ->
+ DbName = maps:get(name, Db, undefined),
+ with_span('db.get_doc_body', #{'db.name' => DbName, 'doc.id' => DocId}, fun() ->
+ Future = get_doc_body_future(Db, DocId, RevInfo),
+ get_doc_body_wait(Db, DocId, RevInfo, Future)
+ end).
+
+
+get_doc_body_future(#{} = Db, DocId, RevInfo) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = ensure_current(Db),
+
+ #{
+ rev_id := {RevPos, Rev}
+ } = RevInfo,
+
+ Key = {?DB_DOCS, DocId, RevPos, Rev},
+ {StartKey, EndKey} = erlfdb_tuple:range(Key, DbPrefix),
+ erlfdb:fold_range_future(Tx, StartKey, EndKey, []).
+
+
+get_doc_body_wait(#{} = Db0, DocId, RevInfo, Future) ->
+ #{
+ tx := Tx
+ } = Db = ensure_current(Db0),
+
+ #{
+ rev_id := {RevPos, Rev},
+ rev_path := RevPath
+ } = RevInfo,
+
+ FoldFun = aegis:wrap_fold_fun(Db, fun({_K, V}, Acc) ->
+ [V | Acc]
+ end),
+ RevBodyRows = erlfdb:fold_range_wait(Tx, Future, FoldFun, []),
+ BodyRows = lists:reverse(RevBodyRows),
+
+ fdb_to_doc(Db, DocId, RevPos, [Rev | RevPath], BodyRows).
+
+
+get_local_doc_rev_future(Db, DocId) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = ensure_current(Db),
+
+ Key = erlfdb_tuple:pack({?DB_LOCAL_DOCS, DocId}, DbPrefix),
+ erlfdb:get(Tx, Key).
+
+
+get_local_doc_rev_wait(Future) ->
+ erlfdb:wait(Future).
+
+
+get_local_doc_body_future(#{} = Db, DocId, _Rev) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = ensure_current(Db),
+
+ Prefix = erlfdb_tuple:pack({?DB_LOCAL_DOC_BODIES, DocId}, DbPrefix),
+ erlfdb:get_range_startswith(Tx, Prefix).
+
+
+get_local_doc_body_wait(#{} = Db0, DocId, Rev, Future) ->
+ Db = ensure_current(Db0),
+
+ {_, Chunks} = lists:unzip(aegis:decrypt(Db, erlfdb:wait(Future))),
+ fdb_to_local_doc(Db, DocId, Rev, Chunks).
+
+
+get_local_doc(#{} = Db, <<?LOCAL_DOC_PREFIX, _/binary>> = DocId) ->
+ RevFuture = get_local_doc_rev_future(Db, DocId),
+ Rev = get_local_doc_rev_wait(RevFuture),
+
+ BodyFuture = get_local_doc_body_future(Db, DocId, Rev),
+ get_local_doc_body_wait(Db, DocId, Rev, BodyFuture).
+
+
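+% Local doc revisions have been stored in three encodings: the current
+% versioned format prefixed with byte 255 and holding a packed erlfdb tuple,
+% an older term_to_binary form (external term format, first byte 131), and the
+% oldest plain integer-as-binary form. All three are handled below.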
+get_local_doc_rev(_Db0, <<?LOCAL_DOC_PREFIX, _/binary>> = DocId, Val) ->
+ case Val of
+ <<255, RevBin/binary>> ->
+ % Versioned local docs
+ try
+ case erlfdb_tuple:unpack(RevBin) of
+ {?CURR_LDOC_FORMAT, Rev, _Size} -> Rev
+ end
+ catch _:_ ->
+ erlang:error({invalid_local_doc_rev, DocId, Val})
+ end;
+ <<131, _/binary>> ->
+ % Compatibility clause for an older encoding format
+ try binary_to_term(Val, [safe]) of
+ {Rev, _} -> Rev;
+ _ -> erlang:error({invalid_local_doc_rev, DocId, Val})
+ catch
+ error:badarg ->
+ erlang:error({invalid_local_doc_rev, DocId, Val})
+ end;
+ <<_/binary>> ->
+ try binary_to_integer(Val) of
+ IntVal when IntVal >= 0 ->
+ Val;
+ _ ->
+ erlang:error({invalid_local_doc_rev, DocId, Val})
+ catch
+ error:badarg ->
+ erlang:error({invalid_local_doc_rev, DocId, Val})
+ end
+ end.
+
+
+write_doc(#{} = Db0, Doc, NewWinner0, OldWinner, ToUpdate, ToRemove) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = Db = ensure_current(Db0),
+
+ #doc{
+ id = DocId,
+ deleted = Deleted,
+ atts = Atts
+ } = Doc,
+
+ % Doc body
+
+ ok = write_doc_body(Db, Doc),
+
+ % Attachment bookkeeping
+
+ % If a document's attachments have changed we have to scan
+ % for any attachments that may need to be deleted. The check
+ % for `>= 2` is a bit subtle. The important point is that
+ % one of the revisions will be from the new document so we
+ % have to find at least one more beyond that to assert that
+ % the attachments have not changed.
+ AttHash = fabric2_util:hash_atts(Atts),
+ RevsToCheck = [NewWinner0] ++ ToUpdate ++ ToRemove,
+ AttHashCount = lists:foldl(fun(Att, Count) ->
+ #{att_hash := RevAttHash} = Att,
+ case RevAttHash == AttHash of
+ true -> Count + 1;
+ false -> Count
+ end
+ end, 0, RevsToCheck),
+ if
+ AttHashCount == length(RevsToCheck) ->
+ ok;
+ AttHashCount >= 2 ->
+ ok;
+ true ->
+ cleanup_attachments(Db, DocId, Doc, ToRemove)
+ end,
+
+ % Revision tree
+
+ NewWinner = NewWinner0#{
+ winner := true
+ },
+ NewRevId = maps:get(rev_id, NewWinner),
+
+ {WKey, WVal, WinnerVS} = revinfo_to_fdb(Tx, DbPrefix, DocId, NewWinner),
+ ok = erlfdb:set_versionstamped_value(Tx, WKey, WVal),
+
+ lists:foreach(fun(RI0) ->
+ RI = RI0#{winner := false},
+ {K, V, undefined} = revinfo_to_fdb(Tx, DbPrefix, DocId, RI),
+ ok = erlfdb:set(Tx, K, V)
+ end, ToUpdate),
+
+ lists:foreach(fun(RI0) ->
+ RI = RI0#{winner := false},
+ {K, _, undefined} = revinfo_to_fdb(Tx, DbPrefix, DocId, RI),
+ ok = erlfdb:clear(Tx, K),
+ ok = clear_doc_body(Db, DocId, RI0)
+ end, ToRemove),
+
+ % _all_docs
+
+ UpdateStatus = case {OldWinner, NewWinner} of
+ {not_found, #{deleted := false}} ->
+ created;
+ {not_found, #{deleted := true}} ->
+ replicate_deleted;
+ {#{deleted := true}, #{deleted := false}} ->
+ recreated;
+ {#{deleted := false}, #{deleted := false}} ->
+ updated;
+ {#{deleted := false}, #{deleted := true}} ->
+ deleted;
+ {#{deleted := true}, #{deleted := true}} ->
+ ignore
+ end,
+
+ case UpdateStatus of
+ replicate_deleted ->
+ ok;
+ ignore ->
+ ok;
+ deleted ->
+ ADKey = erlfdb_tuple:pack({?DB_ALL_DOCS, DocId}, DbPrefix),
+ ok = erlfdb:clear(Tx, ADKey);
+ _ ->
+ ADKey = erlfdb_tuple:pack({?DB_ALL_DOCS, DocId}, DbPrefix),
+ ADVal = erlfdb_tuple:pack(NewRevId),
+ ok = erlfdb:set(Tx, ADKey, ADVal)
+ end,
+
+ % _changes
+
+ if OldWinner == not_found -> ok; true ->
+ OldSeq = maps:get(sequence, OldWinner),
+ OldSeqKey = erlfdb_tuple:pack({?DB_CHANGES, OldSeq}, DbPrefix),
+ erlfdb:clear(Tx, OldSeqKey)
+ end,
+
+ NewSeqKey = erlfdb_tuple:pack_vs({?DB_CHANGES, WinnerVS}, DbPrefix),
+ NewSeqVal = erlfdb_tuple:pack({DocId, Deleted, NewRevId}),
+ erlfdb:set_versionstamped_key(Tx, NewSeqKey, NewSeqVal),
+
+ % Bump db version on design doc changes
+
+ IsDDoc = case Doc#doc.id of
+ <<?DESIGN_DOC_PREFIX, _/binary>> -> true;
+ _ -> false
+ end,
+
+ if not IsDDoc -> ok; true ->
+ bump_db_version(Db)
+ end,
+
+ % Update our document counts
+
+ case UpdateStatus of
+ created ->
+ if not IsDDoc -> ok; true ->
+ incr_stat(Db, <<"doc_design_count">>, 1)
+ end,
+ incr_stat(Db, <<"doc_count">>, 1);
+ recreated ->
+ if not IsDDoc -> ok; true ->
+ incr_stat(Db, <<"doc_design_count">>, 1)
+ end,
+ incr_stat(Db, <<"doc_count">>, 1),
+ incr_stat(Db, <<"doc_del_count">>, -1);
+ replicate_deleted ->
+ incr_stat(Db, <<"doc_del_count">>, 1);
+ ignore ->
+ ok;
+ deleted ->
+ if not IsDDoc -> ok; true ->
+ incr_stat(Db, <<"doc_design_count">>, -1)
+ end,
+ incr_stat(Db, <<"doc_count">>, -1),
+ incr_stat(Db, <<"doc_del_count">>, 1);
+ updated ->
+ ok
+ end,
+
+ fabric2_db_plugin:after_doc_write(Db, Doc, NewWinner, OldWinner,
+ NewRevId, WinnerVS),
+
+ % Update database size
+ AddSize = sum_add_rev_sizes([NewWinner | ToUpdate]),
+ RemSize = sum_rem_rev_sizes(ToRemove),
+ incr_stat(Db, <<"sizes">>, <<"external">>, AddSize - RemSize),
+
+ ok.
+
+
+write_local_doc(#{} = Db0, Doc) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = Db = ensure_current(Db0),
+
+ Id = Doc#doc.id,
+
+ {LDocKey, LDocVal, NewSize, Rows} = local_doc_to_fdb(Db, Doc),
+
+ {WasDeleted, PrevSize} = case erlfdb:wait(erlfdb:get(Tx, LDocKey)) of
+ <<255, RevBin/binary>> ->
+ case erlfdb_tuple:unpack(RevBin) of
+ {?CURR_LDOC_FORMAT, _Rev, Size} ->
+ {false, Size}
+ end;
+ <<_/binary>> ->
+ {false, 0};
+ not_found ->
+ {true, 0}
+ end,
+
+ BPrefix = erlfdb_tuple:pack({?DB_LOCAL_DOC_BODIES, Id}, DbPrefix),
+
+ case Doc#doc.deleted of
+ true ->
+ erlfdb:clear(Tx, LDocKey),
+ erlfdb:clear_range_startswith(Tx, BPrefix);
+ false ->
+ erlfdb:set(Tx, LDocKey, LDocVal),
+ % Make sure to clear the whole range, in case there was a larger
+ % document body there before.
+ erlfdb:clear_range_startswith(Tx, BPrefix),
+ lists:foreach(fun({K, V}) ->
+ erlfdb:set(Tx, K, aegis:encrypt(Db, K, V))
+ end, Rows)
+ end,
+
+ case {WasDeleted, Doc#doc.deleted} of
+ {true, false} ->
+ incr_stat(Db, <<"doc_local_count">>, 1);
+ {false, true} ->
+ incr_stat(Db, <<"doc_local_count">>, -1);
+ _ ->
+ ok
+ end,
+
+ incr_stat(Db, <<"sizes">>, <<"external">>, NewSize - PrevSize),
+
+ ok.
+
+
+read_attachment(#{} = Db, DocId, AttId) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = ensure_current(Db),
+
+ AttKey = erlfdb_tuple:pack({?DB_ATTS, DocId, AttId}, DbPrefix),
+ Data = case erlfdb:wait(erlfdb:get_range_startswith(Tx, AttKey)) of
+ not_found ->
+ throw({not_found, missing});
+ KVs ->
+ {_, Chunks} = lists:unzip(aegis:decrypt(Db, KVs)),
+ iolist_to_binary(Chunks)
+ end,
+
+ IdKey = erlfdb_tuple:pack({?DB_ATT_NAMES, DocId, AttId}, DbPrefix),
+ case erlfdb:wait(erlfdb:get(Tx, IdKey)) of
+ <<>> ->
+ Data; % Old format, before CURR_ATT_STORAGE_VER = 0
+ <<_/binary>> = InfoBin ->
+ {?CURR_ATT_STORAGE_VER, Compressed} = erlfdb_tuple:unpack(InfoBin),
+ case Compressed of
+ true -> binary_to_term(Data, [safe]);
+ false -> Data
+ end
+ end.
+
+
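+% Attachments are stored under a fresh UUID. Already gzip-encoded data is
+% written as-is; otherwise the body is term_to_binary-compressed and the
+% compressed form is kept only when it is actually smaller. The chosen
+% representation is recorded next to the attachment id so reads can decode it.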
+write_attachment(#{} = Db, DocId, Data, Encoding)
+ when is_binary(Data), is_atom(Encoding) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = ensure_current(Db),
+
+ AttId = fabric2_util:uuid(),
+
+ {Data1, Compressed} = case Encoding of
+ gzip ->
+ {Data, false};
+ _ ->
+ Opts = [{minor_version, 1}, {compressed, 6}],
+ CompressedData = term_to_binary(Data, Opts),
+            case size(CompressedData) < size(Data) of
+ true -> {CompressedData, true};
+ false -> {Data, false}
+ end
+ end,
+
+ IdKey = erlfdb_tuple:pack({?DB_ATT_NAMES, DocId, AttId}, DbPrefix),
+ InfoVal = erlfdb_tuple:pack({?CURR_ATT_STORAGE_VER, Compressed}),
+ ok = erlfdb:set(Tx, IdKey, InfoVal),
+
+ Chunks = chunkify_binary(Data1),
+
+ lists:foldl(fun(Chunk, ChunkId) ->
+ AttKey = erlfdb_tuple:pack({?DB_ATTS, DocId, AttId, ChunkId}, DbPrefix),
+ ok = erlfdb:set(Tx, AttKey, aegis:encrypt(Db, AttKey, Chunk)),
+ ChunkId + 1
+ end, 0, Chunks),
+ {ok, AttId}.
+
+
+get_last_change(#{} = Db) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = ensure_current(Db),
+
+ {Start, End} = erlfdb_tuple:range({?DB_CHANGES}, DbPrefix),
+ Options = [{limit, 1}, {reverse, true}],
+ case erlfdb:get_range(Tx, Start, End, Options) of
+ [] ->
+ vs_to_seq(fabric2_util:seq_zero_vs());
+ [{K, _V}] ->
+ {?DB_CHANGES, SeqVS} = erlfdb_tuple:unpack(K, DbPrefix),
+ vs_to_seq(SeqVS)
+ end.
+
+
+fold_range(TxOrDb, RangePrefix, UserFun, UserAcc, Options) ->
+ {Db, Tx} = case TxOrDb of
+ {tx, TxObj} ->
+ {undefined, TxObj};
+ #{} = DbObj ->
+ DbObj1 = #{tx := TxObj} = ensure_current(DbObj),
+ {DbObj1, TxObj}
+ end,
+    % FoundationDB treats a limit of 0 as unlimited, so we guard against it
+ case fabric2_util:get_value(limit, Options) of 0 -> UserAcc; _ ->
+ FAcc = get_fold_acc(Db, RangePrefix, UserFun, UserAcc, Options),
+ try
+ fold_range(Tx, FAcc)
+ after
+ erase(?PDICT_FOLD_ACC_STATE)
+ end
+ end.
+
+
+fold_range(Tx, FAcc) ->
+ #fold_acc{
+ start_key = Start,
+ end_key = End,
+ limit = Limit,
+ base_opts = BaseOpts,
+ restart_tx = DoRestart
+ } = FAcc,
+ case DoRestart of false -> ok; true ->
+ ok = erlfdb:set_option(Tx, disallow_writes)
+ end,
+ Opts = [{limit, Limit} | BaseOpts],
+ Callback = fun fold_range_cb/2,
+ try
+ #fold_acc{
+ user_acc = FinalUserAcc
+ } = erlfdb:fold_range(Tx, Start, End, Callback, FAcc, Opts),
+ FinalUserAcc
+ catch error:{erlfdb_error, ?TRANSACTION_TOO_OLD} when DoRestart ->
+        % Possibly handle cluster_version_changed and future_version as well,
+        % to continue iteration here instead of falling back to transactional
+        % and retrying from the beginning, which is bound to fail when
+        % streaming data out to a socket.
+ fold_range(Tx, restart_fold(Tx, FAcc))
+ end.
+
+
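+% Update sequences are the 12-byte packed versionstamp rendered as 24 hex
+% characters; this is what the size(Seq) == 24 check in get_since_seq/3
+% earlier in this patch relies on.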
+vs_to_seq(VS) when is_tuple(VS) ->
+ % 51 is the versionstamp type tag
+ <<51:8, SeqBin:12/binary>> = erlfdb_tuple:pack({VS}),
+ fabric2_util:to_hex(SeqBin).
+
+
+seq_to_vs(Seq) when is_binary(Seq) ->
+ Seq1 = fabric2_util:from_hex(Seq),
+ % 51 is the versionstamp type tag
+ Seq2 = <<51:8, Seq1/binary>>,
+ {VS} = erlfdb_tuple:unpack(Seq2),
+ VS.
+
+
+next_vs({versionstamp, VS, Batch, TxId}) ->
+ {V, B, T} = case TxId =< 65535 of
+ true ->
+ {VS, Batch, TxId + 1};
+ false ->
+ case Batch =< 65535 of
+ true ->
+ {VS, Batch + 1, 0};
+ false ->
+ {VS + 1, 0, 0}
+ end
+ end,
+ {versionstamp, V, B, T}.
+
+
+new_versionstamp(Tx) ->
+ TxId = erlfdb:get_next_tx_id(Tx),
+ {versionstamp, 16#FFFFFFFFFFFFFFFF, 16#FFFF, TxId}.
+
+
+get_approximate_tx_size(#{} = TxDb) ->
+ require_transaction(TxDb),
+ #{tx := Tx} = TxDb,
+ erlfdb:wait(erlfdb:get_approximate_size(Tx)).
+
+
+debug_cluster() ->
+ debug_cluster(<<>>, <<16#FE, 16#FF, 16#FF>>).
+
+
+debug_cluster(Start, End) ->
+ transactional(fun(Tx) ->
+ lists:foreach(fun({Key, Val}) ->
+ io:format(standard_error, "~s => ~s~n", [
+ string:pad(erlfdb_util:repr(Key), 60),
+ erlfdb_util:repr(Val)
+ ])
+ end, erlfdb:get_range(Tx, Start, End))
+ end).
+
+
+init_db(Tx, DbName, Options) ->
+ Prefix = get_dir(Tx),
+ Version = erlfdb:wait(erlfdb:get(Tx, ?METADATA_VERSION_KEY)),
+ #{
+ name => DbName,
+ tx => Tx,
+ layer_prefix => Prefix,
+ md_version => Version,
+
+ security_fun => undefined,
+ db_options => Options
+ }.
+
+
+load_validate_doc_funs(#{} = Db) ->
+ FoldFun = fun
+ ({row, Row}, Acc) ->
+ DDocInfo = #{id => fabric2_util:get_value(id, Row)},
+ {ok, [DDocInfo | Acc]};
+ (_, Acc) ->
+ {ok, Acc}
+ end,
+
+ Options = [
+ {start_key, <<"_design/">>},
+ {end_key, <<"_design0">>}
+ ],
+
+ {ok, Infos1} = fabric2_db:fold_docs(Db, FoldFun, [], Options),
+
+ Infos2 = lists:map(fun(Info) ->
+ #{
+ id := DDocId = <<"_design/", _/binary>>
+ } = Info,
+ Info#{
+ rev_info => get_winning_revs_future(Db, DDocId, 1)
+ }
+ end, Infos1),
+
+ Infos3 = lists:flatmap(fun(Info) ->
+ #{
+ id := DDocId,
+ rev_info := RevInfoFuture
+ } = Info,
+ [RevInfo] = get_revs_wait(Db, RevInfoFuture),
+ #{deleted := Deleted} = RevInfo,
+ if Deleted -> []; true ->
+ [Info#{
+ rev_info := RevInfo,
+ body => get_doc_body_future(Db, DDocId, RevInfo)
+ }]
+ end
+ end, Infos2),
+
+ VDUs = lists:flatmap(fun(Info) ->
+ #{
+ id := DDocId,
+ rev_info := RevInfo,
+ body := BodyFuture
+ } = Info,
+ #doc{} = Doc = get_doc_body_wait(Db, DDocId, RevInfo, BodyFuture),
+ case couch_doc:get_validate_doc_fun(Doc) of
+ nil -> [];
+ Fun -> [Fun]
+ end
+ end, Infos3),
+
+ Db#{
+ validate_doc_update_funs := VDUs
+ }.
+
+
+bump_metadata_version(Tx) ->
+    % The 14 zero bytes are pulled from the PR that added the
+    % metadata version key. Not sure why 14 bytes when versionstamps
+    % are only 80 bits, but leaving it as-is for now.
+ erlfdb:set_versionstamped_value(Tx, ?METADATA_VERSION_KEY, <<0:112>>).
+
+
+check_metadata_version(#{} = Db) ->
+ #{
+ tx := Tx,
+ md_version := Version
+ } = Db,
+
+ AlreadyChecked = get(?PDICT_CHECKED_MD_IS_CURRENT),
+ if AlreadyChecked == true -> {current, Db}; true ->
+ case erlfdb:wait(erlfdb:get_ss(Tx, ?METADATA_VERSION_KEY)) of
+ Version ->
+ put(?PDICT_CHECKED_MD_IS_CURRENT, true),
+                % We want to set a read conflict on the db version since we
+                % want to conflict with any writes to this particular db.
+                % However, during db creation the db prefix might not exist
+                % yet, so we don't add a read conflict on it then.
+ case maps:get(db_prefix, Db, not_found) of
+ not_found ->
+ ok;
+ <<_/binary>> = DbPrefix ->
+ DbVerKey = erlfdb_tuple:pack({?DB_VERSION}, DbPrefix),
+ erlfdb:add_read_conflict_key(Tx, DbVerKey)
+ end,
+ {current, Db};
+ NewVersion ->
+ {stale, Db#{md_version := NewVersion}}
+ end
+ end.
+
+
+bump_db_version(#{} = Db) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = Db,
+
+ DbVersionKey = erlfdb_tuple:pack({?DB_VERSION}, DbPrefix),
+ DbVersion = fabric2_util:uuid(),
+ ok = erlfdb:set(Tx, DbVersionKey, DbVersion),
+ ok = bump_metadata_version(Tx),
+ {ok, DbVersion}.
+
+
+check_db_version(#{} = Db, CheckDbVersion) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix,
+ db_version := DbVersion
+ } = Db,
+
+ AlreadyChecked = get(?PDICT_CHECKED_DB_IS_CURRENT),
+ if not CheckDbVersion orelse AlreadyChecked == true -> current; true ->
+ DbVersionKey = erlfdb_tuple:pack({?DB_VERSION}, DbPrefix),
+ case erlfdb:wait(erlfdb:get(Tx, DbVersionKey)) of
+ DbVersion ->
+ put(?PDICT_CHECKED_DB_IS_CURRENT, true),
+ current;
+ _NewDBVersion ->
+ stale
+ end
+ end.
+
+
+soft_delete_db(Db) ->
+ #{
+ name := DbName,
+ tx := Tx,
+ layer_prefix := LayerPrefix,
+ db_prefix := DbPrefix
+ } = ensure_current(Db),
+
+ DbKey = erlfdb_tuple:pack({?ALL_DBS, DbName}, LayerPrefix),
+ Timestamp = list_to_binary(fabric2_util:iso8601_timestamp()),
+ DeletedDbKeyTuple = {?DELETED_DBS, DbName, Timestamp},
+ DeletedDbKey = erlfdb_tuple:pack(DeletedDbKeyTuple, LayerPrefix),
+ case erlfdb:wait(erlfdb:get(Tx, DeletedDbKey)) of
+ not_found ->
+ erlfdb:set(Tx, DeletedDbKey, DbPrefix),
+ erlfdb:clear(Tx, DbKey),
+ bump_db_version(Db),
+ ok;
+ _Val ->
+ {deletion_frequency_exceeded, DbName}
+ end.
+
+
+hard_delete_db(Db) ->
+ #{
+ name := DbName,
+ tx := Tx,
+ layer_prefix := LayerPrefix,
+ db_prefix := DbPrefix
+ } = ensure_current(Db),
+
+ DbKey = erlfdb_tuple:pack({?ALL_DBS, DbName}, LayerPrefix),
+
+ erlfdb:clear(Tx, DbKey),
+ erlfdb:clear_range_startswith(Tx, DbPrefix),
+ bump_metadata_version(Tx),
+ ok.
+
+
+write_doc_body(#{} = Db0, #doc{} = Doc) ->
+ #{
+ tx := Tx
+ } = Db = ensure_current(Db0),
+
+ Rows = doc_to_fdb(Db, Doc),
+ lists:foreach(fun({Key, Value}) ->
+ ok = erlfdb:set(Tx, Key, aegis:encrypt(Db, Key, Value))
+ end, Rows).
+
+
+clear_doc_body(_Db, _DocId, not_found) ->
+ % No old body to clear
+ ok;
+
+clear_doc_body(#{} = Db, DocId, #{} = RevInfo) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = Db,
+
+ #{
+ rev_id := {RevPos, Rev}
+ } = RevInfo,
+
+ BaseKey = {?DB_DOCS, DocId, RevPos, Rev},
+ {StartKey, EndKey} = erlfdb_tuple:range(BaseKey, DbPrefix),
+ ok = erlfdb:clear_range(Tx, StartKey, EndKey).
+
+
+cleanup_attachments(Db, DocId, NewDoc, ToRemove) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = Db,
+
+ RemoveRevs = lists:map(fun(#{rev_id := RevId}) -> RevId end, ToRemove),
+
+ % Gather all known document revisions
+ {ok, DiskDocs} = fabric2_db:open_doc_revs(Db, DocId, all, []),
+ AllDocs = [{ok, NewDoc} | DiskDocs],
+
+ % Get referenced attachment ids
+ ActiveIdSet = lists:foldl(fun({ok, Doc}, Acc) ->
+ #doc{
+ revs = {Pos, [Rev | _]}
+ } = Doc,
+ case lists:member({Pos, Rev}, RemoveRevs) of
+ true ->
+ Acc;
+ false ->
+ lists:foldl(fun(Att, InnerAcc) ->
+ {loc, _Db, _DocId, AttId} = couch_att:fetch(data, Att),
+ sets:add_element(AttId, InnerAcc)
+ end, Acc, Doc#doc.atts)
+ end
+ end, sets:new(), AllDocs),
+
+ AttPrefix = erlfdb_tuple:pack({?DB_ATT_NAMES, DocId}, DbPrefix),
+ Options = [{streaming_mode, want_all}],
+ Future = erlfdb:get_range_startswith(Tx, AttPrefix, Options),
+
+ ExistingIdSet = lists:foldl(fun({K, _}, Acc) ->
+ {?DB_ATT_NAMES, DocId, AttId} = erlfdb_tuple:unpack(K, DbPrefix),
+ sets:add_element(AttId, Acc)
+ end, sets:new(), erlfdb:wait(Future)),
+
+ AttsToRemove = sets:subtract(ExistingIdSet, ActiveIdSet),
+
+ lists:foreach(fun(AttId) ->
+ IdKey = erlfdb_tuple:pack({?DB_ATT_NAMES, DocId, AttId}, DbPrefix),
+ erlfdb:clear(Tx, IdKey),
+
+ ChunkKey = erlfdb_tuple:pack({?DB_ATTS, DocId, AttId}, DbPrefix),
+ erlfdb:clear_range_startswith(Tx, ChunkKey)
+ end, sets:to_list(AttsToRemove)).
+
+
+revinfo_to_fdb(Tx, DbPrefix, DocId, #{winner := true} = RevId) ->
+ #{
+ deleted := Deleted,
+ rev_id := {RevPos, Rev},
+ rev_path := RevPath,
+ branch_count := BranchCount,
+ att_hash := AttHash,
+ rev_size := RevSize
+ } = RevId,
+ VS = new_versionstamp(Tx),
+ Key = {?DB_REVS, DocId, not Deleted, RevPos, Rev},
+ Val = {
+ ?CURR_REV_FORMAT,
+ VS,
+ BranchCount,
+ list_to_tuple(RevPath),
+ AttHash,
+ RevSize
+ },
+ KBin = erlfdb_tuple:pack(Key, DbPrefix),
+ VBin = erlfdb_tuple:pack_vs(Val),
+ {KBin, VBin, VS};
+
+revinfo_to_fdb(_Tx, DbPrefix, DocId, #{} = RevId) ->
+ #{
+ deleted := Deleted,
+ rev_id := {RevPos, Rev},
+ rev_path := RevPath,
+ att_hash := AttHash,
+ rev_size := RevSize
+ } = RevId,
+ Key = {?DB_REVS, DocId, not Deleted, RevPos, Rev},
+ Val = {?CURR_REV_FORMAT, list_to_tuple(RevPath), AttHash, RevSize},
+ KBin = erlfdb_tuple:pack(Key, DbPrefix),
+ VBin = erlfdb_tuple:pack(Val),
+ {KBin, VBin, undefined}.
+
+
+fdb_to_revinfo(Key, {?CURR_REV_FORMAT, _, _, _, _, _} = Val) ->
+ {?DB_REVS, _DocId, NotDeleted, RevPos, Rev} = Key,
+ {_RevFormat, Sequence, BranchCount, RevPath, AttHash, RevSize} = Val,
+ #{
+ winner => true,
+ exists => true,
+ deleted => not NotDeleted,
+ rev_id => {RevPos, Rev},
+ rev_path => tuple_to_list(RevPath),
+ sequence => Sequence,
+ branch_count => BranchCount,
+ att_hash => AttHash,
+ rev_size => RevSize
+ };
+
+fdb_to_revinfo(Key, {?CURR_REV_FORMAT, _, _, _} = Val) ->
+ {?DB_REVS, _DocId, NotDeleted, RevPos, Rev} = Key,
+ {_RevFormat, RevPath, AttHash, RevSize} = Val,
+ #{
+ winner => false,
+ exists => true,
+ deleted => not NotDeleted,
+ rev_id => {RevPos, Rev},
+ rev_path => tuple_to_list(RevPath),
+ sequence => undefined,
+ branch_count => undefined,
+ att_hash => AttHash,
+ rev_size => RevSize
+ };
+
+fdb_to_revinfo(Key, {0, Seq, BCount, RPath}) ->
+ Val = {1, Seq, BCount, RPath, <<>>},
+ fdb_to_revinfo(Key, Val);
+
+fdb_to_revinfo(Key, {0, RPath}) ->
+ Val = {1, RPath, <<>>},
+ fdb_to_revinfo(Key, Val);
+
+fdb_to_revinfo(Key, {1, Seq, BCount, RPath, AttHash}) ->
+ % Don't forget to change ?CURR_REV_FORMAT to 2 here when it increments
+ Val = {?CURR_REV_FORMAT, Seq, BCount, RPath, AttHash, 0},
+ fdb_to_revinfo(Key, Val);
+
+fdb_to_revinfo(Key, {1, RPath, AttHash}) ->
+ % Don't forget to change ?CURR_REV_FORMAT to 2 here when it increments
+ Val = {?CURR_REV_FORMAT, RPath, AttHash, 0},
+ fdb_to_revinfo(Key, Val).
+
+
+doc_to_fdb(Db, #doc{} = Doc) ->
+ #{
+ db_prefix := DbPrefix
+ } = Db,
+
+ #doc{
+ id = Id,
+ revs = {Start, [Rev | _]},
+ body = Body,
+ atts = Atts,
+ deleted = Deleted
+ } = Doc,
+
+ DiskAtts = lists:map(fun couch_att:to_disk_term/1, Atts),
+
+ Opts = [{minor_version, 1}, {compressed, 6}],
+ Value = term_to_binary({Body, DiskAtts, Deleted}, Opts),
+ Chunks = chunkify_binary(Value),
+
+ {Rows, _} = lists:mapfoldl(fun(Chunk, ChunkId) ->
+ Key = erlfdb_tuple:pack({?DB_DOCS, Id, Start, Rev, ChunkId}, DbPrefix),
+ {{Key, Chunk}, ChunkId + 1}
+ end, 0, Chunks),
+
+ Rows.
+
+
+fdb_to_doc(_Db, _DocId, _Pos, _Path, []) ->
+ {not_found, missing};
+
+fdb_to_doc(Db, DocId, Pos, Path, BinRows) when is_list(BinRows) ->
+ Bin = iolist_to_binary(BinRows),
+ {Body, DiskAtts, Deleted} = binary_to_term(Bin, [safe]),
+ Atts = lists:map(fun(Att) ->
+ couch_att:from_disk_term(Db, DocId, Att)
+ end, DiskAtts),
+ Doc0 = #doc{
+ id = DocId,
+ revs = {Pos, Path},
+ body = Body,
+ atts = Atts,
+ deleted = Deleted
+ },
+
+ case Db of
+ #{after_doc_read := undefined} -> Doc0;
+ #{after_doc_read := ADR} -> ADR(Doc0, Db)
+ end.
+
+
+local_doc_to_fdb(Db, #doc{} = Doc) ->
+ #{
+ db_prefix := DbPrefix
+ } = Db,
+
+ #doc{
+ id = Id,
+ revs = {0, [Rev]},
+ body = Body
+ } = Doc,
+
+ Key = erlfdb_tuple:pack({?DB_LOCAL_DOCS, Id}, DbPrefix),
+
+ StoreRev = case Rev of
+ _ when is_integer(Rev) -> integer_to_binary(Rev);
+ _ when is_binary(Rev) -> Rev
+ end,
+
+ BVal = term_to_binary(Body, [{minor_version, 1}, {compressed, 6}]),
+ {Rows, _} = lists:mapfoldl(fun(Chunk, ChunkId) ->
+ K = erlfdb_tuple:pack({?DB_LOCAL_DOC_BODIES, Id, ChunkId}, DbPrefix),
+ {{K, Chunk}, ChunkId + 1}
+ end, 0, chunkify_binary(BVal)),
+
+ NewSize = fabric2_util:ldoc_size(Doc),
+ RawValue = erlfdb_tuple:pack({?CURR_LDOC_FORMAT, StoreRev, NewSize}),
+
+ % Prefix our tuple encoding to make upgrades easier
+ Value = <<255, RawValue/binary>>,
+
+ {Key, Value, NewSize, Rows}.
+
+
+fdb_to_local_doc(_Db, _DocId, not_found, []) ->
+ {not_found, missing};
+
+fdb_to_local_doc(_Db, DocId, <<131, _/binary>> = Val, []) ->
+ % This is an upgrade clause for the old encoding. We allow reading the old
+ % value and will perform an upgrade of the storage format on an update.
+ {Rev, Body} = binary_to_term(Val, [safe]),
+ #doc{
+ id = DocId,
+ revs = {0, [Rev]},
+ deleted = false,
+ body = Body
+ };
+
+fdb_to_local_doc(_Db, DocId, <<255, RevBin/binary>>, Rows) when is_list(Rows) ->
+ Rev = case erlfdb_tuple:unpack(RevBin) of
+ {?CURR_LDOC_FORMAT, Rev0, _Size} -> Rev0
+ end,
+
+ BodyBin = iolist_to_binary(Rows),
+ Body = binary_to_term(BodyBin, [safe]),
+
+ #doc{
+ id = DocId,
+ revs = {0, [Rev]},
+ deleted = false,
+ body = Body
+ };
+
+fdb_to_local_doc(Db, DocId, RawRev, Rows) ->
+ BaseRev = erlfdb_tuple:pack({?CURR_LDOC_FORMAT, RawRev, 0}),
+ Rev = <<255, BaseRev/binary>>,
+ fdb_to_local_doc(Db, DocId, Rev, Rows).
+
+
+sum_add_rev_sizes(RevInfos) ->
+ lists:foldl(fun(RI, Acc) ->
+ #{
+ exists := Exists,
+ rev_size := Size
+ } = RI,
+ case Exists of
+ true -> Acc;
+ false -> Size + Acc
+ end
+ end, 0, RevInfos).
+
+
+sum_rem_rev_sizes(RevInfos) ->
+ lists:foldl(fun(RI, Acc) ->
+ #{
+ exists := true,
+ rev_size := Size
+ } = RI,
+ Size + Acc
+ end, 0, RevInfos).
+
+
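+% Large values are split into chunks of at most binary_chunk_size() bytes so
+% that each stored value stays below FoundationDB's value size limit.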
+chunkify_binary(Data) ->
+ chunkify_data(Data, binary_chunk_size()).
+
+
+chunkify_data(Data, Size) ->
+ case Data of
+ <<>> ->
+ [];
+ <<Head:Size/binary, Rest/binary>> ->
+ [Head | chunkify_data(Rest, Size)];
+ <<_/binary>> when size(Data) < Size ->
+ [Data]
+ end.
+
+
+get_fold_acc(Db, RangePrefix, UserCallback, UserAcc, Options)
+ when is_map(Db) orelse Db =:= undefined ->
+
+ Reverse = case fabric2_util:get_value(dir, Options) of
+ rev -> true;
+ _ -> false
+ end,
+
+ StartKey0 = fabric2_util:get_value(start_key, Options),
+ EndKeyGt = fabric2_util:get_value(end_key_gt, Options),
+ EndKey0 = fabric2_util:get_value(end_key, Options, EndKeyGt),
+ InclusiveEnd = EndKeyGt == undefined,
+ WrapKeys = fabric2_util:get_value(wrap_keys, Options) /= false,
+
+ % CouchDB swaps the key meanings based on the direction
+ % of the fold. FoundationDB does not so we have to
+ % swap back here.
+ {StartKey1, EndKey1} = case Reverse of
+ false -> {StartKey0, EndKey0};
+ true -> {EndKey0, StartKey0}
+ end,
+
+ % Set the maximum bounds for the start and endkey
+ StartKey2 = case StartKey1 of
+ undefined ->
+ <<RangePrefix/binary, 16#00>>;
+ SK2 when not WrapKeys ->
+ erlfdb_tuple:pack(SK2, RangePrefix);
+ SK2 ->
+ erlfdb_tuple:pack({SK2}, RangePrefix)
+ end,
+
+ EndKey2 = case EndKey1 of
+ undefined ->
+ <<RangePrefix/binary, 16#FF>>;
+ EK2 when Reverse andalso not WrapKeys ->
+ PackedEK = erlfdb_tuple:pack(EK2, RangePrefix),
+ <<PackedEK/binary, 16#FF>>;
+ EK2 when Reverse ->
+ PackedEK = erlfdb_tuple:pack({EK2}, RangePrefix),
+ <<PackedEK/binary, 16#FF>>;
+ EK2 when not WrapKeys ->
+ erlfdb_tuple:pack(EK2, RangePrefix);
+ EK2 ->
+ erlfdb_tuple:pack({EK2}, RangePrefix)
+ end,
+
+ % FoundationDB ranges are applied as SK <= key < EK
+ % By default, CouchDB is SK <= key <= EK with the
+ % optional inclusive_end=false option changing that
+ % to SK <= key < EK. Also, remember that CouchDB
+ % swaps the meaning of SK and EK based on direction.
+ %
+ % Thus we have this wonderful bit of logic to account
+ % for all of those combinations.
+
+ StartKey3 = case {Reverse, InclusiveEnd} of
+ {true, false} ->
+ erlfdb_key:first_greater_than(StartKey2);
+ _ ->
+ StartKey2
+ end,
+
+ EndKey3 = case {Reverse, InclusiveEnd} of
+ {false, true} when EndKey0 /= undefined ->
+ erlfdb_key:first_greater_than(EndKey2);
+ {true, _} ->
+ erlfdb_key:first_greater_than(EndKey2);
+ _ ->
+ EndKey2
+ end,
+
+ Skip = case fabric2_util:get_value(skip, Options) of
+ S when is_integer(S), S >= 0 -> S;
+ _ -> 0
+ end,
+
+ Limit = case fabric2_util:get_value(limit, Options) of
+ L when is_integer(L), L >= 0 -> L + Skip;
+ undefined -> 0
+ end,
+
+ TargetBytes = case fabric2_util:get_value(target_bytes, Options) of
+ T when is_integer(T), T >= 0 -> [{target_bytes, T}];
+ undefined -> []
+ end,
+
+ StreamingMode = case fabric2_util:get_value(streaming_mode, Options) of
+ undefined -> [];
+ Name when is_atom(Name) -> [{streaming_mode, Name}]
+ end,
+
+ Snapshot = case fabric2_util:get_value(snapshot, Options) of
+ undefined -> [];
+ B when is_boolean(B) -> [{snapshot, B}]
+ end,
+
+ BaseOpts = [{reverse, Reverse}]
+ ++ TargetBytes
+ ++ StreamingMode
+ ++ Snapshot,
+
+ RestartTx = fabric2_util:get_value(restart_tx, Options, false),
+
+ #fold_acc{
+ db = Db,
+ start_key = StartKey3,
+ end_key = EndKey3,
+ skip = Skip,
+ limit = Limit,
+ retries = 0,
+ base_opts = BaseOpts,
+ restart_tx = RestartTx,
+ user_fun = UserCallback,
+ user_acc = UserAcc
+ }.
+
+
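+% Range fold callback. Besides invoking the user function, it advances the
+% start key (or the end key for reverse folds) past the row just consumed
+% and stashes the accumulator in the process dictionary so restart_fold/2
+% can resume after a transaction timeout without re-reading rows.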
+fold_range_cb({K, V}, #fold_acc{} = Acc) ->
+ #fold_acc{
+ skip = Skip,
+ limit = Limit,
+ user_fun = UserFun,
+ user_acc = UserAcc,
+ base_opts = Opts
+ } = Acc,
+ Acc1 = case Skip =:= 0 of
+ true ->
+ UserAcc1 = UserFun({K, V}, UserAcc),
+ Acc#fold_acc{limit = max(0, Limit - 1), user_acc = UserAcc1};
+ false ->
+ Acc#fold_acc{skip = Skip - 1, limit = Limit - 1}
+ end,
+ Acc2 = case fabric2_util:get_value(reverse, Opts, false) of
+ true -> Acc1#fold_acc{end_key = erlfdb_key:last_less_or_equal(K)};
+ false -> Acc1#fold_acc{start_key = erlfdb_key:first_greater_than(K)}
+ end,
+ put(?PDICT_FOLD_ACC_STATE, Acc2),
+ Acc2.
+
+
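+% Reset the transaction and resume the fold from the last saved accumulator.
+% If no progress has been made since the previous restart, give up after
+% ?MAX_FOLD_RANGE_RETRIES attempts.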
+restart_fold(Tx, #fold_acc{} = Acc) ->
+ erase(?PDICT_CHECKED_MD_IS_CURRENT),
+
+ ok = erlfdb:reset(Tx),
+
+ case {erase(?PDICT_FOLD_ACC_STATE), Acc#fold_acc.retries} of
+ {#fold_acc{db = Db} = Acc1, _} ->
+ Acc1#fold_acc{db = check_db_instance(Db), retries = 0};
+ {undefined, Retries} when Retries < ?MAX_FOLD_RANGE_RETRIES ->
+ Db = check_db_instance(Acc#fold_acc.db),
+ Acc#fold_acc{db = Db, retries = Retries + 1};
+ {undefined, _} ->
+ error(fold_range_not_progressing)
+ end.
+
+
+get_db_handle() ->
+ case get(?PDICT_DB_KEY) of
+ undefined ->
+ {ok, Db} = application:get_env(fabric, db),
+ put(?PDICT_DB_KEY, Db),
+ Db;
+ Db ->
+ Db
+ end.
+
+
+require_transaction(#{tx := {erlfdb_transaction, _}} = _Db) ->
+ ok;
+require_transaction(#{} = _Db) ->
+ erlang:error(transaction_required).
+
+
+ensure_current(Db) ->
+ ensure_current(Db, true).
+
+
+ensure_current(#{} = Db0, CheckDbVersion) ->
+ require_transaction(Db0),
+ Db3 = case check_metadata_version(Db0) of
+ {current, Db1} ->
+ Db1;
+ {stale, Db1} ->
+ case check_db_version(Db1, CheckDbVersion) of
+ current ->
+ % If db version is current, update cache with the latest
+ % metadata so other requests can immediately see the
+ % refreshed db handle.
+ Now = erlang:monotonic_time(millisecond),
+ Db2 = Db1#{check_current_ts := Now},
+ fabric2_server:maybe_update(Db2),
+ Db2;
+ stale ->
+ fabric2_server:maybe_remove(Db1),
+ throw({?MODULE, reopen})
+ end
+ end,
+ case maps:get(security_fun, Db3) of
+ SecurityFun when is_function(SecurityFun, 2) ->
+ #{security_doc := SecDoc} = Db3,
+ ok = SecurityFun(Db3, SecDoc),
+ Db3#{security_fun := undefined};
+ undefined ->
+ Db3
+ end.
+
+
+check_db_instance(undefined) ->
+ undefined;
+
+check_db_instance(#{} = Db) ->
+ require_transaction(Db),
+ case check_metadata_version(Db) of
+ {current, Db1} ->
+ Db1;
+ {stale, Db1} ->
+ #{
+ tx := Tx,
+ uuid := UUID,
+ db_prefix := DbPrefix
+ } = Db1,
+ UUIDKey = erlfdb_tuple:pack({?DB_CONFIG, <<"uuid">>}, DbPrefix),
+ case erlfdb:wait(erlfdb:get(Tx, UUIDKey)) of
+ UUID -> Db1;
+ _ -> error(database_does_not_exist)
+ end
+ end.
+
+
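+% A commit may have succeeded even though the client observed
+% commit_unknown_result. Before retrying, check whether the transaction id
+% written by the previous attempt (see execute_transaction/3) exists so the
+% cached result can be returned instead of re-applying the transaction.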
+is_transaction_applied(Tx) ->
+ is_commit_unknown_result()
+ andalso has_transaction_id()
+ andalso transaction_id_exists(Tx).
+
+
+get_previous_transaction_result() ->
+ get(?PDICT_TX_RES_KEY).
+
+
+execute_transaction(Tx, Fun, LayerPrefix) ->
+ put(?PDICT_CHECKED_MD_IS_CURRENT, false),
+ put(?PDICT_CHECKED_DB_IS_CURRENT, false),
+ Result = Fun(Tx),
+ case erlfdb:is_read_only(Tx) of
+ true ->
+ ok;
+ false ->
+ erlfdb:set(Tx, get_transaction_id(Tx, LayerPrefix), <<>>),
+ put(?PDICT_TX_RES_KEY, Result)
+ end,
+ Result.
+
+
+clear_transaction() ->
+ fabric2_txids:remove(get(?PDICT_TX_ID_KEY)),
+ erase(?PDICT_CHECKED_DB_IS_CURRENT),
+ erase(?PDICT_CHECKED_MD_IS_CURRENT),
+ erase(?PDICT_TX_ID_KEY),
+ erase(?PDICT_TX_RES_KEY).
+
+
+is_commit_unknown_result() ->
+ erlfdb:get_last_error() == ?COMMIT_UNKNOWN_RESULT.
+
+
+has_transaction_id() ->
+ is_binary(get(?PDICT_TX_ID_KEY)).
+
+
+transaction_id_exists(Tx) ->
+ erlfdb:wait(erlfdb:get(Tx, get(?PDICT_TX_ID_KEY))) == <<>>.
+
+
+get_transaction_id(Tx, LayerPrefix) ->
+ case get(?PDICT_TX_ID_KEY) of
+ undefined ->
+ TxId = fabric2_txids:create(Tx, LayerPrefix),
+ put(?PDICT_TX_ID_KEY, TxId),
+ TxId;
+ TxId when is_binary(TxId) ->
+ TxId
+ end.
+
+
+with_span(Operation, ExtraTags, Fun) ->
+ case ctrace:has_span() of
+ true ->
+ Tags = maps:merge(#{
+ 'span.kind' => <<"client">>,
+ component => <<"couchdb.fabric">>,
+ 'db.instance' => fabric2_server:fdb_cluster(),
+ 'db.namespace' => fabric2_server:fdb_directory(),
+ 'db.type' => <<"fdb">>,
+ nonce => get(nonce),
+ pid => self()
+ }, ExtraTags),
+ ctrace:with_span(Operation, Tags, Fun);
+ false ->
+ Fun()
+ end.
+
+
+get_info_wait_int(#info_future{} = InfoFuture) ->
+ #info_future{
+ db_prefix = DbPrefix,
+ changes_future = ChangesFuture,
+ uuid_future = UUIDFuture,
+ meta_future = MetaFuture
+ } = InfoFuture,
+
+ RawSeq = case erlfdb:wait(ChangesFuture) of
+ [] ->
+ vs_to_seq(fabric2_util:seq_zero_vs());
+ [{SeqKey, _}] ->
+ {?DB_CHANGES, SeqVS} = erlfdb_tuple:unpack(SeqKey, DbPrefix),
+ vs_to_seq(SeqVS)
+ end,
+ CProp = {update_seq, RawSeq},
+
+ UUIDProp = {uuid, erlfdb:wait(UUIDFuture)},
+
+ MProps = lists:foldl(fun({K, V}, Acc) ->
+ case erlfdb_tuple:unpack(K, DbPrefix) of
+ {?DB_STATS, <<"doc_count">>} ->
+ [{doc_count, ?bin2uint(V)} | Acc];
+ {?DB_STATS, <<"doc_del_count">>} ->
+ [{doc_del_count, ?bin2uint(V)} | Acc];
+ {?DB_STATS, <<"sizes">>, Name} ->
+ Val = ?bin2uint(V),
+ {_, {Sizes}} = lists:keyfind(sizes, 1, Acc),
+ NewSizes = lists:keystore(Name, 1, Sizes, {Name, Val}),
+ lists:keystore(sizes, 1, Acc, {sizes, {NewSizes}});
+ {?DB_STATS, _} ->
+ Acc
+ end
+ end, [{sizes, {[]}}], erlfdb:wait(MetaFuture)),
+
+ [CProp, UUIDProp | MProps].
+
+
+binary_chunk_size() ->
+ config:get_integer(
+ "fabric", "binary_chunk_size", ?DEFAULT_BINARY_CHUNK_SIZE).
+
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+fdb_to_revinfo_version_compatibility_test() ->
+ DocId = <<"doc_id">>,
+ FirstRevFormat = 0,
+ RevPos = 1,
+ Rev = <<60,84,174,140,210,120,192,18,100,148,9,181,129,165,248,92>>,
+ RevPath = {},
+ NotDeleted = true,
+ Sequence = {versionstamp, 10873034897377, 0, 0},
+ BranchCount = 1,
+
+ KeyWinner = {?DB_REVS, DocId, NotDeleted, RevPos, Rev},
+ ValWinner = {FirstRevFormat, Sequence, BranchCount, RevPath},
+ ExpectedWinner = expected(
+ true, BranchCount, NotDeleted, RevPos, Rev, RevPath, Sequence),
+ ?assertEqual(ExpectedWinner, fdb_to_revinfo(KeyWinner, ValWinner)),
+
+ KeyLoser = {?DB_REVS, DocId, NotDeleted, RevPos, Rev},
+ ValLoser = {FirstRevFormat, RevPath},
+ ExpectedLoser = expected(
+ false, undefined, NotDeleted, RevPos, Rev, RevPath, undefined),
+ ?assertEqual(ExpectedLoser, fdb_to_revinfo(KeyLoser, ValLoser)),
+ ok.
+
+
+expected(Winner, BranchCount, NotDeleted, RevPos, Rev, RevPath, Sequence) ->
+ #{
+ att_hash => <<>>,
+ branch_count => BranchCount,
+ deleted => not NotDeleted,
+ exists => true,
+ rev_id => {RevPos, Rev},
+ rev_path => tuple_to_list(RevPath),
+ rev_size => 0,
+ sequence => Sequence,
+ winner => Winner
+ }.
+
+
+-endif.
diff --git a/src/fabric/src/fabric2_index.erl b/src/fabric/src/fabric2_index.erl
new file mode 100644
index 000000000..25c31a8c8
--- /dev/null
+++ b/src/fabric/src/fabric2_index.erl
@@ -0,0 +1,241 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_index).
+
+
+-behaviour(gen_server).
+
+
+-export([
+ register_index/1,
+ db_updated/1,
+ cleanup/1,
+ start_link/0
+]).
+
+-export([
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ code_change/3
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+
+
+-callback build_indices(Db :: map(), DDocs :: list(#doc{})) ->
+ [{ok, JobId::binary()} | {error, any()}].
+
+-callback cleanup_indices(Db :: map(), DDocs :: list(#doc{})) ->
+ [ok | {error, any()}].
+
+
+-define(SHARDS, 32).
+-define(DEFAULT_DELAY_MSEC, 60000).
+-define(DEFAULT_RESOLUTION_MSEC, 10000).
+
+
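+% Indexing applications register a callback module implementing the
+% behaviour above. A minimal sketch of the expected usage (the module name
+% below is only an example, not something defined in this change):
+%
+%     ok = fabric2_index:register_index(my_index_app),
+%     fabric2_index:db_updated(<<"dbname">>).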
+register_index(Mod) when is_atom(Mod) ->
+ Indices = lists:usort([Mod | registrations()]),
+ application:set_env(fabric, indices, Indices).
+
+
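+% Record that a database was updated. Updates are bucketed into one of
+% ?SHARDS ETS tables by hashing the db name; a per-table background loop
+% picks them up once they are older than delay_msec().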
+db_updated(DbName) when is_binary(DbName) ->
+ Table = table(erlang:phash2(DbName) rem ?SHARDS),
+ ets:insert_new(Table, {DbName, now_msec()}).
+
+
+cleanup(Db) ->
+ try
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ DDocs = fabric2_db:get_design_docs(Db),
+ cleanup_indices(TxDb, DDocs)
+ end)
+ catch
+ error:database_does_not_exist ->
+ ok;
+ Tag:Reason ->
+ Stack = erlang:get_stacktrace(),
+ DbName = fabric2_db:name(Db),
+ LogMsg = "~p failed to cleanup indices for `~s` ~p:~p ~p",
+ couch_log:error(LogMsg, [?MODULE, DbName, Tag, Reason, Stack])
+ end.
+
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
+init(_) ->
+ lists:foreach(fun(T) ->
+ spawn_link(fun() -> process_loop(T) end)
+ end, create_tables()),
+ {ok, nil}.
+
+
+terminate(_M, _St) ->
+ ok.
+
+
+handle_call(Msg, _From, St) ->
+ {stop, {bad_call, Msg}, {bad_call, Msg}, St}.
+
+
+handle_cast(Msg, St) ->
+ {stop, {bad_cast, Msg}, St}.
+
+
+handle_info(Msg, St) ->
+ {stop, {bad_info, Msg}, St}.
+
+
+code_change(_OldVsn, St, _Extra) ->
+ {ok, St}.
+
+
+create_tables() ->
+ Opts = [
+ named_table,
+ public,
+ {write_concurrency, true},
+ {read_concurrency, true}
+ ],
+ Tables = [table(N) || N <- lists:seq(0, ?SHARDS - 1)],
+ [ets:new(T, Opts) || T <- Tables].
+
+
+table(Id) when is_integer(Id), Id >= 0, Id < ?SHARDS ->
+ list_to_atom("fabric2_index_" ++ integer_to_list(Id)).
+
+
+process_loop(Table) ->
+ Now = now_msec(),
+ Delay = delay_msec(),
+ Since = Now - Delay,
+ case is_enabled() of
+ true ->
+ process_updates(Table, Since),
+ clean_stale(Table, Since);
+ false ->
+ clean_stale(Table, Now)
+ end,
+ Resolution = resolution_msec(),
+ Jitter = rand:uniform(1 + Resolution div 2),
+ timer:sleep(Resolution + Jitter),
+ process_loop(Table).
+
+
+clean_stale(Table, Since) ->
+ Head = {'_', '$1'},
+ Guard = {'<', '$1', Since},
+    % The monotonic clock can return the same timestamp more than once, so
+    % items are processed with `=<` but cleaned with `<` in case an update
+    % arrived with the same timestamp after we already started processing it.
+ ets:select_delete(Table, [{Head, [Guard], [true]}]).
+
+
+process_updates(Table, Since) ->
+ Head = {'$1', '$2'},
+ Guard = {'=<', '$2', Since},
+ case ets:select(Table, [{Head, [Guard], ['$1']}], 25) of
+ '$end_of_table' -> ok;
+ {Match, Cont} -> process_updates_iter(Match, Cont)
+ end.
+
+
+process_updates_iter([], Cont) ->
+ case ets:select(Cont) of
+ '$end_of_table' -> ok;
+ {Match, Cont1} -> process_updates_iter(Match, Cont1)
+ end;
+
+process_updates_iter([Db | Rest], Cont) ->
+ try
+ process_db(Db)
+ catch
+ error:database_does_not_exist ->
+ ok;
+ Tag:Reason ->
+ Stack = erlang:get_stacktrace(),
+ LogMsg = "~p failed to build indices for `~s` ~p:~p ~p",
+ couch_log:error(LogMsg, [?MODULE, Db, Tag, Reason, Stack])
+ end,
+ process_updates_iter(Rest, Cont).
+
+
+process_db(DbName) when is_binary(DbName) ->
+ {ok, Db} = fabric2_db:open(DbName, [?ADMIN_CTX]),
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ DDocs1 = fabric2_db:get_design_docs(TxDb),
+ DDocs2 = lists:filter(fun should_update/1, DDocs1),
+ DDocs3 = shuffle(DDocs2),
+ build_indices(TxDb, DDocs3),
+ case auto_cleanup() of
+ true -> cleanup_indices(TxDb, DDocs1);
+ false -> ok
+ end
+ end).
+
+
+build_indices(_TxDb, []) ->
+ [];
+
+build_indices(TxDb, DDocs) ->
+ lists:flatmap(fun(Mod) ->
+ Mod:build_indices(TxDb, DDocs)
+ end, registrations()).
+
+
+cleanup_indices(TxDb, DDocs) ->
+ lists:foreach(fun(Mod) ->
+ Mod:cleanup_indices(TxDb, DDocs)
+ end, registrations()).
+
+
+registrations() ->
+ application:get_env(fabric, indices, []).
+
+
+should_update(#doc{body = {Props}}) ->
+ couch_util:get_value(<<"autoupdate">>, Props, true).
+
+
+shuffle(Items) ->
+ Tagged = [{rand:uniform(), I} || I <- Items],
+ Sorted = lists:sort(Tagged),
+ [I || {_T, I} <- Sorted].
+
+
+now_msec() ->
+ erlang:monotonic_time(millisecond).
+
+
+is_enabled() ->
+ config:get_boolean("fabric", "index_updater_enabled", true).
+
+
+delay_msec() ->
+ config:get_integer("fabric", "index_updater_delay_msec",
+ ?DEFAULT_DELAY_MSEC).
+
+
+resolution_msec() ->
+ config:get_integer("fabric", "index_updater_resolution_msec",
+ ?DEFAULT_RESOLUTION_MSEC).
+
+
+auto_cleanup() ->
+ config:get_boolean("fabric", "index_updater_remove_old_indices", false).
diff --git a/src/fabric/src/fabric2_node_types.erl b/src/fabric/src/fabric2_node_types.erl
new file mode 100644
index 000000000..110f04d15
--- /dev/null
+++ b/src/fabric/src/fabric2_node_types.erl
@@ -0,0 +1,52 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_node_types).
+
+
+-export([
+ is_type/1
+]).
+
+
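+% Check whether this node should act as the given node type. The OS
+% environment takes precedence over the application environment; when
+% neither is set the type defaults to enabled. For example (the type name
+% is illustrative only), COUCHDB_NODE_TYPE_FOO=false disables type `foo`,
+% as does [{foo, false}] under the `node_types` key of the fabric
+% application environment.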
+is_type(Type) when is_atom(Type) ->
+ case {from_os_env(Type), from_app_env(Type)} of
+ {V, _} when is_boolean(V) ->
+ V;
+ {undefined, V} when is_boolean(V) ->
+ V;
+ {undefined, undefined} ->
+            % When not defined anywhere, assume `true`: by default a node
+            % performs all the background tasks
+ true
+ end.
+
+
+from_os_env(Type) when is_atom(Type) ->
+ StrType = erlang:atom_to_list(Type),
+ StrTypeUpper = string:to_upper(StrType),
+ case os:getenv("COUCHDB_NODE_TYPE_" ++ StrTypeUpper) of
+ false ->
+ undefined;
+ Str when is_list(Str) ->
+ case string:to_lower(Str) of
+ "false" -> false;
+ _ -> true
+ end
+ end.
+
+
+from_app_env(Type) when is_atom(Type) ->
+ case application:get_env(fabric, node_types) of
+ undefined -> undefined;
+ {ok, Props} when is_list(Props) -> proplists:get_value(Type, Props)
+ end.
diff --git a/src/fabric/src/fabric2_server.erl b/src/fabric/src/fabric2_server.erl
new file mode 100644
index 000000000..be674b10e
--- /dev/null
+++ b/src/fabric/src/fabric2_server.erl
@@ -0,0 +1,276 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_server).
+-behaviour(gen_server).
+-vsn(1).
+
+
+-export([
+ start_link/0,
+
+ fetch/2,
+
+ store/1,
+ maybe_update/1,
+
+ remove/1,
+ maybe_remove/1,
+
+ fdb_directory/0,
+ fdb_cluster/0
+]).
+
+
+-export([
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ code_change/3
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+
+
+-define(CLUSTER_FILE, "/usr/local/etc/foundationdb/fdb.cluster").
+-define(FDB_DIRECTORY, fdb_directory).
+-define(FDB_CLUSTER, fdb_cluster).
+-define(DEFAULT_FDB_DIRECTORY, <<"couchdb">>).
+-define(TX_OPTIONS_SECTION, "fdb_tx_options").
+-define(RELISTEN_DELAY, 1000).
+
+-define(DEFAULT_TIMEOUT_MSEC, "60000").
+-define(DEFAULT_RETRY_LIMIT, "100").
+
+-define(TX_OPTIONS, #{
+ machine_id => {binary, undefined},
+ datacenter_id => {binary, undefined},
+ transaction_logging_max_field_length => {integer, undefined},
+ timeout => {integer, ?DEFAULT_TIMEOUT_MSEC},
+ retry_limit => {integer, ?DEFAULT_RETRY_LIMIT},
+ max_retry_delay => {integer, undefined},
+ size_limit => {integer, undefined}
+}).
+
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
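+% Look up a cached db handle. Cache entries have the shape
+% {DbName, UUID, MDVer, Db}; when a UUID is supplied the cached handle is
+% returned only if its UUID matches, otherwise `undefined` is returned.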
+fetch(DbName, UUID) when is_binary(DbName) ->
+ case {UUID, ets:lookup(?MODULE, DbName)} of
+ {_, []} -> undefined;
+ {undefined, [{DbName, _UUID, _, #{} = Db}]} -> Db;
+ {<<_/binary>>, [{DbName, UUID, _, #{} = Db}]} -> Db;
+ {<<_/binary>>, [{DbName, _UUID, _, #{} = _Db}]} -> undefined
+ end.
+
+
+store(#{name := DbName} = Db0) when is_binary(DbName) ->
+ #{
+ uuid := UUID,
+ md_version := MDVer
+ } = Db0,
+ Db1 = sanitize(Db0),
+ case ets:insert_new(?MODULE, {DbName, UUID, MDVer, Db1}) of
+ true -> ok;
+ false -> maybe_update(Db1)
+ end,
+ ok.
+
+
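+% Replace a cached handle only when the cached metadata version is not
+% newer than the one in Db0 (the '=<' guard in the select_replace match
+% spec), so a stale handle never overwrites a fresher one.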
+maybe_update(#{name := DbName} = Db0) when is_binary(DbName) ->
+ #{
+ uuid := UUID,
+ md_version := MDVer
+ } = Db0,
+ Db1 = sanitize(Db0),
+ Head = {DbName, UUID, '$1', '_'},
+ Guard = {'=<', '$1', MDVer},
+ Body = {DbName, UUID, MDVer, {const, Db1}},
+ try
+ 1 =:= ets:select_replace(?MODULE, [{Head, [Guard], [{Body}]}])
+ catch
+ error:badarg ->
+ false
+ end.
+
+
+remove(DbName) when is_binary(DbName) ->
+ true = ets:delete(?MODULE, DbName),
+ ok.
+
+
+maybe_remove(#{name := DbName} = Db) when is_binary(DbName) ->
+ #{
+ uuid := UUID,
+ md_version := MDVer
+ } = Db,
+ Head = {DbName, UUID, '$1', '_'},
+ Guard = {'=<', '$1', MDVer},
+ 1 =:= ets:select_delete(?MODULE, [{Head, [Guard], [true]}]).
+
+
+init(_) ->
+ ets:new(?MODULE, [
+ public,
+ named_table,
+ {read_concurrency, true},
+ {write_concurrency, true}
+ ]),
+ {Cluster, Db} = get_db_and_cluster([empty]),
+ application:set_env(fabric, ?FDB_CLUSTER, Cluster),
+ application:set_env(fabric, db, Db),
+
+ Dir = case config:get("fabric", "fdb_directory") of
+ Val when is_list(Val), length(Val) > 0 ->
+ [?l2b(Val)];
+ _ ->
+ [?DEFAULT_FDB_DIRECTORY]
+ end,
+ application:set_env(fabric, ?FDB_DIRECTORY, Dir),
+ config:subscribe_for_changes([?TX_OPTIONS_SECTION]),
+ {ok, nil}.
+
+
+terminate(_, _St) ->
+ ok.
+
+
+handle_call(Msg, _From, St) ->
+ {stop, {bad_call, Msg}, {bad_call, Msg}, St}.
+
+
+handle_cast(Msg, St) ->
+ {stop, {bad_cast, Msg}, St}.
+
+
+handle_info({config_change, ?TX_OPTIONS_SECTION, _K, deleted, _}, St) ->
+    % Since we don't know the exact default values to reset the options to,
+    % we recreate the db handle instead, which starts with defaults and
+    % re-applies all the remaining configured options
+ {_Cluster, NewDb} = get_db_and_cluster([]),
+ application:set_env(fabric, db, NewDb),
+ {noreply, St};
+
+handle_info({config_change, ?TX_OPTIONS_SECTION, K, V, _}, St) ->
+ {ok, Db} = application:get_env(fabric, db),
+ apply_tx_options(Db, [{K, V}]),
+ {noreply, St};
+
+handle_info({gen_event_EXIT, _Handler, _Reason}, St) ->
+ erlang:send_after(?RELISTEN_DELAY, self(), restart_config_listener),
+ {noreply, St};
+
+handle_info(restart_config_listener, St) ->
+ config:subscribe_for_changes([?TX_OPTIONS_SECTION]),
+ {noreply, St};
+
+handle_info(Msg, St) ->
+ {stop, {bad_info, Msg}, St}.
+
+
+code_change(_OldVsn, St, _Extra) ->
+ {ok, St}.
+
+
+fdb_directory() ->
+ get_env(?FDB_DIRECTORY).
+
+fdb_cluster() ->
+ get_env(?FDB_CLUSTER).
+
+get_env(Key) ->
+ case get(Key) of
+ undefined ->
+ case application:get_env(fabric, Key) of
+ undefined ->
+ erlang:error(fabric_application_not_started);
+ {ok, Value} ->
+ put(Key, Value),
+ Value
+ end;
+ Value ->
+ Value
+ end.
+
+
+get_db_and_cluster(EunitDbOpts) ->
+ {Cluster, Db} = case application:get_env(fabric, eunit_run) of
+ {ok, true} ->
+ {<<"eunit_test">>, erlfdb_util:get_test_db(EunitDbOpts)};
+ undefined ->
+ ClusterFileStr = config:get("erlfdb", "cluster_file", ?CLUSTER_FILE),
+ {ok, ConnectionStr} = file:read_file(ClusterFileStr),
+ DbHandle = erlfdb:open(iolist_to_binary(ClusterFileStr)),
+ {string:trim(ConnectionStr), DbHandle}
+ end,
+ apply_tx_options(Db, config:get(?TX_OPTIONS_SECTION)),
+ {Cluster, Db}.
+
+
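+% Apply transaction options from the [fdb_tx_options] config section to the
+% db handle. A sketch of what that section might look like (values are
+% illustrative, not defaults introduced here):
+%
+%     [fdb_tx_options]
+%     timeout = 60000
+%     size_limit = 10000000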
+apply_tx_options(Db, Cfg) ->
+ maps:map(fun(Option, {Type, Default}) ->
+ case lists:keyfind(atom_to_list(Option), 1, Cfg) of
+ false ->
+ case Default of
+ undefined -> ok;
+ _Defined -> apply_tx_option(Db, Option, Default, Type)
+ end;
+ {_K, Val} ->
+ apply_tx_option(Db, Option, Val, Type)
+ end
+ end, ?TX_OPTIONS).
+
+
+apply_tx_option(Db, Option, Val, integer) ->
+ try
+ set_option(Db, Option, list_to_integer(Val))
+ catch
+ error:badarg ->
+ Msg = "~p : Invalid integer tx option ~p = ~p",
+ couch_log:error(Msg, [?MODULE, Option, Val])
+ end;
+
+apply_tx_option(Db, Option, Val, binary) ->
+ BinVal = list_to_binary(Val),
+ case size(BinVal) < 16 of
+ true ->
+ set_option(Db, Option, BinVal);
+ false ->
+ Msg = "~p : String tx option ~p is larger than 16 bytes",
+ couch_log:error(Msg, [?MODULE, Option])
+ end.
+
+
+set_option(Db, Option, Val) ->
+ try
+ erlfdb:set_option(Db, Option, Val)
+ catch
+ % This could happen if the option is not supported by erlfdb or
+    % fdbserver.
+ error:badarg ->
+ Msg = "~p : Could not set fdb tx option ~p = ~p",
+ couch_log:error(Msg, [?MODULE, Option, Val])
+ end.
+
+
+sanitize(#{} = Db) ->
+ Db#{
+ tx := undefined,
+ user_ctx := #user_ctx{},
+ security_fun := undefined,
+ interactive := false
+ }.
diff --git a/src/fabric/src/fabric2_sup.erl b/src/fabric/src/fabric2_sup.erl
new file mode 100644
index 000000000..874a8c240
--- /dev/null
+++ b/src/fabric/src/fabric2_sup.erl
@@ -0,0 +1,69 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_sup).
+-behaviour(supervisor).
+-vsn(1).
+
+
+-export([
+ start_link/1
+]).
+
+-export([
+ init/1
+]).
+
+
+start_link(Args) ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, Args).
+
+
+init([]) ->
+ config:enable_feature(fdb),
+ Flags = {rest_for_one, 1, 5},
+ Children = [
+ {
+ fabric2_server,
+ {fabric2_server, start_link, []},
+ permanent,
+ 5000,
+ worker,
+ [fabric2_server]
+ },
+ {
+ fabric2_txids,
+ {fabric2_txids, start_link, []},
+ permanent,
+ 5000,
+ worker,
+ [fabric2_server]
+ },
+ {
+ fabric2_index,
+ {fabric2_index, start_link, []},
+ permanent,
+ 5000,
+ worker,
+ [fabric2_index]
+ },
+ {
+ fabric2_db_expiration,
+ {fabric2_db_expiration, start_link, []},
+ permanent,
+ 5000,
+ worker,
+ [fabric2_db_expiration]
+ }
+ ],
+ ChildrenWithEpi = couch_epi:register_service(fabric2_epi, Children),
+ {ok, {Flags, ChildrenWithEpi}}.
diff --git a/src/fabric/src/fabric2_txids.erl b/src/fabric/src/fabric2_txids.erl
new file mode 100644
index 000000000..285e342ed
--- /dev/null
+++ b/src/fabric/src/fabric2_txids.erl
@@ -0,0 +1,153 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_txids).
+-behaviour(gen_server).
+-vsn(1).
+
+
+-export([
+ start_link/0,
+ create/2,
+ remove/1
+]).
+
+
+-export([
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ code_change/3,
+ format_status/2
+]).
+
+
+-include("fabric2.hrl").
+
+
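+% ?ONE_HOUR is expressed in microseconds since it is compared against
+% timer:now_diff/2, which returns microseconds.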
+-define(ONE_HOUR, 3600000000).
+-define(MAX_TX_IDS, 1000).
+
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
+create(Tx, undefined) ->
+ Prefix = fabric2_fdb:get_dir(Tx),
+ create(Tx, Prefix);
+
+create(_Tx, LayerPrefix) ->
+ {Mega, Secs, Micro} = os:timestamp(),
+ Key = {?TX_IDS, Mega, Secs, Micro, fabric2_util:uuid()},
+ erlfdb_tuple:pack(Key, LayerPrefix).
+
+
+remove(TxId) when is_binary(TxId) ->
+ gen_server:cast(?MODULE, {remove, TxId});
+
+remove(undefined) ->
+ ok.
+
+
+init(_) ->
+ {ok, #{
+ last_sweep => os:timestamp(),
+ txids => []
+ }}.
+
+
+terminate(_, #{txids := TxIds}) ->
+ if TxIds == [] -> ok; true ->
+ fabric2_fdb:transactional(fun(Tx) ->
+ lists:foreach(fun(TxId) ->
+ erlfdb:clear(Tx, TxId)
+ end, TxIds)
+ end)
+ end,
+ ok.
+
+
+handle_call(Msg, _From, St) ->
+ {stop, {bad_call, Msg}, {bad_call, Msg}, St}.
+
+
+handle_cast({remove, TxId}, St) ->
+ #{
+ last_sweep := LastSweep,
+ txids := TxIds
+ } = St,
+
+ NewTxIds = [TxId | TxIds],
+ NewSt = St#{txids := NewTxIds},
+
+ NeedsSweep = timer:now_diff(os:timestamp(), LastSweep) > ?ONE_HOUR,
+
+ case NeedsSweep orelse length(NewTxIds) >= ?MAX_TX_IDS of
+ true ->
+ {noreply, clean(NewSt, NeedsSweep)};
+ false ->
+ {noreply, NewSt}
+ end.
+
+
+handle_info(Msg, St) ->
+ {stop, {bad_info, Msg}, St}.
+
+
+code_change(_OldVsn, St, _Extra) ->
+ {ok, St}.
+
+
+format_status(_Opt, [_PDict, State]) ->
+ #{
+ txids := TxIds
+ } = State,
+ Scrubbed = State#{
+ txids => {length, length(TxIds)}
+ },
+ [{data, [{"State",
+ Scrubbed
+ }]}].
+
+
+clean(St, NeedsSweep) ->
+ #{
+ last_sweep := LastSweep,
+ txids := TxIds
+ } = St,
+ fabric2_fdb:transactional(fun(Tx) ->
+ lists:foreach(fun(TxId) ->
+ erlfdb:clear(Tx, TxId)
+ end, TxIds),
+ case NeedsSweep of
+ true ->
+ sweep(Tx, LastSweep),
+ St#{
+ last_sweep := os:timestamp(),
+ txids := []
+ };
+ false ->
+ St#{txids := []}
+ end
+ end).
+
+
+sweep(Tx, {Mega, Secs, Micro}) ->
+ Prefix = fabric2_fdb:get_dir(Tx),
+ StartKey = erlfdb_tuple:pack({?TX_IDS}, Prefix),
+ EndKey = erlfdb_tuple:pack({?TX_IDS, Mega, Secs, Micro}, Prefix),
+ erlfdb:set_option(Tx, next_write_no_write_conflict_range),
+ erlfdb:clear_range(Tx, StartKey, EndKey).
diff --git a/src/fabric/src/fabric2_users_db.erl b/src/fabric/src/fabric2_users_db.erl
new file mode 100644
index 000000000..9a8a462c3
--- /dev/null
+++ b/src/fabric/src/fabric2_users_db.erl
@@ -0,0 +1,144 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_users_db).
+
+-export([
+ before_doc_update/3,
+ after_doc_read/2,
+ strip_non_public_fields/1
+]).
+
+-include_lib("couch/include/couch_db.hrl").
+
+-define(NAME, <<"name">>).
+-define(PASSWORD, <<"password">>).
+-define(DERIVED_KEY, <<"derived_key">>).
+-define(PASSWORD_SCHEME, <<"password_scheme">>).
+-define(SIMPLE, <<"simple">>).
+-define(PASSWORD_SHA, <<"password_sha">>).
+-define(PBKDF2, <<"pbkdf2">>).
+-define(ITERATIONS, <<"iterations">>).
+-define(SALT, <<"salt">>).
+-define(replace(L, K, V), lists:keystore(K, 1, L, {K, V})).
+
+-define(
+ DDOCS_ADMIN_ONLY,
+ <<"Only administrators can view design docs in the users database.">>
+).
+
+% If the request's userCtx identifies an admin
+% -> save_doc (see below)
+%
+% If the request's userCtx.name is null:
+% -> save_doc
+% // this is an anonymous user registering a new document
+% // in case a user doc with the same id already exists, the anonymous
+% // user will get a regular doc update conflict.
+% If the request's userCtx.name doesn't match the doc's name
+% -> 404 // Not Found
+% Else
+% -> save_doc
+before_doc_update(Doc, Db, _UpdateType) ->
+ #user_ctx{name = Name} = fabric2_db:get_user_ctx(Db),
+ DocName = get_doc_name(Doc),
+ case (catch fabric2_db:check_is_admin(Db)) of
+ ok ->
+ save_doc(Doc);
+ _ when Name =:= DocName orelse Name =:= null ->
+ save_doc(Doc);
+ _ ->
+ throw(not_found)
+ end.
+
+% If newDoc.password == null || newDoc.password == undefined:
+% ->
+% noop
+% Else -> // calculate password hash server side
+% newDoc.password_sha = hash_pw(newDoc.password + salt)
+% newDoc.salt = salt
+% newDoc.password = null
+save_doc(#doc{body={Body}} = Doc) ->
+ %% Support both schemes to smooth migration from legacy scheme
+ Scheme = config:get("couch_httpd_auth", "password_scheme", "pbkdf2"),
+ case {fabric2_util:get_value(?PASSWORD, Body), Scheme} of
+ {null, _} -> % server admins don't have a user-db password entry
+ Doc;
+ {undefined, _} ->
+ Doc;
+ {ClearPassword, "simple"} -> % deprecated
+ Salt = couch_uuids:random(),
+ PasswordSha = couch_passwords:simple(ClearPassword, Salt),
+ Body0 = ?replace(Body, ?PASSWORD_SCHEME, ?SIMPLE),
+ Body1 = ?replace(Body0, ?SALT, Salt),
+ Body2 = ?replace(Body1, ?PASSWORD_SHA, PasswordSha),
+ Body3 = proplists:delete(?PASSWORD, Body2),
+ Doc#doc{body={Body3}};
+ {ClearPassword, "pbkdf2"} ->
+ Iterations = list_to_integer(config:get("couch_httpd_auth", "iterations", "1000")),
+ Salt = couch_uuids:random(),
+ DerivedKey = couch_passwords:pbkdf2(ClearPassword, Salt, Iterations),
+ Body0 = ?replace(Body, ?PASSWORD_SCHEME, ?PBKDF2),
+ Body1 = ?replace(Body0, ?ITERATIONS, Iterations),
+ Body2 = ?replace(Body1, ?DERIVED_KEY, DerivedKey),
+ Body3 = ?replace(Body2, ?SALT, Salt),
+ Body4 = proplists:delete(?PASSWORD, Body3),
+ Doc#doc{body={Body4}};
+ {_ClearPassword, Scheme} ->
+ couch_log:error("[couch_httpd_auth] password_scheme value of '~p' is invalid.", [Scheme]),
+ throw({forbidden, "Server cannot hash passwords at this time."})
+ end.
+
+
+% If the doc is a design doc
+% If the request's userCtx identifies an admin
+% -> return doc
+% Else
+% -> 403 // Forbidden
+% If the request's userCtx identifies an admin
+% -> return doc
+% If the request's userCtx.name doesn't match the doc's name
+% -> 404 // Not Found
+% Else
+% -> return doc
+after_doc_read(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc, Db) ->
+ case (catch fabric2_db:check_is_admin(Db)) of
+ ok -> Doc;
+ _ -> throw({forbidden, ?DDOCS_ADMIN_ONLY})
+ end;
+after_doc_read(Doc, Db) ->
+ #user_ctx{name = Name} = fabric2_db:get_user_ctx(Db),
+ DocName = get_doc_name(Doc),
+ case (catch fabric2_db:check_is_admin(Db)) of
+ ok ->
+ Doc;
+ _ when Name =:= DocName ->
+ Doc;
+ _ ->
+ Doc1 = strip_non_public_fields(Doc),
+ case Doc1 of
+ #doc{body={[]}} -> throw(not_found);
+ _ -> Doc1
+ end
+ end.
+
+
+get_doc_name(#doc{id= <<"org.couchdb.user:", Name/binary>>}) ->
+ Name;
+get_doc_name(_) ->
+ undefined.
+
+
+strip_non_public_fields(#doc{body={Props}}=Doc) ->
+ PublicFields = config:get("couch_httpd_auth", "public_fields", ""),
+ Public = re:split(PublicFields, "\\s*,\\s*", [{return, binary}]),
+ Doc#doc{body={[{K, V} || {K, V} <- Props, lists:member(K, Public)]}}.
diff --git a/src/fabric/src/fabric2_util.erl b/src/fabric/src/fabric2_util.erl
new file mode 100644
index 000000000..136762b34
--- /dev/null
+++ b/src/fabric/src/fabric2_util.erl
@@ -0,0 +1,405 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_util).
+
+
+-export([
+ revinfo_to_revs/1,
+ revinfo_to_path/1,
+ sort_revinfos/1,
+ rev_size/1,
+ ldoc_size/1,
+
+ seq_zero_vs/0,
+ seq_max_vs/0,
+
+ user_ctx_to_json/1,
+
+ validate_security_object/1,
+
+ hash_atts/1,
+
+ dbname_ends_with/2,
+
+ get_value/2,
+ get_value/3,
+ to_hex/1,
+ from_hex/1,
+ uuid/0,
+
+ encode_all_doc_key/1,
+ all_docs_view_opts/1,
+
+ iso8601_timestamp/0,
+ now/1,
+ do_recovery/0,
+
+ pmap/2,
+ pmap/3
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+
+
+revinfo_to_revs(RevInfo) ->
+ #{
+ rev_id := {RevPos, Rev},
+ rev_path := RevPath
+ } = RevInfo,
+ {RevPos, [Rev | RevPath]}.
+
+
+revinfo_to_path(RevInfo) ->
+ #{
+ rev_id := {RevPos, Rev},
+ rev_path := RevPath
+ } = RevInfo,
+ Revs = lists:reverse(RevPath, [Rev]),
+ Path = revinfo_to_path(RevInfo, Revs),
+ {RevPos - length(Revs) + 1, Path}.
+
+
+revinfo_to_path(RevInfo, [Rev]) ->
+ {Rev, RevInfo, []};
+
+revinfo_to_path(RevInfo, [Rev | Rest]) ->
+ {Rev, ?REV_MISSING, [revinfo_to_path(RevInfo, Rest)]}.
+
+
+sort_revinfos(RevInfos) ->
+ CmpFun = fun(A, B) -> rev_sort_key(A) > rev_sort_key(B) end,
+ lists:sort(CmpFun, RevInfos).
+
+
+rev_sort_key(#{} = RevInfo) ->
+ #{
+ deleted := Deleted,
+ rev_id := {RevPos, Rev}
+ } = RevInfo,
+ {not Deleted, RevPos, Rev}.
+
+
+rev_size(#doc{} = Doc) ->
+ #doc{
+ id = Id,
+ revs = Revs,
+ body = Body,
+ atts = Atts
+ } = Doc,
+
+ {Start, Rev} = case Revs of
+ {0, []} -> {0, <<>>};
+ {N, [RevId | _]} -> {N, RevId}
+ end,
+
+ lists:sum([
+ size(Id),
+ size(erlfdb_tuple:pack({Start})),
+ size(Rev),
+ 1, % FDB tuple encoding of booleans for deleted flag is 1 byte
+ couch_ejson_size:encoded_size(Body),
+ lists:foldl(fun(Att, Acc) ->
+ couch_att:external_size(Att) + Acc
+ end, 0, Atts)
+ ]).
+
+
+ldoc_size(#doc{id = <<"_local/", _/binary>>} = Doc) ->
+ #doc{
+ id = Id,
+ revs = {0, [Rev]},
+ deleted = Deleted,
+ body = Body
+ } = Doc,
+
+ StoreRev = case Rev of
+ _ when is_integer(Rev) -> integer_to_binary(Rev);
+ _ when is_binary(Rev) -> Rev
+ end,
+
+ case Deleted of
+ true ->
+ 0;
+ false ->
+ lists:sum([
+ size(Id),
+ size(StoreRev),
+ couch_ejson_size:encoded_size(Body)
+ ])
+ end.
+
+
+seq_zero_vs() ->
+ {versionstamp, 0, 0, 0}.
+
+
+seq_max_vs() ->
+ {versionstamp, 18446744073709551615, 65535, 65535}.
+
+
+user_ctx_to_json(Db) ->
+ UserCtx = fabric2_db:get_user_ctx(Db),
+ {[
+ {<<"db">>, fabric2_db:name(Db)},
+ {<<"name">>, UserCtx#user_ctx.name},
+ {<<"roles">>, UserCtx#user_ctx.roles}
+ ]}.
+
+
+validate_security_object({SecProps}) ->
+ Admins = get_value(<<"admins">>, SecProps, {[]}),
+ ok = validate_names_and_roles(Admins),
+
+ % we fallback to readers here for backwards compatibility
+ Readers = get_value(<<"readers">>, SecProps, {[]}),
+ Members = get_value(<<"members">>, SecProps, Readers),
+ ok = validate_names_and_roles(Members).
+
+
+validate_names_and_roles({Props}) when is_list(Props) ->
+ validate_json_list_of_strings(<<"names">>, Props),
+ validate_json_list_of_strings(<<"roles">>, Props);
+validate_names_and_roles(_) ->
+ throw("admins or members must be a JSON list of strings").
+
+
+validate_json_list_of_strings(Member, Props) ->
+ case get_value(Member, Props, []) of
+ Values when is_list(Values) ->
+ NonBinary = lists:filter(fun(V) -> not is_binary(V) end, Values),
+ if NonBinary == [] -> ok; true ->
+ MemberStr = binary_to_list(Member),
+ throw(MemberStr ++ " must be a JSON list of strings")
+ end;
+ _ ->
+ MemberStr = binary_to_list(Member),
+ throw(MemberStr ++ " must be a JSON list of strings")
+ end.
+
+
+hash_atts([]) ->
+ <<>>;
+
+hash_atts(Atts) ->
+ SortedAtts = lists:sort(fun(A, B) ->
+ couch_att:fetch(name, A) =< couch_att:fetch(name, B)
+ end, Atts),
+ Md5St = lists:foldl(fun(Att, Acc) ->
+ {loc, _Db, _DocId, AttId} = couch_att:fetch(data, Att),
+ couch_hash:md5_hash_update(Acc, AttId)
+ end, couch_hash:md5_hash_init(), SortedAtts),
+ couch_hash:md5_hash_final(Md5St).
+
+
+dbname_ends_with(#{} = Db, Suffix) ->
+ dbname_ends_with(fabric2_db:name(Db), Suffix);
+
+dbname_ends_with(DbName, Suffix) when is_binary(DbName), is_binary(Suffix) ->
+ Suffix == filename:basename(DbName).
+
+
+get_value(Key, List) ->
+ get_value(Key, List, undefined).
+
+
+get_value(Key, List, Default) ->
+ case lists:keysearch(Key, 1, List) of
+ {value, {Key,Value}} ->
+ Value;
+ false ->
+ Default
+ end.
+
+
+to_hex(Bin) ->
+ list_to_binary(to_hex_int(Bin)).
+
+
+to_hex_int(<<>>) ->
+ [];
+to_hex_int(<<Hi:4, Lo:4, Rest/binary>>) ->
+ [nibble_to_hex(Hi), nibble_to_hex(Lo) | to_hex(Rest)].
+
+
+nibble_to_hex(I) ->
+ case I of
+ 0 -> $0;
+ 1 -> $1;
+ 2 -> $2;
+ 3 -> $3;
+ 4 -> $4;
+ 5 -> $5;
+ 6 -> $6;
+ 7 -> $7;
+ 8 -> $8;
+ 9 -> $9;
+ 10 -> $a;
+ 11 -> $b;
+ 12 -> $c;
+ 13 -> $d;
+ 14 -> $e;
+ 15 -> $f
+ end.
+
+
+from_hex(Bin) ->
+ iolist_to_binary(from_hex_int(Bin)).
+
+
+from_hex_int(<<>>) ->
+ [];
+from_hex_int(<<Hi:8, Lo:8, RestBinary/binary>>) ->
+ HiNib = hex_to_nibble(Hi),
+ LoNib = hex_to_nibble(Lo),
+ [<<HiNib:4, LoNib:4>> | from_hex_int(RestBinary)];
+from_hex_int(<<BadHex/binary>>) ->
+ erlang:error({invalid_hex, BadHex}).
+
+
+hex_to_nibble(N) ->
+ case N of
+ $0 -> 0;
+ $1 -> 1;
+ $2 -> 2;
+ $3 -> 3;
+ $4 -> 4;
+ $5 -> 5;
+ $6 -> 6;
+ $7 -> 7;
+ $8 -> 8;
+ $9 -> 9;
+ $a -> 10;
+ $A -> 10;
+ $b -> 11;
+ $B -> 11;
+ $c -> 12;
+ $C -> 12;
+ $d -> 13;
+ $D -> 13;
+ $e -> 14;
+ $E -> 14;
+ $f -> 15;
+ $F -> 15;
+ _ -> erlang:error({invalid_hex, N})
+ end.
+
+
+uuid() ->
+ to_hex(crypto:strong_rand_bytes(16)).
+
+
+encode_all_doc_key(B) when is_binary(B) -> B;
+encode_all_doc_key(Term) when Term < <<>> -> <<>>;
+encode_all_doc_key(_) -> <<255>>.
+
+
+all_docs_view_opts(#mrargs{} = Args) ->
+ NS = couch_util:get_value(namespace, Args#mrargs.extra),
+ StartKey = case Args#mrargs.start_key of
+ undefined -> Args#mrargs.start_key_docid;
+ SKey -> SKey
+ end,
+ EndKey = case Args#mrargs.end_key of
+ undefined -> Args#mrargs.end_key_docid;
+ EKey -> EKey
+ end,
+ StartKeyOpts = case StartKey of
+ undefined -> [];
+ _ -> [{start_key, encode_all_doc_key(StartKey)}]
+ end,
+ EndKeyOpts = case {EndKey, Args#mrargs.inclusive_end} of
+ {undefined, _} -> [];
+ {_, false} -> [{end_key_gt, encode_all_doc_key(EndKey)}];
+ {_, true} -> [{end_key, encode_all_doc_key(EndKey)}]
+ end,
+
+ DocOpts = case Args#mrargs.conflicts of
+ true -> [conflicts | Args#mrargs.doc_options];
+ _ -> Args#mrargs.doc_options
+ end,
+
+ [
+ {dir, Args#mrargs.direction},
+ {limit, Args#mrargs.limit},
+ {skip, Args#mrargs.skip},
+ {update_seq, Args#mrargs.update_seq},
+ {namespace, NS},
+ {include_docs, Args#mrargs.include_docs},
+ {doc_opts, DocOpts}
+ ] ++ StartKeyOpts ++ EndKeyOpts.
+
+
+iso8601_timestamp() ->
+ Now = os:timestamp(),
+ {{Year, Month, Date}, {Hour, Minute, Second}} =
+ calendar:now_to_datetime(Now),
+ Format = "~4.10.0B-~2.10.0B-~2.10.0BT~2.10.0B:~2.10.0B:~2.10.0BZ",
+ io_lib:format(Format, [Year, Month, Date, Hour, Minute, Second]).
+
+
+now(ms) ->
+ {Mega, Sec, Micro} = os:timestamp(),
+ (Mega * 1000000 + Sec) * 1000 + round(Micro / 1000);
+now(sec) ->
+ now(ms) div 1000.
+
+
+do_recovery() ->
+ config:get_boolean("couchdb",
+ "enable_database_recovery", false).
+
+
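+% Parallel map: spawn a monitored process per element, then collect the
+% results in the original order, re-raising any error from a worker. The
+% timeout (default 5000 ms) applies per element while waiting for its
+% result.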
+pmap(Fun, Args) ->
+ pmap(Fun, Args, []).
+
+
+pmap(Fun, Args, Opts) ->
+ Refs = lists:map(fun(Arg) ->
+ {_, Ref} = spawn_monitor(fun() -> exit(pmap_exec(Fun, Arg)) end),
+ Ref
+ end, Args),
+ Timeout = fabric2_util:get_value(timeout, Opts, 5000),
+ lists:map(fun(Ref) ->
+ receive
+ {'DOWN', Ref, _, _, {'$res', Res}} ->
+ Res;
+ {'DOWN', Ref, _, _, {'$err', Tag, Reason, Stack}} ->
+ erlang:raise(Tag, Reason, Stack)
+ after Timeout ->
+ error({pmap_timeout, Timeout})
+ end
+ end, Refs).
+
+
+% OTP_RELEASE is defined in OTP 21+ only
+-ifdef(OTP_RELEASE).
+
+pmap_exec(Fun, Arg) ->
+ try
+ {'$res', Fun(Arg)}
+ catch Tag:Reason:Stack ->
+ {'$err', Tag, Reason, Stack}
+ end.
+
+-else.
+
+pmap_exec(Fun, Arg) ->
+ try
+ {'$res', Fun(Arg)}
+ catch Tag:Reason ->
+ {'$err', Tag, Reason, erlang:get_stacktrace()}
+ end.
+
+-endif.
diff --git a/src/fabric/src/fabric_db_create.erl b/src/fabric/src/fabric_db_create.erl
index 03fabb4ea..a2833e6aa 100644
--- a/src/fabric/src/fabric_db_create.erl
+++ b/src/fabric/src/fabric_db_create.erl
@@ -185,44 +185,44 @@ make_document([#shard{dbname=DbName}|_] = Shards, Suffix, Options) ->
db_exists(DbName) -> is_list(catch mem3:shards(DbName)).
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-db_exists_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- [
- fun db_exists_for_existing_db/0,
- fun db_exists_for_missing_db/0
- ]
- }.
-
-
-setup_all() ->
- meck:new(mem3).
-
-
-teardown_all(_) ->
- meck:unload().
-
-
-db_exists_for_existing_db() ->
- Mock = fun(DbName) when is_binary(DbName) ->
- [#shard{dbname = DbName, range = [0,100]}]
- end,
- ok = meck:expect(mem3, shards, Mock),
- ?assertEqual(true, db_exists(<<"foobar">>)),
- ?assertEqual(true, meck:validate(mem3)).
-
-
-db_exists_for_missing_db() ->
- Mock = fun(DbName) ->
- erlang:error(database_does_not_exist, DbName)
- end,
- ok = meck:expect(mem3, shards, Mock),
- ?assertEqual(false, db_exists(<<"foobar">>)),
- ?assertEqual(false, meck:validate(mem3)).
-
--endif.
+%% -ifdef(TEST).
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%% db_exists_test_() ->
+%% {
+%% setup,
+%% fun setup_all/0,
+%% fun teardown_all/1,
+%% [
+%% fun db_exists_for_existing_db/0,
+%% fun db_exists_for_missing_db/0
+%% ]
+%% }.
+%%
+%%
+%% setup_all() ->
+%% meck:new(mem3).
+%%
+%%
+%% teardown_all(_) ->
+%% meck:unload().
+%%
+%%
+%% db_exists_for_existing_db() ->
+%% Mock = fun(DbName) when is_binary(DbName) ->
+%% [#shard{dbname = DbName, range = [0,100]}]
+%% end,
+%% ok = meck:expect(mem3, shards, Mock),
+%% ?assertEqual(true, db_exists(<<"foobar">>)),
+%% ?assertEqual(true, meck:validate(mem3)).
+%%
+%%
+%% db_exists_for_missing_db() ->
+%% Mock = fun(DbName) ->
+%% erlang:error(database_does_not_exist, DbName)
+%% end,
+%% ok = meck:expect(mem3, shards, Mock),
+%% ?assertEqual(false, db_exists(<<"foobar">>)),
+%% ?assertEqual(false, meck:validate(mem3)).
+%%
+%% -endif.
diff --git a/src/fabric/src/fabric_db_info.erl b/src/fabric/src/fabric_db_info.erl
index 40da678e5..6c7d2d177 100644
--- a/src/fabric/src/fabric_db_info.erl
+++ b/src/fabric/src/fabric_db_info.erl
@@ -138,34 +138,34 @@ get_cluster_info(Shards) ->
{ok, [{q, Q}, {n, N}, {w, WR}, {r, WR}]}.
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-get_cluster_info_test_() ->
- {
- setup,
- fun setup/0,
- fun get_cluster_info_test_generator/1
- }.
-
-
-setup() ->
- Quorums = [1, 2, 3],
- Shards = [1, 3, 5, 8, 12, 24],
- [{N, Q} || N <- Quorums, Q <- Shards].
-
-get_cluster_info_test_generator([]) ->
- [];
-get_cluster_info_test_generator([{N, Q} | Rest]) ->
- {generator,
- fun() ->
- Nodes = lists:seq(1, 8),
- Shards = mem3_util:create_partition_map(<<"foo">>, N, Q, Nodes),
- {ok, Info} = get_cluster_info(Shards),
- [
- ?_assertEqual(N, couch_util:get_value(n, Info)),
- ?_assertEqual(Q, couch_util:get_value(q, Info))
- ] ++ get_cluster_info_test_generator(Rest)
- end}.
-
--endif.
+%% -ifdef(TEST).
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%% get_cluster_info_test_() ->
+%% {
+%% setup,
+%% fun setup/0,
+%% fun get_cluster_info_test_generator/1
+%% }.
+%%
+%%
+%% setup() ->
+%% Quorums = [1, 2, 3],
+%% Shards = [1, 3, 5, 8, 12, 24],
+%% [{N, Q} || N <- Quorums, Q <- Shards].
+%%
+%% get_cluster_info_test_generator([]) ->
+%% [];
+%% get_cluster_info_test_generator([{N, Q} | Rest]) ->
+%% {generator,
+%% fun() ->
+%% Nodes = lists:seq(1, 8),
+%% Shards = mem3_util:create_partition_map(<<"foo">>, N, Q, Nodes),
+%% {ok, Info} = get_cluster_info(Shards),
+%% [
+%% ?_assertEqual(N, couch_util:get_value(n, Info)),
+%% ?_assertEqual(Q, couch_util:get_value(q, Info))
+%% ] ++ get_cluster_info_test_generator(Rest)
+%% end}.
+%%
+%% -endif.
diff --git a/src/fabric/src/fabric_doc_open.erl b/src/fabric/src/fabric_doc_open.erl
index 8ef604b60..fe3a79a1f 100644
--- a/src/fabric/src/fabric_doc_open.erl
+++ b/src/fabric/src/fabric_doc_open.erl
@@ -182,429 +182,429 @@ format_reply(Else, _) ->
Else.
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
--define(MECK_MODS, [
- couch_log,
- couch_stats,
- fabric,
- fabric_util,
- mem3,
- rexi,
- rexi_monitor
-]).
-
-
-setup_all() ->
- meck:new(?MECK_MODS, [passthrough]).
-
-
-teardown_all(_) ->
- meck:unload().
-
-
-setup() ->
- meck:reset(?MECK_MODS).
-
-
-teardown(_) ->
- ok.
-
-
-open_doc_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- t_is_r_met(),
- t_handle_message_down(),
- t_handle_message_exit(),
- t_handle_message_reply(),
- t_store_node_revs(),
- t_read_repair(),
- t_handle_response_quorum_met(),
- t_get_doc_info()
- ]
- }
- }.
-
-
-t_is_r_met() ->
- ?_test(begin
- Workers0 = [],
- Workers1 = [nil],
- Workers2 = [nil, nil],
-
- SuccessCases = [
- {{true, foo}, [fabric_util:kv(foo, 2)], 2},
- {{true, foo}, [fabric_util:kv(foo, 3)], 2},
- {{true, foo}, [fabric_util:kv(foo, 1)], 1},
- {{true, foo}, [fabric_util:kv(foo, 2), fabric_util:kv(bar, 1)], 2},
- {{true, bar}, [fabric_util:kv(bar, 1), fabric_util:kv(bar, 2)], 2},
- {{true, bar}, [fabric_util:kv(bar, 2), fabric_util:kv(foo, 1)], 2}
- ],
- lists:foreach(fun({Expect, Replies, Q}) ->
- ?assertEqual(Expect, is_r_met(Workers0, Replies, Q))
- end, SuccessCases),
-
- WaitForMoreCases = [
- {[fabric_util:kv(foo, 1)], 2},
- {[fabric_util:kv(foo, 2)], 3},
- {[fabric_util:kv(foo, 1), fabric_util:kv(bar, 1)], 2}
- ],
- lists:foreach(fun({Replies, Q}) ->
- ?assertEqual(wait_for_more, is_r_met(Workers2, Replies, Q))
- end, WaitForMoreCases),
-
- FailureCases = [
- {Workers0, [fabric_util:kv(foo, 1)], 2},
- {Workers1, [fabric_util:kv(foo, 1)], 2},
- {Workers1, [fabric_util:kv(foo, 1), fabric_util:kv(bar, 1)], 2},
- {Workers1, [fabric_util:kv(foo, 2)], 3}
- ],
- lists:foreach(fun({Workers, Replies, Q}) ->
- ?assertEqual(no_more_workers, is_r_met(Workers, Replies, Q))
- end, FailureCases)
- end).
-
-
-t_handle_message_down() ->
- Node0 = 'foo@localhost',
- Node1 = 'bar@localhost',
- Down0 = {rexi_DOWN, nil, {nil, Node0}, nil},
- Down1 = {rexi_DOWN, nil, {nil, Node1}, nil},
- Workers0 = [#shard{node=Node0} || _ <- [a, b]],
- Worker1 = #shard{node=Node1},
- Workers1 = Workers0 ++ [Worker1],
-
- ?_test(begin
- % Stop when no more workers are left
- ?assertEqual(
- {stop, #acc{workers=[]}},
- handle_message(Down0, nil, #acc{workers=Workers0})
- ),
-
- % Continue when we have more workers
- ?assertEqual(
- {ok, #acc{workers=[Worker1]}},
- handle_message(Down0, nil, #acc{workers=Workers1})
- ),
-
- % A second DOWN removes the remaining workers
- ?assertEqual(
- {stop, #acc{workers=[]}},
- handle_message(Down1, nil, #acc{workers=[Worker1]})
- )
- end).
-
-
-t_handle_message_exit() ->
- Exit = {rexi_EXIT, nil},
- Worker0 = #shard{ref=erlang:make_ref()},
- Worker1 = #shard{ref=erlang:make_ref()},
-
- ?_test(begin
- % Only removes the specified worker
- ?assertEqual(
- {ok, #acc{workers=[Worker1]}},
- handle_message(Exit, Worker0, #acc{workers=[Worker0, Worker1]})
- ),
-
- ?assertEqual(
- {ok, #acc{workers=[Worker0]}},
- handle_message(Exit, Worker1, #acc{workers=[Worker0, Worker1]})
- ),
-
- % We bail if it was the last worker
- ?assertEqual(
- {stop, #acc{workers=[]}},
- handle_message(Exit, Worker0, #acc{workers=[Worker0]})
- )
- end).
-
-
-t_handle_message_reply() ->
- Worker0 = #shard{ref=erlang:make_ref()},
- Worker1 = #shard{ref=erlang:make_ref()},
- Worker2 = #shard{ref=erlang:make_ref()},
- Workers = [Worker0, Worker1, Worker2],
- Acc0 = #acc{workers=Workers, r=2, replies=[]},
-
- ?_test(begin
- meck:expect(rexi, kill_all, fun(_) -> ok end),
-
- % Test that we continue when we haven't met R yet
- ?assertMatch(
- {ok, #acc{
- workers=[Worker0, Worker1],
- replies=[{foo, {foo, 1}}]
- }},
- handle_message(foo, Worker2, Acc0)
- ),
-
- ?assertMatch(
- {ok, #acc{
- workers=[Worker0, Worker1],
- replies=[{bar, {bar, 1}}, {foo, {foo, 1}}]
- }},
- handle_message(bar, Worker2, Acc0#acc{
- replies=[{foo, {foo, 1}}]
- })
- ),
-
- % Test that we don't get a quorum when R isn't met. q_reply
- % isn't set, state remains unchanged, and {stop, NewAcc}
- % is returned. The assertions here are a bit subtle.
-
- ?assertMatch(
- {stop, #acc{workers=[], replies=[{foo, {foo, 1}}]}},
- handle_message(foo, Worker0, Acc0#acc{workers=[Worker0]})
- ),
-
- ?assertMatch(
- {stop, #acc{
- workers=[],
- replies=[{bar, {bar, 1}}, {foo, {foo, 1}}]
- }},
- handle_message(bar, Worker0, Acc0#acc{
- workers=[Worker0],
- replies=[{foo, {foo, 1}}]
- })
- ),
-
- % Check that when R is met we stop with a new state and
- % a q_reply.
-
- ?assertMatch(
- {stop, #acc{
- workers=[],
- replies=[{foo, {foo, 2}}],
- state=r_met,
- q_reply=foo
- }},
- handle_message(foo, Worker1, Acc0#acc{
- workers=[Worker0, Worker1],
- replies=[{foo, {foo, 1}}]
- })
- ),
-
- ?assertEqual(
- {stop, #acc{
- workers=[],
- r=1,
- replies=[{foo, {foo, 1}}],
- state=r_met,
- q_reply=foo
- }},
- handle_message(foo, Worker0, Acc0#acc{r=1})
- ),
-
- ?assertMatch(
- {stop, #acc{
- workers=[],
- replies=[{bar, {bar, 1}}, {foo, {foo, 2}}],
- state=r_met,
- q_reply=foo
- }},
- handle_message(foo, Worker0, Acc0#acc{
- workers=[Worker0],
- replies=[{bar, {bar, 1}}, {foo, {foo, 1}}]
- })
- )
- end).
-
-
-t_store_node_revs() ->
- W1 = #shard{node = w1, ref = erlang:make_ref()},
- W2 = #shard{node = w2, ref = erlang:make_ref()},
- W3 = #shard{node = w3, ref = erlang:make_ref()},
- Foo1 = {ok, #doc{id = <<"bar">>, revs = {1, [<<"foo">>]}}},
- Foo2 = {ok, #doc{id = <<"bar">>, revs = {2, [<<"foo2">>, <<"foo">>]}}},
- NFM = {not_found, missing},
-
- InitAcc = #acc{workers = [W1, W2, W3], replies = [], r = 2},
-
- ?_test(begin
- meck:expect(rexi, kill_all, fun(_) -> ok end),
-
- % Simple case
- {ok, #acc{node_revs = NodeRevs1}} = handle_message(Foo1, W1, InitAcc),
- ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs1),
-
- % Make sure we only hold the head rev
- {ok, #acc{node_revs = NodeRevs2}} = handle_message(Foo2, W1, InitAcc),
- ?assertEqual([{w1, [{2, <<"foo2">>}]}], NodeRevs2),
-
- % Make sure we don't capture anything on error
- {ok, #acc{node_revs = NodeRevs3}} = handle_message(NFM, W1, InitAcc),
- ?assertEqual([], NodeRevs3),
-
- % Make sure we accumulate node revs
- Acc1 = InitAcc#acc{node_revs = [{w1, [{1, <<"foo">>}]}]},
- {ok, #acc{node_revs = NodeRevs4}} = handle_message(Foo2, W2, Acc1),
- ?assertEqual(
- [{w2, [{2, <<"foo2">>}]}, {w1, [{1, <<"foo">>}]}],
- NodeRevs4
- ),
-
- % Make sure rexi_DOWN doesn't modify node_revs
- Down = {rexi_DOWN, nil, {nil, w1}, nil},
- {ok, #acc{node_revs = NodeRevs5}} = handle_message(Down, W2, Acc1),
- ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs5),
-
- % Make sure rexi_EXIT doesn't modify node_revs
- Exit = {rexi_EXIT, reason},
- {ok, #acc{node_revs = NodeRevs6}} = handle_message(Exit, W2, Acc1),
- ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs6),
-
- % Make sure an error doesn't remove any node revs
- {ok, #acc{node_revs = NodeRevs7}} = handle_message(NFM, W2, Acc1),
- ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs7),
-
- % Make sure we have all of our node_revs when meeting
- % quorum
- {ok, Acc2} = handle_message(Foo1, W1, InitAcc),
- {ok, Acc3} = handle_message(Foo2, W2, Acc2),
- {stop, Acc4} = handle_message(NFM, W3, Acc3),
- ?assertEqual(
- [{w2, [{2, <<"foo2">>}]}, {w1, [{1, <<"foo">>}]}],
- Acc4#acc.node_revs
- )
- end).
-
-
-t_read_repair() ->
- Foo1 = {ok, #doc{revs = {1,[<<"foo">>]}}},
- Foo2 = {ok, #doc{revs = {2,[<<"foo2">>,<<"foo">>]}}},
- NFM = {not_found, missing},
-
- ?_test(begin
- meck:expect(couch_log, notice, fun(_, _) -> ok end),
- meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
-
- % Test when we have actual doc data to repair
- meck:expect(fabric, update_docs, fun(_, [_], _) -> {ok, []} end),
- Acc0 = #acc{
- dbname = <<"name">>,
- replies = [fabric_util:kv(Foo1,1)]
- },
- ?assertEqual(Foo1, read_repair(Acc0)),
-
- meck:expect(fabric, update_docs, fun(_, [_, _], _) -> {ok, []} end),
- Acc1 = #acc{
- dbname = <<"name">>,
- replies = [fabric_util:kv(Foo1,1), fabric_util:kv(Foo2,1)]
- },
- ?assertEqual(Foo2, read_repair(Acc1)),
-
- % Test when we have nothing but errors
- Acc2 = #acc{replies=[fabric_util:kv(NFM, 1)]},
- ?assertEqual(NFM, read_repair(Acc2)),
-
- Acc3 = #acc{replies=[fabric_util:kv(NFM,1), fabric_util:kv(foo,2)]},
- ?assertEqual(NFM, read_repair(Acc3)),
-
- Acc4 = #acc{replies=[fabric_util:kv(foo,1), fabric_util:kv(bar,1)]},
- ?assertEqual(bar, read_repair(Acc4))
- end).
-
-
-t_handle_response_quorum_met() ->
- Foo1 = {ok, #doc{revs = {1,[<<"foo">>]}}},
- Foo2 = {ok, #doc{revs = {2,[<<"foo2">>,<<"foo">>]}}},
- Bar1 = {ok, #doc{revs = {1,[<<"bar">>]}}},
-
- ?_test(begin
- meck:expect(couch_log, notice, fun(_, _) -> ok end),
- meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, []} end),
- meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
-
- BasicOkAcc = #acc{
- state=r_met,
- replies=[fabric_util:kv(Foo1,2)],
- q_reply=Foo1
- },
- ?assertEqual(Foo1, handle_response(BasicOkAcc)),
-
- WithAncestorsAcc = #acc{
- state=r_met,
- replies=[fabric_util:kv(Foo1,1), fabric_util:kv(Foo2,2)],
- q_reply=Foo2
- },
- ?assertEqual(Foo2, handle_response(WithAncestorsAcc)),
-
- % This also checks when the quorum isn't the most recent
- % revision.
- DeeperWinsAcc = #acc{
- state=r_met,
- replies=[fabric_util:kv(Foo1,2), fabric_util:kv(Foo2,1)],
- q_reply=Foo1
- },
- ?assertEqual(Foo2, handle_response(DeeperWinsAcc)),
-
- % Check that we return the proper doc based on rev
- % (ie, pos is equal)
- BiggerRevWinsAcc = #acc{
- state=r_met,
- replies=[fabric_util:kv(Foo1,1), fabric_util:kv(Bar1,2)],
- q_reply=Bar1
- },
- ?assertEqual(Foo1, handle_response(BiggerRevWinsAcc))
-
- % r_not_met is a proxy to read_repair so we rely on
- % read_repair_test for those conditions.
- end).
-
-
-t_get_doc_info() ->
- ?_test(begin
- meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, []} end),
- meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
- meck:expect(fabric_util, submit_jobs, fun(_, _, _) -> ok end),
- meck:expect(fabric_util, create_monitors, fun(_) -> ok end),
- meck:expect(rexi_monitor, stop, fun(_) -> ok end),
- meck:expect(mem3, shards, fun(_, _) -> ok end),
- meck:expect(mem3, n, fun(_) -> 3 end),
- meck:expect(mem3, quorum, fun(_) -> 2 end),
-
- meck:expect(fabric_util, recv, fun(_, _, _, _) ->
- {ok, #acc{state = r_not_met}}
- end),
- Rsp1 = fabric_doc_open:go("test", "one", [doc_info]),
- ?assertEqual({error, quorum_not_met}, Rsp1),
-
- Rsp2 = fabric_doc_open:go("test", "one", [{doc_info, full}]),
- ?assertEqual({error, quorum_not_met}, Rsp2),
-
- meck:expect(fabric_util, recv, fun(_, _, _, _) ->
- {ok, #acc{state = r_met, q_reply = not_found}}
- end),
- MissingRsp1 = fabric_doc_open:go("test", "one", [doc_info]),
- ?assertEqual({not_found, missing}, MissingRsp1),
- MissingRsp2 = fabric_doc_open:go("test", "one", [{doc_info, full}]),
- ?assertEqual({not_found, missing}, MissingRsp2),
-
- meck:expect(fabric_util, recv, fun(_, _, _, _) ->
- A = #doc_info{},
- {ok, #acc{state = r_met, q_reply = {ok, A}}}
- end),
- {ok, Rec1} = fabric_doc_open:go("test", "one", [doc_info]),
- ?assert(is_record(Rec1, doc_info)),
-
- meck:expect(fabric_util, recv, fun(_, _, _, _) ->
- A = #full_doc_info{deleted = true},
- {ok, #acc{state = r_met, q_reply = {ok, A}}}
- end),
- Rsp3 = fabric_doc_open:go("test", "one", [{doc_info, full}]),
- ?assertEqual({not_found, deleted}, Rsp3),
- {ok, Rec2} = fabric_doc_open:go("test", "one", [{doc_info, full},deleted]),
- ?assert(is_record(Rec2, full_doc_info))
- end).
-
--endif.
+%% -ifdef(TEST).
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%% -define(MECK_MODS, [
+%% couch_log,
+%% couch_stats,
+%% fabric,
+%% fabric_util,
+%% mem3,
+%% rexi,
+%% rexi_monitor
+%% ]).
+%%
+%%
+%% setup_all() ->
+%% meck:new(?MECK_MODS, [passthrough]).
+%%
+%%
+%% teardown_all(_) ->
+%% meck:unload().
+%%
+%%
+%% setup() ->
+%% meck:reset(?MECK_MODS).
+%%
+%%
+%% teardown(_) ->
+%% ok.
+%%
+%%
+%% open_doc_test_() ->
+%% {
+%% setup,
+%% fun setup_all/0,
+%% fun teardown_all/1,
+%% {
+%% foreach,
+%% fun setup/0,
+%% fun teardown/1,
+%% [
+%% t_is_r_met(),
+%% t_handle_message_down(),
+%% t_handle_message_exit(),
+%% t_handle_message_reply(),
+%% t_store_node_revs(),
+%% t_read_repair(),
+%% t_handle_response_quorum_met(),
+%% t_get_doc_info()
+%% ]
+%% }
+%% }.
+%%
+%%
+%% t_is_r_met() ->
+%% ?_test(begin
+%% Workers0 = [],
+%% Workers1 = [nil],
+%% Workers2 = [nil, nil],
+%%
+%% SuccessCases = [
+%% {{true, foo}, [fabric_util:kv(foo, 2)], 2},
+%% {{true, foo}, [fabric_util:kv(foo, 3)], 2},
+%% {{true, foo}, [fabric_util:kv(foo, 1)], 1},
+%% {{true, foo}, [fabric_util:kv(foo, 2), fabric_util:kv(bar, 1)], 2},
+%% {{true, bar}, [fabric_util:kv(bar, 1), fabric_util:kv(bar, 2)], 2},
+%% {{true, bar}, [fabric_util:kv(bar, 2), fabric_util:kv(foo, 1)], 2}
+%% ],
+%% lists:foreach(fun({Expect, Replies, Q}) ->
+%% ?assertEqual(Expect, is_r_met(Workers0, Replies, Q))
+%% end, SuccessCases),
+%%
+%% WaitForMoreCases = [
+%% {[fabric_util:kv(foo, 1)], 2},
+%% {[fabric_util:kv(foo, 2)], 3},
+%% {[fabric_util:kv(foo, 1), fabric_util:kv(bar, 1)], 2}
+%% ],
+%% lists:foreach(fun({Replies, Q}) ->
+%% ?assertEqual(wait_for_more, is_r_met(Workers2, Replies, Q))
+%% end, WaitForMoreCases),
+%%
+%% FailureCases = [
+%% {Workers0, [fabric_util:kv(foo, 1)], 2},
+%% {Workers1, [fabric_util:kv(foo, 1)], 2},
+%% {Workers1, [fabric_util:kv(foo, 1), fabric_util:kv(bar, 1)], 2},
+%% {Workers1, [fabric_util:kv(foo, 2)], 3}
+%% ],
+%% lists:foreach(fun({Workers, Replies, Q}) ->
+%% ?assertEqual(no_more_workers, is_r_met(Workers, Replies, Q))
+%% end, FailureCases)
+%% end).
+%%
+%%
+%% t_handle_message_down() ->
+%% Node0 = 'foo@localhost',
+%% Node1 = 'bar@localhost',
+%% Down0 = {rexi_DOWN, nil, {nil, Node0}, nil},
+%% Down1 = {rexi_DOWN, nil, {nil, Node1}, nil},
+%% Workers0 = [#shard{node=Node0} || _ <- [a, b]],
+%% Worker1 = #shard{node=Node1},
+%% Workers1 = Workers0 ++ [Worker1],
+%%
+%% ?_test(begin
+%% % Stop when no more workers are left
+%% ?assertEqual(
+%% {stop, #acc{workers=[]}},
+%% handle_message(Down0, nil, #acc{workers=Workers0})
+%% ),
+%%
+%% % Continue when we have more workers
+%% ?assertEqual(
+%% {ok, #acc{workers=[Worker1]}},
+%% handle_message(Down0, nil, #acc{workers=Workers1})
+%% ),
+%%
+%% % A second DOWN removes the remaining workers
+%% ?assertEqual(
+%% {stop, #acc{workers=[]}},
+%% handle_message(Down1, nil, #acc{workers=[Worker1]})
+%% )
+%% end).
+%%
+%%
+%% t_handle_message_exit() ->
+%% Exit = {rexi_EXIT, nil},
+%% Worker0 = #shard{ref=erlang:make_ref()},
+%% Worker1 = #shard{ref=erlang:make_ref()},
+%%
+%% ?_test(begin
+%% % Only removes the specified worker
+%% ?assertEqual(
+%% {ok, #acc{workers=[Worker1]}},
+%% handle_message(Exit, Worker0, #acc{workers=[Worker0, Worker1]})
+%% ),
+%%
+%% ?assertEqual(
+%% {ok, #acc{workers=[Worker0]}},
+%% handle_message(Exit, Worker1, #acc{workers=[Worker0, Worker1]})
+%% ),
+%%
+%% % We bail if it was the last worker
+%% ?assertEqual(
+%% {stop, #acc{workers=[]}},
+%% handle_message(Exit, Worker0, #acc{workers=[Worker0]})
+%% )
+%% end).
+%%
+%%
+%% t_handle_message_reply() ->
+%% Worker0 = #shard{ref=erlang:make_ref()},
+%% Worker1 = #shard{ref=erlang:make_ref()},
+%% Worker2 = #shard{ref=erlang:make_ref()},
+%% Workers = [Worker0, Worker1, Worker2],
+%% Acc0 = #acc{workers=Workers, r=2, replies=[]},
+%%
+%% ?_test(begin
+%% meck:expect(rexi, kill_all, fun(_) -> ok end),
+%%
+%% % Test that we continue when we haven't met R yet
+%% ?assertMatch(
+%% {ok, #acc{
+%% workers=[Worker0, Worker1],
+%% replies=[{foo, {foo, 1}}]
+%% }},
+%% handle_message(foo, Worker2, Acc0)
+%% ),
+%%
+%% ?assertMatch(
+%% {ok, #acc{
+%% workers=[Worker0, Worker1],
+%% replies=[{bar, {bar, 1}}, {foo, {foo, 1}}]
+%% }},
+%% handle_message(bar, Worker2, Acc0#acc{
+%% replies=[{foo, {foo, 1}}]
+%% })
+%% ),
+%%
+%% % Test that we don't get a quorum when R isn't met. q_reply
+%% % isn't set, state remains unchanged, and {stop, NewAcc}
+%% % is returned. The assertions here are a bit subtle.
+%%
+%% ?assertMatch(
+%% {stop, #acc{workers=[], replies=[{foo, {foo, 1}}]}},
+%% handle_message(foo, Worker0, Acc0#acc{workers=[Worker0]})
+%% ),
+%%
+%% ?assertMatch(
+%% {stop, #acc{
+%% workers=[],
+%% replies=[{bar, {bar, 1}}, {foo, {foo, 1}}]
+%% }},
+%% handle_message(bar, Worker0, Acc0#acc{
+%% workers=[Worker0],
+%% replies=[{foo, {foo, 1}}]
+%% })
+%% ),
+%%
+%% % Check that when R is met we stop with a new state and
+%% % a q_reply.
+%%
+%% ?assertMatch(
+%% {stop, #acc{
+%% workers=[],
+%% replies=[{foo, {foo, 2}}],
+%% state=r_met,
+%% q_reply=foo
+%% }},
+%% handle_message(foo, Worker1, Acc0#acc{
+%% workers=[Worker0, Worker1],
+%% replies=[{foo, {foo, 1}}]
+%% })
+%% ),
+%%
+%% ?assertEqual(
+%% {stop, #acc{
+%% workers=[],
+%% r=1,
+%% replies=[{foo, {foo, 1}}],
+%% state=r_met,
+%% q_reply=foo
+%% }},
+%% handle_message(foo, Worker0, Acc0#acc{r=1})
+%% ),
+%%
+%% ?assertMatch(
+%% {stop, #acc{
+%% workers=[],
+%% replies=[{bar, {bar, 1}}, {foo, {foo, 2}}],
+%% state=r_met,
+%% q_reply=foo
+%% }},
+%% handle_message(foo, Worker0, Acc0#acc{
+%% workers=[Worker0],
+%% replies=[{bar, {bar, 1}}, {foo, {foo, 1}}]
+%% })
+%% )
+%% end).
+%%
+%%
+%% t_store_node_revs() ->
+%% W1 = #shard{node = w1, ref = erlang:make_ref()},
+%% W2 = #shard{node = w2, ref = erlang:make_ref()},
+%% W3 = #shard{node = w3, ref = erlang:make_ref()},
+%% Foo1 = {ok, #doc{id = <<"bar">>, revs = {1, [<<"foo">>]}}},
+%% Foo2 = {ok, #doc{id = <<"bar">>, revs = {2, [<<"foo2">>, <<"foo">>]}}},
+%% NFM = {not_found, missing},
+%%
+%% InitAcc = #acc{workers = [W1, W2, W3], replies = [], r = 2},
+%%
+%% ?_test(begin
+%% meck:expect(rexi, kill_all, fun(_) -> ok end),
+%%
+%% % Simple case
+%% {ok, #acc{node_revs = NodeRevs1}} = handle_message(Foo1, W1, InitAcc),
+%% ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs1),
+%%
+%% % Make sure we only hold the head rev
+%% {ok, #acc{node_revs = NodeRevs2}} = handle_message(Foo2, W1, InitAcc),
+%% ?assertEqual([{w1, [{2, <<"foo2">>}]}], NodeRevs2),
+%%
+%% % Make sure we don't capture anything on error
+%% {ok, #acc{node_revs = NodeRevs3}} = handle_message(NFM, W1, InitAcc),
+%% ?assertEqual([], NodeRevs3),
+%%
+%% % Make sure we accumulate node revs
+%% Acc1 = InitAcc#acc{node_revs = [{w1, [{1, <<"foo">>}]}]},
+%% {ok, #acc{node_revs = NodeRevs4}} = handle_message(Foo2, W2, Acc1),
+%% ?assertEqual(
+%% [{w2, [{2, <<"foo2">>}]}, {w1, [{1, <<"foo">>}]}],
+%% NodeRevs4
+%% ),
+%%
+%% % Make sure rexi_DOWN doesn't modify node_revs
+%% Down = {rexi_DOWN, nil, {nil, w1}, nil},
+%% {ok, #acc{node_revs = NodeRevs5}} = handle_message(Down, W2, Acc1),
+%% ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs5),
+%%
+%% % Make sure rexi_EXIT doesn't modify node_revs
+%% Exit = {rexi_EXIT, reason},
+%% {ok, #acc{node_revs = NodeRevs6}} = handle_message(Exit, W2, Acc1),
+%% ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs6),
+%%
+%% % Make sure an error doesn't remove any node revs
+%% {ok, #acc{node_revs = NodeRevs7}} = handle_message(NFM, W2, Acc1),
+%% ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs7),
+%%
+%% % Make sure we have all of our node_revs when meeting
+%% % quorum
+%% {ok, Acc2} = handle_message(Foo1, W1, InitAcc),
+%% {ok, Acc3} = handle_message(Foo2, W2, Acc2),
+%% {stop, Acc4} = handle_message(NFM, W3, Acc3),
+%% ?assertEqual(
+%% [{w2, [{2, <<"foo2">>}]}, {w1, [{1, <<"foo">>}]}],
+%% Acc4#acc.node_revs
+%% )
+%% end).
+%%
+%%
+%% t_read_repair() ->
+%% Foo1 = {ok, #doc{revs = {1,[<<"foo">>]}}},
+%% Foo2 = {ok, #doc{revs = {2,[<<"foo2">>,<<"foo">>]}}},
+%% NFM = {not_found, missing},
+%%
+%% ?_test(begin
+%% meck:expect(couch_log, notice, fun(_, _) -> ok end),
+%% meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
+%%
+%% % Test when we have actual doc data to repair
+%% meck:expect(fabric, update_docs, fun(_, [_], _) -> {ok, []} end),
+%% Acc0 = #acc{
+%% dbname = <<"name">>,
+%% replies = [fabric_util:kv(Foo1,1)]
+%% },
+%% ?assertEqual(Foo1, read_repair(Acc0)),
+%%
+%% meck:expect(fabric, update_docs, fun(_, [_, _], _) -> {ok, []} end),
+%% Acc1 = #acc{
+%% dbname = <<"name">>,
+%% replies = [fabric_util:kv(Foo1,1), fabric_util:kv(Foo2,1)]
+%% },
+%% ?assertEqual(Foo2, read_repair(Acc1)),
+%%
+%% % Test when we have nothing but errors
+%% Acc2 = #acc{replies=[fabric_util:kv(NFM, 1)]},
+%% ?assertEqual(NFM, read_repair(Acc2)),
+%%
+%% Acc3 = #acc{replies=[fabric_util:kv(NFM,1), fabric_util:kv(foo,2)]},
+%% ?assertEqual(NFM, read_repair(Acc3)),
+%%
+%% Acc4 = #acc{replies=[fabric_util:kv(foo,1), fabric_util:kv(bar,1)]},
+%% ?assertEqual(bar, read_repair(Acc4))
+%% end).
+%%
+%%
+%% t_handle_response_quorum_met() ->
+%% Foo1 = {ok, #doc{revs = {1,[<<"foo">>]}}},
+%% Foo2 = {ok, #doc{revs = {2,[<<"foo2">>,<<"foo">>]}}},
+%% Bar1 = {ok, #doc{revs = {1,[<<"bar">>]}}},
+%%
+%% ?_test(begin
+%% meck:expect(couch_log, notice, fun(_, _) -> ok end),
+%% meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, []} end),
+%% meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
+%%
+%% BasicOkAcc = #acc{
+%% state=r_met,
+%% replies=[fabric_util:kv(Foo1,2)],
+%% q_reply=Foo1
+%% },
+%% ?assertEqual(Foo1, handle_response(BasicOkAcc)),
+%%
+%% WithAncestorsAcc = #acc{
+%% state=r_met,
+%% replies=[fabric_util:kv(Foo1,1), fabric_util:kv(Foo2,2)],
+%% q_reply=Foo2
+%% },
+%% ?assertEqual(Foo2, handle_response(WithAncestorsAcc)),
+%%
+%% % This also checks when the quorum isn't the most recent
+%% % revision.
+%% DeeperWinsAcc = #acc{
+%% state=r_met,
+%% replies=[fabric_util:kv(Foo1,2), fabric_util:kv(Foo2,1)],
+%% q_reply=Foo1
+%% },
+%% ?assertEqual(Foo2, handle_response(DeeperWinsAcc)),
+%%
+%% % Check that we return the proper doc based on rev
+%% % (ie, pos is equal)
+%% BiggerRevWinsAcc = #acc{
+%% state=r_met,
+%% replies=[fabric_util:kv(Foo1,1), fabric_util:kv(Bar1,2)],
+%% q_reply=Bar1
+%% },
+%% ?assertEqual(Foo1, handle_response(BiggerRevWinsAcc))
+%%
+%% % r_not_met is a proxy to read_repair so we rely on
+%% % read_repair_test for those conditions.
+%% end).
+%%
+%%
+%% t_get_doc_info() ->
+%% ?_test(begin
+%% meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, []} end),
+%% meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
+%% meck:expect(fabric_util, submit_jobs, fun(_, _, _) -> ok end),
+%% meck:expect(fabric_util, create_monitors, fun(_) -> ok end),
+%% meck:expect(rexi_monitor, stop, fun(_) -> ok end),
+%% meck:expect(mem3, shards, fun(_, _) -> ok end),
+%% meck:expect(mem3, n, fun(_) -> 3 end),
+%% meck:expect(mem3, quorum, fun(_) -> 2 end),
+%%
+%% meck:expect(fabric_util, recv, fun(_, _, _, _) ->
+%% {ok, #acc{state = r_not_met}}
+%% end),
+%% Rsp1 = fabric_doc_open:go("test", "one", [doc_info]),
+%% ?assertEqual({error, quorum_not_met}, Rsp1),
+%%
+%% Rsp2 = fabric_doc_open:go("test", "one", [{doc_info, full}]),
+%% ?assertEqual({error, quorum_not_met}, Rsp2),
+%%
+%% meck:expect(fabric_util, recv, fun(_, _, _, _) ->
+%% {ok, #acc{state = r_met, q_reply = not_found}}
+%% end),
+%% MissingRsp1 = fabric_doc_open:go("test", "one", [doc_info]),
+%% ?assertEqual({not_found, missing}, MissingRsp1),
+%% MissingRsp2 = fabric_doc_open:go("test", "one", [{doc_info, full}]),
+%% ?assertEqual({not_found, missing}, MissingRsp2),
+%%
+%% meck:expect(fabric_util, recv, fun(_, _, _, _) ->
+%% A = #doc_info{},
+%% {ok, #acc{state = r_met, q_reply = {ok, A}}}
+%% end),
+%% {ok, Rec1} = fabric_doc_open:go("test", "one", [doc_info]),
+%% ?assert(is_record(Rec1, doc_info)),
+%%
+%% meck:expect(fabric_util, recv, fun(_, _, _, _) ->
+%% A = #full_doc_info{deleted = true},
+%% {ok, #acc{state = r_met, q_reply = {ok, A}}}
+%% end),
+%% Rsp3 = fabric_doc_open:go("test", "one", [{doc_info, full}]),
+%% ?assertEqual({not_found, deleted}, Rsp3),
+%% {ok, Rec2} = fabric_doc_open:go("test", "one", [{doc_info, full},deleted]),
+%% ?assert(is_record(Rec2, full_doc_info))
+%% end).
+%%
+%% -endif.
diff --git a/src/fabric/src/fabric_doc_open_revs.erl b/src/fabric/src/fabric_doc_open_revs.erl
index 3d7b9dc3c..aa7f53e9b 100644
--- a/src/fabric/src/fabric_doc_open_revs.erl
+++ b/src/fabric/src/fabric_doc_open_revs.erl
@@ -313,487 +313,487 @@ collapse_duplicate_revs_int([Reply | Rest]) ->
[Reply | collapse_duplicate_revs(Rest)].
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-
-setup_all() ->
- config:start_link([]),
- meck:new([fabric, couch_stats, couch_log]),
- meck:new(fabric_util, [passthrough]),
- meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, nil} end),
- meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
- meck:expect(couch_log, notice, fun(_, _) -> ok end),
- meck:expect(fabric_util, cleanup, fun(_) -> ok end).
-
-
-
-teardown_all(_) ->
- meck:unload(),
- config:stop().
-
-
-setup() ->
- meck:reset([
- couch_log,
- couch_stats,
- fabric,
- fabric_util
- ]).
-
-
-teardown(_) ->
- ok.
-
-
-state0(Revs, Latest) ->
- #state{
- worker_count = 3,
- workers =
- [#shard{node='node1'}, #shard{node='node2'}, #shard{node='node3'}],
- r = 2,
- revs = Revs,
- latest = Latest
- }.
-
-
-revs() -> [{1,<<"foo">>}, {1,<<"bar">>}, {1,<<"baz">>}].
-
-
-foo1() -> {ok, #doc{revs = {1, [<<"foo">>]}}}.
-foo2() -> {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}}.
-foo2stemmed() -> {ok, #doc{revs = {2, [<<"foo2">>]}}}.
-fooNF() -> {{not_found, missing}, {1,<<"foo">>}}.
-foo2NF() -> {{not_found, missing}, {2, <<"foo2">>}}.
-bar1() -> {ok, #doc{revs = {1, [<<"bar">>]}}}.
-barNF() -> {{not_found, missing}, {1,<<"bar">>}}.
-bazNF() -> {{not_found, missing}, {1,<<"baz">>}}.
-baz1() -> {ok, #doc{revs = {1, [<<"baz">>]}}}.
-
-
-
-open_doc_revs_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- {
- foreach,
- fun setup/0,
- fun teardown/1,
- [
- check_empty_response_not_quorum(),
- check_basic_response(),
- check_finish_quorum(),
- check_finish_quorum_newer(),
- check_no_quorum_on_second(),
- check_done_on_third(),
- check_specific_revs_first_msg(),
- check_revs_done_on_agreement(),
- check_latest_true(),
- check_ancestor_counted_in_quorum(),
- check_not_found_counts_for_descendant(),
- check_worker_error_skipped(),
- check_quorum_only_counts_valid_responses(),
- check_empty_list_when_no_workers_reply(),
- check_node_rev_stored(),
- check_node_rev_store_head_only(),
- check_node_rev_store_multiple(),
- check_node_rev_dont_store_errors(),
- check_node_rev_store_non_errors(),
- check_node_rev_store_concatenate(),
- check_node_rev_store_concantenate_multiple(),
- check_node_rev_unmodified_on_down_or_exit(),
- check_not_found_replies_are_removed_when_doc_found(),
- check_not_found_returned_when_one_of_docs_not_found(),
- check_not_found_returned_when_doc_not_found(),
- check_longer_rev_list_returned(),
- check_longer_rev_list_not_combined(),
- check_not_found_removed_and_longer_rev_list()
- ]
- }
- }.
-
-
-% Tests for revs=all
-
-
-check_empty_response_not_quorum() ->
- % Simple smoke test that we don't think we're
- % done with a first empty response
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
- W3 = #shard{node='node3'},
- ?_assertMatch(
- {ok, #state{workers = [W2, W3]}},
- handle_message({ok, []}, W1, state0(all, false))
- ).
-
-
-check_basic_response() ->
- % Check that we've handled a response
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
- W3 = #shard{node='node3'},
- ?_assertMatch(
- {ok, #state{reply_count = 1, workers = [W2, W3]}},
- handle_message({ok, [foo1(), bar1()]}, W1, state0(all, false))
- ).
-
-
-check_finish_quorum() ->
- % Two messages with the same revisions means we're done
- ?_test(begin
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
- S0 = state0(all, false),
- {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
- Expect = {stop, [bar1(), foo1()]},
- ?assertEqual(Expect, handle_message({ok, [foo1(), bar1()]}, W2, S1))
- end).
-
-
-check_finish_quorum_newer() ->
- % We count a descendant of a revision for quorum so
- % foo1 should count for foo2 which means we're finished.
- % We also validate that read_repair was triggered.
- ?_test(begin
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
- S0 = state0(all, false),
- {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
- Expect = {stop, [bar1(), foo2()]},
- ok = meck:reset(fabric),
- ?assertEqual(Expect, handle_message({ok, [foo2(), bar1()]}, W2, S1)),
- ok = meck:wait(fabric, update_docs, '_', 5000),
- ?assertMatch(
- [{_, {fabric, update_docs, [_, _, _]}, _}],
- meck:history(fabric)
- )
- end).
-
-
-check_no_quorum_on_second() ->
- % Quorum not yet met for the foo revision so we
- % would wait for w3
- ?_test(begin
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
- W3 = #shard{node='node3'},
- S0 = state0(all, false),
- {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
- ?assertMatch(
- {ok, #state{workers = [W3]}},
- handle_message({ok, [bar1()]}, W2, S1)
- )
- end).
-
-
-check_done_on_third() ->
- % The third message of three means we're done no matter
- % what. Every revision seen in this pattern should be
- % included.
- ?_test(begin
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
- W3 = #shard{node='node3'},
- S0 = state0(all, false),
- {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
- {ok, S2} = handle_message({ok, [bar1()]}, W2, S1),
- Expect = {stop, [bar1(), foo1()]},
- ?assertEqual(Expect, handle_message({ok, [bar1()]}, W3, S2))
- end).
-
-
-% Tests for a specific list of revs
-
-
-check_specific_revs_first_msg() ->
- ?_test(begin
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
- W3 = #shard{node='node3'},
- S0 = state0(revs(), false),
- ?assertMatch(
- {ok, #state{reply_count = 1, workers = [W2, W3]}},
- handle_message({ok, [foo1(), bar1(), bazNF()]}, W1, S0)
- )
- end).
-
-
-check_revs_done_on_agreement() ->
- ?_test(begin
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
- S0 = state0(revs(), false),
- Msg = {ok, [foo1(), bar1(), bazNF()]},
- {ok, S1} = handle_message(Msg, W1, S0),
- Expect = {stop, [bar1(), foo1(), bazNF()]},
- ?assertEqual(Expect, handle_message(Msg, W2, S1))
- end).
-
-
-check_latest_true() ->
- ?_test(begin
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
- S0 = state0(revs(), true),
- Msg1 = {ok, [foo2(), bar1(), bazNF()]},
- Msg2 = {ok, [foo2(), bar1(), bazNF()]},
- {ok, S1} = handle_message(Msg1, W1, S0),
- Expect = {stop, [bar1(), foo2(), bazNF()]},
- ?assertEqual(Expect, handle_message(Msg2, W2, S1))
- end).
-
-
-check_ancestor_counted_in_quorum() ->
- ?_test(begin
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
- S0 = state0(revs(), true),
- Msg1 = {ok, [foo1(), bar1(), bazNF()]},
- Msg2 = {ok, [foo2(), bar1(), bazNF()]},
- Expect = {stop, [bar1(), foo2(), bazNF()]},
-
- % Older first
- {ok, S1} = handle_message(Msg1, W1, S0),
- ?assertEqual(Expect, handle_message(Msg2, W2, S1)),
-
- % Newer first
- {ok, S2} = handle_message(Msg2, W2, S0),
- ?assertEqual(Expect, handle_message(Msg1, W1, S2))
- end).
-
-
-check_not_found_counts_for_descendant() ->
- ?_test(begin
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
- S0 = state0(revs(), true),
- Msg1 = {ok, [foo1(), bar1(), bazNF()]},
- Msg2 = {ok, [foo1(), bar1(), baz1()]},
- Expect = {stop, [bar1(), baz1(), foo1()]},
-
- % not_found first
- {ok, S1} = handle_message(Msg1, W1, S0),
- ?assertEqual(Expect, handle_message(Msg2, W2, S1)),
-
- % not_found second
- {ok, S2} = handle_message(Msg2, W2, S0),
- ?assertEqual(Expect, handle_message(Msg1, W1, S2))
- end).
-
-
-check_worker_error_skipped() ->
- ?_test(begin
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
- W3 = #shard{node='node3'},
- S0 = state0(revs(), true),
- Msg1 = {ok, [foo1(), bar1(), baz1()]},
- Msg2 = {rexi_EXIT, reason},
- Msg3 = {ok, [foo1(), bar1(), baz1()]},
- Expect = {stop, [bar1(), baz1(), foo1()]},
-
- {ok, S1} = handle_message(Msg1, W1, S0),
- {ok, S2} = handle_message(Msg2, W2, S1),
- ?assertEqual(Expect, handle_message(Msg3, W3, S2))
- end).
-
-
-check_quorum_only_counts_valid_responses() ->
- ?_test(begin
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
- W3 = #shard{node='node3'},
- S0 = state0(revs(), true),
- Msg1 = {rexi_EXIT, reason},
- Msg2 = {rexi_EXIT, reason},
- Msg3 = {ok, [foo1(), bar1(), baz1()]},
- Expect = {stop, [bar1(), baz1(), foo1()]},
-
- {ok, S1} = handle_message(Msg1, W1, S0),
- {ok, S2} = handle_message(Msg2, W2, S1),
- ?assertEqual(Expect, handle_message(Msg3, W3, S2))
- end).
-
-
-check_empty_list_when_no_workers_reply() ->
- ?_test(begin
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
- W3 = #shard{node='node3'},
- S0 = state0(revs(), true),
- Msg1 = {rexi_EXIT, reason},
- Msg2 = {rexi_EXIT, reason},
- Msg3 = {rexi_DOWN, nodedown, {nil, node()}, nil},
- Expect = {stop, all_workers_died},
-
- {ok, S1} = handle_message(Msg1, W1, S0),
- {ok, S2} = handle_message(Msg2, W2, S1),
- ?assertEqual(Expect, handle_message(Msg3, W3, S2))
- end).
-
-
-check_node_rev_stored() ->
- ?_test(begin
- W1 = #shard{node = node1},
- S0 = state0([], true),
-
- {ok, S1} = handle_message({ok, [foo1()]}, W1, S0),
- ?assertEqual([{node1, [{1, <<"foo">>}]}], S1#state.node_revs)
- end).
-
-
-check_node_rev_store_head_only() ->
- ?_test(begin
- W1 = #shard{node = node1},
- S0 = state0([], true),
-
- {ok, S1} = handle_message({ok, [foo2()]}, W1, S0),
- ?assertEqual([{node1, [{2, <<"foo2">>}]}], S1#state.node_revs)
- end).
-
-
-check_node_rev_store_multiple() ->
- ?_test(begin
- W1 = #shard{node = node1},
- S0 = state0([], true),
-
- {ok, S1} = handle_message({ok, [foo1(), foo2()]}, W1, S0),
- ?assertEqual(
- [{node1, [{2, <<"foo2">>}, {1, <<"foo">>}]}],
- S1#state.node_revs
- )
- end).
-
-
-check_node_rev_dont_store_errors() ->
- ?_test(begin
- W1 = #shard{node = node1},
- S0 = state0([], true),
-
- {ok, S1} = handle_message({ok, [barNF()]}, W1, S0),
- ?assertEqual([], S1#state.node_revs)
- end).
-
-
-check_node_rev_store_non_errors() ->
- ?_test(begin
- W1 = #shard{node = node1},
- S0 = state0([], true),
-
- {ok, S1} = handle_message({ok, [foo1(), barNF()]}, W1, S0),
- ?assertEqual([{node1, [{1, <<"foo">>}]}], S1#state.node_revs)
- end).
-
-
-check_node_rev_store_concatenate() ->
- ?_test(begin
- W2 = #shard{node = node2},
- S0 = state0([], true),
- S1 = S0#state{node_revs = [{node1, [{1, <<"foo">>}]}]},
-
- {ok, S2} = handle_message({ok, [foo2()]}, W2, S1),
- ?assertEqual(
- [{node2, [{2, <<"foo2">>}]}, {node1, [{1, <<"foo">>}]}],
- S2#state.node_revs
- )
- end).
-
-
-check_node_rev_store_concantenate_multiple() ->
- ?_test(begin
- W2 = #shard{node = node2},
- S0 = state0([], true),
- S1 = S0#state{node_revs = [{node1, [{1, <<"foo">>}]}]},
-
- {ok, S2} = handle_message({ok, [foo2(), bar1()]}, W2, S1),
- ?assertEqual(
- [
- {node2, [{1, <<"bar">>}, {2, <<"foo2">>}]},
- {node1, [{1, <<"foo">>}]}
- ],
- S2#state.node_revs
- )
- end).
-
-
-check_node_rev_unmodified_on_down_or_exit() ->
- ?_test(begin
- W2 = #shard{node = node2},
- S0 = state0([], true),
- S1 = S0#state{node_revs = [{node1, [{1, <<"foo">>}]}]},
-
- Down = {rexi_DOWN, nodedown, {nil, node()}, nil},
- {ok, S2} = handle_message(Down, W2, S1),
- ?assertEqual(
- [{node1, [{1, <<"foo">>}]}],
- S2#state.node_revs
- ),
-
- Exit = {rexi_EXIT, reason},
- {ok, S3} = handle_message(Exit, W2, S1),
- ?assertEqual(
- [{node1, [{1, <<"foo">>}]}],
- S3#state.node_revs
- )
- end).
-
-
-check_not_found_replies_are_removed_when_doc_found() ->
- ?_test(begin
- Replies = replies_to_dict([foo1(), bar1(), fooNF()]),
- Expect = [bar1(), foo1()],
- ?assertEqual(Expect, dict_format_replies(Replies))
- end).
-
-check_not_found_returned_when_one_of_docs_not_found() ->
- ?_test(begin
- Replies = replies_to_dict([foo1(), foo2(), barNF()]),
- Expect = [foo1(), foo2(), barNF()],
- ?assertEqual(Expect, dict_format_replies(Replies))
- end).
-
-check_not_found_returned_when_doc_not_found() ->
- ?_test(begin
- Replies = replies_to_dict([fooNF(), barNF(), bazNF()]),
- Expect = [barNF(), bazNF(), fooNF()],
- ?assertEqual(Expect, dict_format_replies(Replies))
- end).
-
-check_longer_rev_list_returned() ->
- ?_test(begin
- Replies = replies_to_dict([foo2(), foo2stemmed()]),
- Expect = [foo2()],
- ?assertEqual(2, length(Replies)),
- ?assertEqual(Expect, dict_format_replies(Replies))
- end).
-
-check_longer_rev_list_not_combined() ->
- ?_test(begin
- Replies = replies_to_dict([foo2(), foo2stemmed(), bar1()]),
- Expect = [bar1(), foo2()],
- ?assertEqual(3, length(Replies)),
- ?assertEqual(Expect, dict_format_replies(Replies))
- end).
-
-check_not_found_removed_and_longer_rev_list() ->
- ?_test(begin
- Replies = replies_to_dict([foo2(), foo2stemmed(), foo2NF()]),
- Expect = [foo2()],
- ?assertEqual(3, length(Replies)),
- ?assertEqual(Expect, dict_format_replies(Replies))
- end).
-
-
-replies_to_dict(Replies) ->
- [reply_to_element(R) || R <- Replies].
-
-reply_to_element({ok, #doc{revs = Revs}} = Reply) ->
- {_, [Rev | _]} = Revs,
- {{Rev, Revs}, {Reply, 1}};
-reply_to_element(Reply) ->
- {Reply, {Reply, 1}}.
-
--endif.
+%% -ifdef(TEST).
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%%
+%% setup_all() ->
+%% config:start_link([]),
+%% meck:new([fabric, couch_stats, couch_log]),
+%% meck:new(fabric_util, [passthrough]),
+%% meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, nil} end),
+%% meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
+%% meck:expect(couch_log, notice, fun(_, _) -> ok end),
+%% meck:expect(fabric_util, cleanup, fun(_) -> ok end).
+%%
+%%
+%%
+%% teardown_all(_) ->
+%% meck:unload(),
+%% config:stop().
+%%
+%%
+%% setup() ->
+%% meck:reset([
+%% couch_log,
+%% couch_stats,
+%% fabric,
+%% fabric_util
+%% ]).
+%%
+%%
+%% teardown(_) ->
+%% ok.
+%%
+%%
+%% state0(Revs, Latest) ->
+%% #state{
+%% worker_count = 3,
+%% workers =
+%% [#shard{node='node1'}, #shard{node='node2'}, #shard{node='node3'}],
+%% r = 2,
+%% revs = Revs,
+%% latest = Latest
+%% }.
+%%
+%%
+%% revs() -> [{1,<<"foo">>}, {1,<<"bar">>}, {1,<<"baz">>}].
+%%
+%%
+%% foo1() -> {ok, #doc{revs = {1, [<<"foo">>]}}}.
+%% foo2() -> {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}}.
+%% foo2stemmed() -> {ok, #doc{revs = {2, [<<"foo2">>]}}}.
+%% fooNF() -> {{not_found, missing}, {1,<<"foo">>}}.
+%% foo2NF() -> {{not_found, missing}, {2, <<"foo2">>}}.
+%% bar1() -> {ok, #doc{revs = {1, [<<"bar">>]}}}.
+%% barNF() -> {{not_found, missing}, {1,<<"bar">>}}.
+%% bazNF() -> {{not_found, missing}, {1,<<"baz">>}}.
+%% baz1() -> {ok, #doc{revs = {1, [<<"baz">>]}}}.
+%%
+%%
+%%
+%% open_doc_revs_test_() ->
+%% {
+%% setup,
+%% fun setup_all/0,
+%% fun teardown_all/1,
+%% {
+%% foreach,
+%% fun setup/0,
+%% fun teardown/1,
+%% [
+%% check_empty_response_not_quorum(),
+%% check_basic_response(),
+%% check_finish_quorum(),
+%% check_finish_quorum_newer(),
+%% check_no_quorum_on_second(),
+%% check_done_on_third(),
+%% check_specific_revs_first_msg(),
+%% check_revs_done_on_agreement(),
+%% check_latest_true(),
+%% check_ancestor_counted_in_quorum(),
+%% check_not_found_counts_for_descendant(),
+%% check_worker_error_skipped(),
+%% check_quorum_only_counts_valid_responses(),
+%% check_empty_list_when_no_workers_reply(),
+%% check_node_rev_stored(),
+%% check_node_rev_store_head_only(),
+%% check_node_rev_store_multiple(),
+%% check_node_rev_dont_store_errors(),
+%% check_node_rev_store_non_errors(),
+%% check_node_rev_store_concatenate(),
+%% check_node_rev_store_concantenate_multiple(),
+%% check_node_rev_unmodified_on_down_or_exit(),
+%% check_not_found_replies_are_removed_when_doc_found(),
+%% check_not_found_returned_when_one_of_docs_not_found(),
+%% check_not_found_returned_when_doc_not_found(),
+%% check_longer_rev_list_returned(),
+%% check_longer_rev_list_not_combined(),
+%% check_not_found_removed_and_longer_rev_list()
+%% ]
+%% }
+%% }.
+%%
+%%
+%% % Tests for revs=all
+%%
+%%
+%% check_empty_response_not_quorum() ->
+%% % Simple smoke test that we don't think we're
+%% % done with a first empty response
+%% W1 = #shard{node='node1'},
+%% W2 = #shard{node='node2'},
+%% W3 = #shard{node='node3'},
+%% ?_assertMatch(
+%% {ok, #state{workers = [W2, W3]}},
+%% handle_message({ok, []}, W1, state0(all, false))
+%% ).
+%%
+%%
+%% check_basic_response() ->
+%% % Check that we've handled a response
+%% W1 = #shard{node='node1'},
+%% W2 = #shard{node='node2'},
+%% W3 = #shard{node='node3'},
+%% ?_assertMatch(
+%% {ok, #state{reply_count = 1, workers = [W2, W3]}},
+%% handle_message({ok, [foo1(), bar1()]}, W1, state0(all, false))
+%% ).
+%%
+%%
+%% check_finish_quorum() ->
+%% % Two messages with the same revisions means we're done
+%% ?_test(begin
+%% W1 = #shard{node='node1'},
+%% W2 = #shard{node='node2'},
+%% S0 = state0(all, false),
+%% {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
+%% Expect = {stop, [bar1(), foo1()]},
+%% ?assertEqual(Expect, handle_message({ok, [foo1(), bar1()]}, W2, S1))
+%% end).
+%%
+%%
+%% check_finish_quorum_newer() ->
+%% % We count a descendant of a revision for quorum so
+%% % foo1 should count for foo2 which means we're finished.
+%% % We also validate that read_repair was triggered.
+%% ?_test(begin
+%% W1 = #shard{node='node1'},
+%% W2 = #shard{node='node2'},
+%% S0 = state0(all, false),
+%% {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
+%% Expect = {stop, [bar1(), foo2()]},
+%% ok = meck:reset(fabric),
+%% ?assertEqual(Expect, handle_message({ok, [foo2(), bar1()]}, W2, S1)),
+%% ok = meck:wait(fabric, update_docs, '_', 5000),
+%% ?assertMatch(
+%% [{_, {fabric, update_docs, [_, _, _]}, _}],
+%% meck:history(fabric)
+%% )
+%% end).
+%%
+%%
+%% check_no_quorum_on_second() ->
+%% % Quorum not yet met for the foo revision so we
+%% % would wait for w3
+%% ?_test(begin
+%% W1 = #shard{node='node1'},
+%% W2 = #shard{node='node2'},
+%% W3 = #shard{node='node3'},
+%% S0 = state0(all, false),
+%% {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
+%% ?assertMatch(
+%% {ok, #state{workers = [W3]}},
+%% handle_message({ok, [bar1()]}, W2, S1)
+%% )
+%% end).
+%%
+%%
+%% check_done_on_third() ->
+%% % The third message of three means we're done no matter
+%% % what. Every revision seen in this pattern should be
+%% % included.
+%% ?_test(begin
+%% W1 = #shard{node='node1'},
+%% W2 = #shard{node='node2'},
+%% W3 = #shard{node='node3'},
+%% S0 = state0(all, false),
+%% {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
+%% {ok, S2} = handle_message({ok, [bar1()]}, W2, S1),
+%% Expect = {stop, [bar1(), foo1()]},
+%% ?assertEqual(Expect, handle_message({ok, [bar1()]}, W3, S2))
+%% end).
+%%
+%%
+%% % Tests for a specific list of revs
+%%
+%%
+%% check_specific_revs_first_msg() ->
+%% ?_test(begin
+%% W1 = #shard{node='node1'},
+%% W2 = #shard{node='node2'},
+%% W3 = #shard{node='node3'},
+%% S0 = state0(revs(), false),
+%% ?assertMatch(
+%% {ok, #state{reply_count = 1, workers = [W2, W3]}},
+%% handle_message({ok, [foo1(), bar1(), bazNF()]}, W1, S0)
+%% )
+%% end).
+%%
+%%
+%% check_revs_done_on_agreement() ->
+%% ?_test(begin
+%% W1 = #shard{node='node1'},
+%% W2 = #shard{node='node2'},
+%% S0 = state0(revs(), false),
+%% Msg = {ok, [foo1(), bar1(), bazNF()]},
+%% {ok, S1} = handle_message(Msg, W1, S0),
+%% Expect = {stop, [bar1(), foo1(), bazNF()]},
+%% ?assertEqual(Expect, handle_message(Msg, W2, S1))
+%% end).
+%%
+%%
+%% check_latest_true() ->
+%% ?_test(begin
+%% W1 = #shard{node='node1'},
+%% W2 = #shard{node='node2'},
+%% S0 = state0(revs(), true),
+%% Msg1 = {ok, [foo2(), bar1(), bazNF()]},
+%% Msg2 = {ok, [foo2(), bar1(), bazNF()]},
+%% {ok, S1} = handle_message(Msg1, W1, S0),
+%% Expect = {stop, [bar1(), foo2(), bazNF()]},
+%% ?assertEqual(Expect, handle_message(Msg2, W2, S1))
+%% end).
+%%
+%%
+%% check_ancestor_counted_in_quorum() ->
+%% ?_test(begin
+%% W1 = #shard{node='node1'},
+%% W2 = #shard{node='node2'},
+%% S0 = state0(revs(), true),
+%% Msg1 = {ok, [foo1(), bar1(), bazNF()]},
+%% Msg2 = {ok, [foo2(), bar1(), bazNF()]},
+%% Expect = {stop, [bar1(), foo2(), bazNF()]},
+%%
+%% % Older first
+%% {ok, S1} = handle_message(Msg1, W1, S0),
+%% ?assertEqual(Expect, handle_message(Msg2, W2, S1)),
+%%
+%% % Newer first
+%% {ok, S2} = handle_message(Msg2, W2, S0),
+%% ?assertEqual(Expect, handle_message(Msg1, W1, S2))
+%% end).
+%%
+%%
+%% check_not_found_counts_for_descendant() ->
+%% ?_test(begin
+%% W1 = #shard{node='node1'},
+%% W2 = #shard{node='node2'},
+%% S0 = state0(revs(), true),
+%% Msg1 = {ok, [foo1(), bar1(), bazNF()]},
+%% Msg2 = {ok, [foo1(), bar1(), baz1()]},
+%% Expect = {stop, [bar1(), baz1(), foo1()]},
+%%
+%% % not_found first
+%% {ok, S1} = handle_message(Msg1, W1, S0),
+%% ?assertEqual(Expect, handle_message(Msg2, W2, S1)),
+%%
+%% % not_found second
+%% {ok, S2} = handle_message(Msg2, W2, S0),
+%% ?assertEqual(Expect, handle_message(Msg1, W1, S2))
+%% end).
+%%
+%%
+%% check_worker_error_skipped() ->
+%% ?_test(begin
+%% W1 = #shard{node='node1'},
+%% W2 = #shard{node='node2'},
+%% W3 = #shard{node='node3'},
+%% S0 = state0(revs(), true),
+%% Msg1 = {ok, [foo1(), bar1(), baz1()]},
+%% Msg2 = {rexi_EXIT, reason},
+%% Msg3 = {ok, [foo1(), bar1(), baz1()]},
+%% Expect = {stop, [bar1(), baz1(), foo1()]},
+%%
+%% {ok, S1} = handle_message(Msg1, W1, S0),
+%% {ok, S2} = handle_message(Msg2, W2, S1),
+%% ?assertEqual(Expect, handle_message(Msg3, W3, S2))
+%% end).
+%%
+%%
+%% check_quorum_only_counts_valid_responses() ->
+%% ?_test(begin
+%% W1 = #shard{node='node1'},
+%% W2 = #shard{node='node2'},
+%% W3 = #shard{node='node3'},
+%% S0 = state0(revs(), true),
+%% Msg1 = {rexi_EXIT, reason},
+%% Msg2 = {rexi_EXIT, reason},
+%% Msg3 = {ok, [foo1(), bar1(), baz1()]},
+%% Expect = {stop, [bar1(), baz1(), foo1()]},
+%%
+%% {ok, S1} = handle_message(Msg1, W1, S0),
+%% {ok, S2} = handle_message(Msg2, W2, S1),
+%% ?assertEqual(Expect, handle_message(Msg3, W3, S2))
+%% end).
+%%
+%%
+%% check_empty_list_when_no_workers_reply() ->
+%% ?_test(begin
+%% W1 = #shard{node='node1'},
+%% W2 = #shard{node='node2'},
+%% W3 = #shard{node='node3'},
+%% S0 = state0(revs(), true),
+%% Msg1 = {rexi_EXIT, reason},
+%% Msg2 = {rexi_EXIT, reason},
+%% Msg3 = {rexi_DOWN, nodedown, {nil, node()}, nil},
+%% Expect = {stop, all_workers_died},
+%%
+%% {ok, S1} = handle_message(Msg1, W1, S0),
+%% {ok, S2} = handle_message(Msg2, W2, S1),
+%% ?assertEqual(Expect, handle_message(Msg3, W3, S2))
+%% end).
+%%
+%%
+%% check_node_rev_stored() ->
+%% ?_test(begin
+%% W1 = #shard{node = node1},
+%% S0 = state0([], true),
+%%
+%% {ok, S1} = handle_message({ok, [foo1()]}, W1, S0),
+%% ?assertEqual([{node1, [{1, <<"foo">>}]}], S1#state.node_revs)
+%% end).
+%%
+%%
+%% check_node_rev_store_head_only() ->
+%% ?_test(begin
+%% W1 = #shard{node = node1},
+%% S0 = state0([], true),
+%%
+%% {ok, S1} = handle_message({ok, [foo2()]}, W1, S0),
+%% ?assertEqual([{node1, [{2, <<"foo2">>}]}], S1#state.node_revs)
+%% end).
+%%
+%%
+%% check_node_rev_store_multiple() ->
+%% ?_test(begin
+%% W1 = #shard{node = node1},
+%% S0 = state0([], true),
+%%
+%% {ok, S1} = handle_message({ok, [foo1(), foo2()]}, W1, S0),
+%% ?assertEqual(
+%% [{node1, [{2, <<"foo2">>}, {1, <<"foo">>}]}],
+%% S1#state.node_revs
+%% )
+%% end).
+%%
+%%
+%% check_node_rev_dont_store_errors() ->
+%% ?_test(begin
+%% W1 = #shard{node = node1},
+%% S0 = state0([], true),
+%%
+%% {ok, S1} = handle_message({ok, [barNF()]}, W1, S0),
+%% ?assertEqual([], S1#state.node_revs)
+%% end).
+%%
+%%
+%% check_node_rev_store_non_errors() ->
+%% ?_test(begin
+%% W1 = #shard{node = node1},
+%% S0 = state0([], true),
+%%
+%% {ok, S1} = handle_message({ok, [foo1(), barNF()]}, W1, S0),
+%% ?assertEqual([{node1, [{1, <<"foo">>}]}], S1#state.node_revs)
+%% end).
+%%
+%%
+%% check_node_rev_store_concatenate() ->
+%% ?_test(begin
+%% W2 = #shard{node = node2},
+%% S0 = state0([], true),
+%% S1 = S0#state{node_revs = [{node1, [{1, <<"foo">>}]}]},
+%%
+%% {ok, S2} = handle_message({ok, [foo2()]}, W2, S1),
+%% ?assertEqual(
+%% [{node2, [{2, <<"foo2">>}]}, {node1, [{1, <<"foo">>}]}],
+%% S2#state.node_revs
+%% )
+%% end).
+%%
+%%
+%% check_node_rev_store_concantenate_multiple() ->
+%% ?_test(begin
+%% W2 = #shard{node = node2},
+%% S0 = state0([], true),
+%% S1 = S0#state{node_revs = [{node1, [{1, <<"foo">>}]}]},
+%%
+%% {ok, S2} = handle_message({ok, [foo2(), bar1()]}, W2, S1),
+%% ?assertEqual(
+%% [
+%% {node2, [{1, <<"bar">>}, {2, <<"foo2">>}]},
+%% {node1, [{1, <<"foo">>}]}
+%% ],
+%% S2#state.node_revs
+%% )
+%% end).
+%%
+%%
+%% check_node_rev_unmodified_on_down_or_exit() ->
+%% ?_test(begin
+%% W2 = #shard{node = node2},
+%% S0 = state0([], true),
+%% S1 = S0#state{node_revs = [{node1, [{1, <<"foo">>}]}]},
+%%
+%% Down = {rexi_DOWN, nodedown, {nil, node()}, nil},
+%% {ok, S2} = handle_message(Down, W2, S1),
+%% ?assertEqual(
+%% [{node1, [{1, <<"foo">>}]}],
+%% S2#state.node_revs
+%% ),
+%%
+%% Exit = {rexi_EXIT, reason},
+%% {ok, S3} = handle_message(Exit, W2, S1),
+%% ?assertEqual(
+%% [{node1, [{1, <<"foo">>}]}],
+%% S3#state.node_revs
+%% )
+%% end).
+%%
+%%
+%% check_not_found_replies_are_removed_when_doc_found() ->
+%% ?_test(begin
+%% Replies = replies_to_dict([foo1(), bar1(), fooNF()]),
+%% Expect = [bar1(), foo1()],
+%% ?assertEqual(Expect, dict_format_replies(Replies))
+%% end).
+%%
+%% check_not_found_returned_when_one_of_docs_not_found() ->
+%% ?_test(begin
+%% Replies = replies_to_dict([foo1(), foo2(), barNF()]),
+%% Expect = [foo1(), foo2(), barNF()],
+%% ?assertEqual(Expect, dict_format_replies(Replies))
+%% end).
+%%
+%% check_not_found_returned_when_doc_not_found() ->
+%% ?_test(begin
+%% Replies = replies_to_dict([fooNF(), barNF(), bazNF()]),
+%% Expect = [barNF(), bazNF(), fooNF()],
+%% ?assertEqual(Expect, dict_format_replies(Replies))
+%% end).
+%%
+%% check_longer_rev_list_returned() ->
+%% ?_test(begin
+%% Replies = replies_to_dict([foo2(), foo2stemmed()]),
+%% Expect = [foo2()],
+%% ?assertEqual(2, length(Replies)),
+%% ?assertEqual(Expect, dict_format_replies(Replies))
+%% end).
+%%
+%% check_longer_rev_list_not_combined() ->
+%% ?_test(begin
+%% Replies = replies_to_dict([foo2(), foo2stemmed(), bar1()]),
+%% Expect = [bar1(), foo2()],
+%% ?assertEqual(3, length(Replies)),
+%% ?assertEqual(Expect, dict_format_replies(Replies))
+%% end).
+%%
+%% check_not_found_removed_and_longer_rev_list() ->
+%% ?_test(begin
+%% Replies = replies_to_dict([foo2(), foo2stemmed(), foo2NF()]),
+%% Expect = [foo2()],
+%% ?assertEqual(3, length(Replies)),
+%% ?assertEqual(Expect, dict_format_replies(Replies))
+%% end).
+%%
+%%
+%% replies_to_dict(Replies) ->
+%% [reply_to_element(R) || R <- Replies].
+%%
+%% reply_to_element({ok, #doc{revs = Revs}} = Reply) ->
+%% {_, [Rev | _]} = Revs,
+%% {{Rev, Revs}, {Reply, 1}};
+%% reply_to_element(Reply) ->
+%% {Reply, {Reply, 1}}.
+%%
+%% -endif.
diff --git a/src/fabric/src/fabric_doc_purge.erl b/src/fabric/src/fabric_doc_purge.erl
index 3492f88c5..bda9039ba 100644
--- a/src/fabric/src/fabric_doc_purge.erl
+++ b/src/fabric/src/fabric_doc_purge.erl
@@ -224,348 +224,348 @@ has_quorum(Resps, Count, W) ->
end.
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-purge_test_() ->
- {
- setup,
- fun setup/0,
- fun teardown/1,
- [
- t_w2_ok(),
- t_w3_ok(),
-
- t_w2_mixed_accepted(),
- t_w3_mixed_accepted(),
-
- t_w2_exit1_ok(),
- t_w2_exit2_accepted(),
- t_w2_exit3_error(),
-
- t_w4_accepted(),
-
- t_mixed_ok_accepted(),
- t_mixed_errors()
- ]
- }.
-
-
-setup() ->
- meck:new(couch_log),
- meck:expect(couch_log, warning, fun(_, _) -> ok end),
- meck:expect(couch_log, notice, fun(_, _) -> ok end).
-
-
-teardown(_) ->
- meck:unload().
-
-
-t_w2_ok() ->
- ?_test(begin
- Acc0 = create_init_acc(2),
- Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
-
- {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
- ?assertEqual(2, length(Acc1#acc.worker_uuids)),
- check_quorum(Acc1, false),
-
- {stop, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
- ?assertEqual(1, length(Acc2#acc.worker_uuids)),
- check_quorum(Acc2, true),
-
- Expect = [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}],
- Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc2),
- ?assertEqual(Expect, Resps),
- ?assertEqual(ok, resp_health(Resps))
- end).
-
-
-t_w3_ok() ->
- ?_test(begin
- Acc0 = create_init_acc(3),
- Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
-
- {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
- check_quorum(Acc1, false),
-
- {ok, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
- ?assertEqual(1, length(Acc2#acc.worker_uuids)),
- check_quorum(Acc2, false),
-
- {stop, Acc3} = handle_message(Msg, worker(3, Acc0), Acc2),
- ?assertEqual(0, length(Acc3#acc.worker_uuids)),
- check_quorum(Acc3, true),
-
- Expect = [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}],
- Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
- ?assertEqual(Expect, Resps),
- ?assertEqual(ok, resp_health(Resps))
- end).
-
-
-t_w2_mixed_accepted() ->
- ?_test(begin
- Acc0 = create_init_acc(2),
- Msg1 = {ok, [{ok, [{1, <<"foo1">>}]}, {ok, [{2, <<"bar1">>}]}]},
- Msg2 = {ok, [{ok, [{1, <<"foo2">>}]}, {ok, [{2, <<"bar2">>}]}]},
-
- {ok, Acc1} = handle_message(Msg1, worker(1, Acc0), Acc0),
- ?assertEqual(2, length(Acc1#acc.worker_uuids)),
- check_quorum(Acc1, false),
-
- {ok, Acc2} = handle_message(Msg2, worker(2, Acc0), Acc1),
- ?assertEqual(1, length(Acc2#acc.worker_uuids)),
- check_quorum(Acc2, false),
-
- {stop, Acc3} = handle_message(Msg1, worker(3, Acc0), Acc2),
- ?assertEqual(0, length(Acc3#acc.worker_uuids)),
- check_quorum(Acc3, true),
-
- Expect = [
- {accepted, [{1, <<"foo1">>}, {1, <<"foo2">>}]},
- {accepted, [{2, <<"bar1">>}, {2, <<"bar2">>}]}
- ],
- Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc2),
- ?assertEqual(Expect, Resps),
- ?assertEqual(accepted, resp_health(Resps))
- end).
-
-
-t_w3_mixed_accepted() ->
- ?_test(begin
- Acc0 = create_init_acc(3),
- Msg1 = {ok, [{ok, [{1, <<"foo1">>}]}, {ok, [{2, <<"bar1">>}]}]},
- Msg2 = {ok, [{ok, [{1, <<"foo2">>}]}, {ok, [{2, <<"bar2">>}]}]},
-
- {ok, Acc1} = handle_message(Msg1, worker(1, Acc0), Acc0),
- ?assertEqual(2, length(Acc1#acc.worker_uuids)),
- check_quorum(Acc1, false),
-
- {ok, Acc2} = handle_message(Msg2, worker(2, Acc0), Acc1),
- ?assertEqual(1, length(Acc2#acc.worker_uuids)),
- check_quorum(Acc2, false),
-
- {stop, Acc3} = handle_message(Msg2, worker(3, Acc0), Acc2),
- ?assertEqual(0, length(Acc3#acc.worker_uuids)),
- check_quorum(Acc3, true),
-
- Expect = [
- {accepted, [{1, <<"foo1">>}, {1, <<"foo2">>}]},
- {accepted, [{2, <<"bar1">>}, {2, <<"bar2">>}]}
- ],
- Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc2),
- ?assertEqual(Expect, Resps),
- ?assertEqual(accepted, resp_health(Resps))
- end).
-
-
-t_w2_exit1_ok() ->
- ?_test(begin
- Acc0 = create_init_acc(2),
- Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
- ExitMsg = {rexi_EXIT, blargh},
-
- {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
- ?assertEqual(2, length(Acc1#acc.worker_uuids)),
- check_quorum(Acc1, false),
-
- {ok, Acc2} = handle_message(ExitMsg, worker(2, Acc0), Acc1),
- ?assertEqual(1, length(Acc2#acc.worker_uuids)),
- check_quorum(Acc2, false),
-
- {stop, Acc3} = handle_message(Msg, worker(3, Acc0), Acc2),
- ?assertEqual(0, length(Acc3#acc.worker_uuids)),
- check_quorum(Acc3, true),
-
- Expect = [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}],
- Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
- ?assertEqual(Expect, Resps),
- ?assertEqual(ok, resp_health(Resps))
- end).
-
-
-t_w2_exit2_accepted() ->
- ?_test(begin
- Acc0 = create_init_acc(2),
- Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
- ExitMsg = {rexi_EXIT, blargh},
-
- {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
- ?assertEqual(2, length(Acc1#acc.worker_uuids)),
- check_quorum(Acc1, false),
-
- {ok, Acc2} = handle_message(ExitMsg, worker(2, Acc0), Acc1),
- ?assertEqual(1, length(Acc2#acc.worker_uuids)),
- check_quorum(Acc2, false),
-
- {stop, Acc3} = handle_message(ExitMsg, worker(3, Acc0), Acc2),
- ?assertEqual(0, length(Acc3#acc.worker_uuids)),
- check_quorum(Acc3, true),
-
- Expect = [{accepted, [{1, <<"foo">>}]}, {accepted, [{2, <<"bar">>}]}],
- Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
- ?assertEqual(Expect, Resps),
- ?assertEqual(accepted, resp_health(Resps))
- end).
-
-
-t_w2_exit3_error() ->
- ?_test(begin
- Acc0 = create_init_acc(2),
- ExitMsg = {rexi_EXIT, blargh},
-
- {ok, Acc1} = handle_message(ExitMsg, worker(1, Acc0), Acc0),
- ?assertEqual(2, length(Acc1#acc.worker_uuids)),
- check_quorum(Acc1, false),
-
- {ok, Acc2} = handle_message(ExitMsg, worker(2, Acc0), Acc1),
- ?assertEqual(1, length(Acc2#acc.worker_uuids)),
- check_quorum(Acc2, false),
-
- {stop, Acc3} = handle_message(ExitMsg, worker(3, Acc0), Acc2),
- ?assertEqual(0, length(Acc3#acc.worker_uuids)),
- check_quorum(Acc3, true),
-
- Expect = [
- {error, internal_server_error},
- {error, internal_server_error}
- ],
- Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
- ?assertEqual(Expect, Resps),
- ?assertEqual(error, resp_health(Resps))
- end).
-
-
-t_w4_accepted() ->
- % Make sure we return when all workers have responded
- % rather than wait around for a timeout if a user asks
- % for a quorum with more than the available number of
- % shards.
- ?_test(begin
- Acc0 = create_init_acc(4),
- Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
-
- {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
- ?assertEqual(2, length(Acc1#acc.worker_uuids)),
- check_quorum(Acc1, false),
-
- {ok, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
- ?assertEqual(1, length(Acc2#acc.worker_uuids)),
- check_quorum(Acc2, false),
-
- {stop, Acc3} = handle_message(Msg, worker(3, Acc0), Acc2),
- ?assertEqual(0, length(Acc3#acc.worker_uuids)),
- check_quorum(Acc3, true),
-
- Expect = [{accepted, [{1, <<"foo">>}]}, {accepted, [{2, <<"bar">>}]}],
- Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
- ?assertEqual(Expect, Resps),
- ?assertEqual(accepted, resp_health(Resps))
- end).
-
-
-t_mixed_ok_accepted() ->
- ?_test(begin
- WorkerUUIDs = [
- {#shard{node = a, range = [1, 2]}, [<<"uuid1">>]},
- {#shard{node = b, range = [1, 2]}, [<<"uuid1">>]},
- {#shard{node = c, range = [1, 2]}, [<<"uuid1">>]},
-
- {#shard{node = a, range = [3, 4]}, [<<"uuid2">>]},
- {#shard{node = b, range = [3, 4]}, [<<"uuid2">>]},
- {#shard{node = c, range = [3, 4]}, [<<"uuid2">>]}
- ],
-
- Acc0 = #acc{
- worker_uuids = WorkerUUIDs,
- resps = dict:from_list([{<<"uuid1">>, []}, {<<"uuid2">>, []}]),
- uuid_counts = dict:from_list([{<<"uuid1">>, 3}, {<<"uuid2">>, 3}]),
- w = 2
- },
-
- Msg1 = {ok, [{ok, [{1, <<"foo">>}]}]},
- Msg2 = {ok, [{ok, [{2, <<"bar">>}]}]},
- ExitMsg = {rexi_EXIT, blargh},
-
- {ok, Acc1} = handle_message(Msg1, worker(1, Acc0), Acc0),
- {ok, Acc2} = handle_message(Msg1, worker(2, Acc0), Acc1),
- {ok, Acc3} = handle_message(ExitMsg, worker(4, Acc0), Acc2),
- {ok, Acc4} = handle_message(ExitMsg, worker(5, Acc0), Acc3),
- {stop, Acc5} = handle_message(Msg2, worker(6, Acc0), Acc4),
-
- Expect = [{ok, [{1, <<"foo">>}]}, {accepted, [{2, <<"bar">>}]}],
- Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc5),
- ?assertEqual(Expect, Resps),
- ?assertEqual(accepted, resp_health(Resps))
- end).
-
-
-t_mixed_errors() ->
- ?_test(begin
- WorkerUUIDs = [
- {#shard{node = a, range = [1, 2]}, [<<"uuid1">>]},
- {#shard{node = b, range = [1, 2]}, [<<"uuid1">>]},
- {#shard{node = c, range = [1, 2]}, [<<"uuid1">>]},
-
- {#shard{node = a, range = [3, 4]}, [<<"uuid2">>]},
- {#shard{node = b, range = [3, 4]}, [<<"uuid2">>]},
- {#shard{node = c, range = [3, 4]}, [<<"uuid2">>]}
- ],
-
- Acc0 = #acc{
- worker_uuids = WorkerUUIDs,
- resps = dict:from_list([{<<"uuid1">>, []}, {<<"uuid2">>, []}]),
- uuid_counts = dict:from_list([{<<"uuid1">>, 3}, {<<"uuid2">>, 3}]),
- w = 2
- },
-
- Msg = {ok, [{ok, [{1, <<"foo">>}]}]},
- ExitMsg = {rexi_EXIT, blargh},
-
- {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
- {ok, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
- {ok, Acc3} = handle_message(ExitMsg, worker(4, Acc0), Acc2),
- {ok, Acc4} = handle_message(ExitMsg, worker(5, Acc0), Acc3),
- {stop, Acc5} = handle_message(ExitMsg, worker(6, Acc0), Acc4),
-
- Expect = [{ok, [{1, <<"foo">>}]}, {error, internal_server_error}],
- Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc5),
- ?assertEqual(Expect, Resps),
- ?assertEqual(error, resp_health(Resps))
- end).
-
-
-create_init_acc(W) ->
- UUID1 = <<"uuid1">>,
- UUID2 = <<"uuid2">>,
-
- Nodes = [node1, node2, node3],
- Shards = mem3_util:create_partition_map(<<"foo">>, 3, 1, Nodes),
-
- % Create our worker_uuids. We're relying on the fact that
- % we're using a fake Q=1 db so we don't have to worry
- % about any hashing here.
- WorkerUUIDs = lists:map(fun(Shard) ->
- {Shard#shard{ref = erlang:make_ref()}, [UUID1, UUID2]}
- end, Shards),
-
- #acc{
- worker_uuids = WorkerUUIDs,
- resps = dict:from_list([{UUID1, []}, {UUID2, []}]),
- uuid_counts = dict:from_list([{UUID1, 3}, {UUID2, 3}]),
- w = W
- }.
-
-
-worker(N, #acc{worker_uuids = WorkerUUIDs}) ->
- {Worker, _} = lists:nth(N, WorkerUUIDs),
- Worker.
-
-
-check_quorum(Acc, Expect) ->
- dict:fold(fun(_Shard, Resps, _) ->
- ?assertEqual(Expect, has_quorum(Resps, 3, Acc#acc.w))
- end, nil, Acc#acc.resps).
-
--endif.
+%% -ifdef(TEST).
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%% purge_test_() ->
+%% {
+%% setup,
+%% fun setup/0,
+%% fun teardown/1,
+%% [
+%% t_w2_ok(),
+%% t_w3_ok(),
+%%
+%% t_w2_mixed_accepted(),
+%% t_w3_mixed_accepted(),
+%%
+%% t_w2_exit1_ok(),
+%% t_w2_exit2_accepted(),
+%% t_w2_exit3_error(),
+%%
+%% t_w4_accepted(),
+%%
+%% t_mixed_ok_accepted(),
+%% t_mixed_errors()
+%% ]
+%% }.
+%%
+%%
+%% setup() ->
+%% meck:new(couch_log),
+%% meck:expect(couch_log, warning, fun(_, _) -> ok end),
+%% meck:expect(couch_log, notice, fun(_, _) -> ok end).
+%%
+%%
+%% teardown(_) ->
+%% meck:unload().
+%%
+%%
+%% t_w2_ok() ->
+%% ?_test(begin
+%% Acc0 = create_init_acc(2),
+%% Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
+%%
+%% {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
+%% ?assertEqual(2, length(Acc1#acc.worker_uuids)),
+%% check_quorum(Acc1, false),
+%%
+%% {stop, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
+%% ?assertEqual(1, length(Acc2#acc.worker_uuids)),
+%% check_quorum(Acc2, true),
+%%
+%% Expect = [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}],
+%% Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc2),
+%% ?assertEqual(Expect, Resps),
+%% ?assertEqual(ok, resp_health(Resps))
+%% end).
+%%
+%%
+%% t_w3_ok() ->
+%% ?_test(begin
+%% Acc0 = create_init_acc(3),
+%% Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
+%%
+%% {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
+%% check_quorum(Acc1, false),
+%%
+%% {ok, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
+%% ?assertEqual(1, length(Acc2#acc.worker_uuids)),
+%% check_quorum(Acc2, false),
+%%
+%% {stop, Acc3} = handle_message(Msg, worker(3, Acc0), Acc2),
+%% ?assertEqual(0, length(Acc3#acc.worker_uuids)),
+%% check_quorum(Acc3, true),
+%%
+%% Expect = [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}],
+%% Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
+%% ?assertEqual(Expect, Resps),
+%% ?assertEqual(ok, resp_health(Resps))
+%% end).
+%%
+%%
+%% t_w2_mixed_accepted() ->
+%% ?_test(begin
+%% Acc0 = create_init_acc(2),
+%% Msg1 = {ok, [{ok, [{1, <<"foo1">>}]}, {ok, [{2, <<"bar1">>}]}]},
+%% Msg2 = {ok, [{ok, [{1, <<"foo2">>}]}, {ok, [{2, <<"bar2">>}]}]},
+%%
+%% {ok, Acc1} = handle_message(Msg1, worker(1, Acc0), Acc0),
+%% ?assertEqual(2, length(Acc1#acc.worker_uuids)),
+%% check_quorum(Acc1, false),
+%%
+%% {ok, Acc2} = handle_message(Msg2, worker(2, Acc0), Acc1),
+%% ?assertEqual(1, length(Acc2#acc.worker_uuids)),
+%% check_quorum(Acc2, false),
+%%
+%% {stop, Acc3} = handle_message(Msg1, worker(3, Acc0), Acc2),
+%% ?assertEqual(0, length(Acc3#acc.worker_uuids)),
+%% check_quorum(Acc3, true),
+%%
+%% Expect = [
+%% {accepted, [{1, <<"foo1">>}, {1, <<"foo2">>}]},
+%% {accepted, [{2, <<"bar1">>}, {2, <<"bar2">>}]}
+%% ],
+%% Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc2),
+%% ?assertEqual(Expect, Resps),
+%% ?assertEqual(accepted, resp_health(Resps))
+%% end).
+%%
+%%
+%% t_w3_mixed_accepted() ->
+%% ?_test(begin
+%% Acc0 = create_init_acc(3),
+%% Msg1 = {ok, [{ok, [{1, <<"foo1">>}]}, {ok, [{2, <<"bar1">>}]}]},
+%% Msg2 = {ok, [{ok, [{1, <<"foo2">>}]}, {ok, [{2, <<"bar2">>}]}]},
+%%
+%% {ok, Acc1} = handle_message(Msg1, worker(1, Acc0), Acc0),
+%% ?assertEqual(2, length(Acc1#acc.worker_uuids)),
+%% check_quorum(Acc1, false),
+%%
+%% {ok, Acc2} = handle_message(Msg2, worker(2, Acc0), Acc1),
+%% ?assertEqual(1, length(Acc2#acc.worker_uuids)),
+%% check_quorum(Acc2, false),
+%%
+%% {stop, Acc3} = handle_message(Msg2, worker(3, Acc0), Acc2),
+%% ?assertEqual(0, length(Acc3#acc.worker_uuids)),
+%% check_quorum(Acc3, true),
+%%
+%% Expect = [
+%% {accepted, [{1, <<"foo1">>}, {1, <<"foo2">>}]},
+%% {accepted, [{2, <<"bar1">>}, {2, <<"bar2">>}]}
+%% ],
+%% Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc2),
+%% ?assertEqual(Expect, Resps),
+%% ?assertEqual(accepted, resp_health(Resps))
+%% end).
+%%
+%%
+%% t_w2_exit1_ok() ->
+%% ?_test(begin
+%% Acc0 = create_init_acc(2),
+%% Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
+%% ExitMsg = {rexi_EXIT, blargh},
+%%
+%% {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
+%% ?assertEqual(2, length(Acc1#acc.worker_uuids)),
+%% check_quorum(Acc1, false),
+%%
+%% {ok, Acc2} = handle_message(ExitMsg, worker(2, Acc0), Acc1),
+%% ?assertEqual(1, length(Acc2#acc.worker_uuids)),
+%% check_quorum(Acc2, false),
+%%
+%% {stop, Acc3} = handle_message(Msg, worker(3, Acc0), Acc2),
+%% ?assertEqual(0, length(Acc3#acc.worker_uuids)),
+%% check_quorum(Acc3, true),
+%%
+%% Expect = [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}],
+%% Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
+%% ?assertEqual(Expect, Resps),
+%% ?assertEqual(ok, resp_health(Resps))
+%% end).
+%%
+%%
+%% t_w2_exit2_accepted() ->
+%% ?_test(begin
+%% Acc0 = create_init_acc(2),
+%% Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
+%% ExitMsg = {rexi_EXIT, blargh},
+%%
+%% {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
+%% ?assertEqual(2, length(Acc1#acc.worker_uuids)),
+%% check_quorum(Acc1, false),
+%%
+%% {ok, Acc2} = handle_message(ExitMsg, worker(2, Acc0), Acc1),
+%% ?assertEqual(1, length(Acc2#acc.worker_uuids)),
+%% check_quorum(Acc2, false),
+%%
+%% {stop, Acc3} = handle_message(ExitMsg, worker(3, Acc0), Acc2),
+%% ?assertEqual(0, length(Acc3#acc.worker_uuids)),
+%% check_quorum(Acc3, true),
+%%
+%% Expect = [{accepted, [{1, <<"foo">>}]}, {accepted, [{2, <<"bar">>}]}],
+%% Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
+%% ?assertEqual(Expect, Resps),
+%% ?assertEqual(accepted, resp_health(Resps))
+%% end).
+%%
+%%
+%% t_w2_exit3_error() ->
+%% ?_test(begin
+%% Acc0 = create_init_acc(2),
+%% ExitMsg = {rexi_EXIT, blargh},
+%%
+%% {ok, Acc1} = handle_message(ExitMsg, worker(1, Acc0), Acc0),
+%% ?assertEqual(2, length(Acc1#acc.worker_uuids)),
+%% check_quorum(Acc1, false),
+%%
+%% {ok, Acc2} = handle_message(ExitMsg, worker(2, Acc0), Acc1),
+%% ?assertEqual(1, length(Acc2#acc.worker_uuids)),
+%% check_quorum(Acc2, false),
+%%
+%% {stop, Acc3} = handle_message(ExitMsg, worker(3, Acc0), Acc2),
+%% ?assertEqual(0, length(Acc3#acc.worker_uuids)),
+%% check_quorum(Acc3, true),
+%%
+%% Expect = [
+%% {error, internal_server_error},
+%% {error, internal_server_error}
+%% ],
+%% Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
+%% ?assertEqual(Expect, Resps),
+%% ?assertEqual(error, resp_health(Resps))
+%% end).
+%%
+%%
+%% t_w4_accepted() ->
+%% % Make sure we return when all workers have responded
+%% % rather than wait around for a timeout if a user asks
+%%     % for a quorum with more than the available number of
+%% % shards.
+%% ?_test(begin
+%% Acc0 = create_init_acc(4),
+%% Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
+%%
+%% {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
+%% ?assertEqual(2, length(Acc1#acc.worker_uuids)),
+%% check_quorum(Acc1, false),
+%%
+%% {ok, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
+%% ?assertEqual(1, length(Acc2#acc.worker_uuids)),
+%% check_quorum(Acc2, false),
+%%
+%% {stop, Acc3} = handle_message(Msg, worker(3, Acc0), Acc2),
+%% ?assertEqual(0, length(Acc3#acc.worker_uuids)),
+%% check_quorum(Acc3, true),
+%%
+%% Expect = [{accepted, [{1, <<"foo">>}]}, {accepted, [{2, <<"bar">>}]}],
+%% Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
+%% ?assertEqual(Expect, Resps),
+%% ?assertEqual(accepted, resp_health(Resps))
+%% end).
+%%
+%%
+%% t_mixed_ok_accepted() ->
+%% ?_test(begin
+%% WorkerUUIDs = [
+%% {#shard{node = a, range = [1, 2]}, [<<"uuid1">>]},
+%% {#shard{node = b, range = [1, 2]}, [<<"uuid1">>]},
+%% {#shard{node = c, range = [1, 2]}, [<<"uuid1">>]},
+%%
+%% {#shard{node = a, range = [3, 4]}, [<<"uuid2">>]},
+%% {#shard{node = b, range = [3, 4]}, [<<"uuid2">>]},
+%% {#shard{node = c, range = [3, 4]}, [<<"uuid2">>]}
+%% ],
+%%
+%% Acc0 = #acc{
+%% worker_uuids = WorkerUUIDs,
+%% resps = dict:from_list([{<<"uuid1">>, []}, {<<"uuid2">>, []}]),
+%% uuid_counts = dict:from_list([{<<"uuid1">>, 3}, {<<"uuid2">>, 3}]),
+%% w = 2
+%% },
+%%
+%% Msg1 = {ok, [{ok, [{1, <<"foo">>}]}]},
+%% Msg2 = {ok, [{ok, [{2, <<"bar">>}]}]},
+%% ExitMsg = {rexi_EXIT, blargh},
+%%
+%% {ok, Acc1} = handle_message(Msg1, worker(1, Acc0), Acc0),
+%% {ok, Acc2} = handle_message(Msg1, worker(2, Acc0), Acc1),
+%% {ok, Acc3} = handle_message(ExitMsg, worker(4, Acc0), Acc2),
+%% {ok, Acc4} = handle_message(ExitMsg, worker(5, Acc0), Acc3),
+%% {stop, Acc5} = handle_message(Msg2, worker(6, Acc0), Acc4),
+%%
+%% Expect = [{ok, [{1, <<"foo">>}]}, {accepted, [{2, <<"bar">>}]}],
+%% Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc5),
+%% ?assertEqual(Expect, Resps),
+%% ?assertEqual(accepted, resp_health(Resps))
+%% end).
+%%
+%%
+%% t_mixed_errors() ->
+%% ?_test(begin
+%% WorkerUUIDs = [
+%% {#shard{node = a, range = [1, 2]}, [<<"uuid1">>]},
+%% {#shard{node = b, range = [1, 2]}, [<<"uuid1">>]},
+%% {#shard{node = c, range = [1, 2]}, [<<"uuid1">>]},
+%%
+%% {#shard{node = a, range = [3, 4]}, [<<"uuid2">>]},
+%% {#shard{node = b, range = [3, 4]}, [<<"uuid2">>]},
+%% {#shard{node = c, range = [3, 4]}, [<<"uuid2">>]}
+%% ],
+%%
+%% Acc0 = #acc{
+%% worker_uuids = WorkerUUIDs,
+%% resps = dict:from_list([{<<"uuid1">>, []}, {<<"uuid2">>, []}]),
+%% uuid_counts = dict:from_list([{<<"uuid1">>, 3}, {<<"uuid2">>, 3}]),
+%% w = 2
+%% },
+%%
+%% Msg = {ok, [{ok, [{1, <<"foo">>}]}]},
+%% ExitMsg = {rexi_EXIT, blargh},
+%%
+%% {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
+%% {ok, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
+%% {ok, Acc3} = handle_message(ExitMsg, worker(4, Acc0), Acc2),
+%% {ok, Acc4} = handle_message(ExitMsg, worker(5, Acc0), Acc3),
+%% {stop, Acc5} = handle_message(ExitMsg, worker(6, Acc0), Acc4),
+%%
+%% Expect = [{ok, [{1, <<"foo">>}]}, {error, internal_server_error}],
+%% Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc5),
+%% ?assertEqual(Expect, Resps),
+%% ?assertEqual(error, resp_health(Resps))
+%% end).
+%%
+%%
+%% create_init_acc(W) ->
+%% UUID1 = <<"uuid1">>,
+%% UUID2 = <<"uuid2">>,
+%%
+%% Nodes = [node1, node2, node3],
+%% Shards = mem3_util:create_partition_map(<<"foo">>, 3, 1, Nodes),
+%%
+%% % Create our worker_uuids. We're relying on the fact that
+%% % we're using a fake Q=1 db so we don't have to worry
+%% % about any hashing here.
+%% WorkerUUIDs = lists:map(fun(Shard) ->
+%% {Shard#shard{ref = erlang:make_ref()}, [UUID1, UUID2]}
+%% end, Shards),
+%%
+%% #acc{
+%% worker_uuids = WorkerUUIDs,
+%% resps = dict:from_list([{UUID1, []}, {UUID2, []}]),
+%% uuid_counts = dict:from_list([{UUID1, 3}, {UUID2, 3}]),
+%% w = W
+%% }.
+%%
+%%
+%% worker(N, #acc{worker_uuids = WorkerUUIDs}) ->
+%% {Worker, _} = lists:nth(N, WorkerUUIDs),
+%% Worker.
+%%
+%%
+%% check_quorum(Acc, Expect) ->
+%% dict:fold(fun(_Shard, Resps, _) ->
+%% ?assertEqual(Expect, has_quorum(Resps, 3, Acc#acc.w))
+%% end, nil, Acc#acc.resps).
+%%
+%% -endif.
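
The purge tests commented out above all drive the same write-quorum bookkeeping: each worker reply is folded into an accumulator, and the fold reports a stop once W acknowledgements have been counted for a given uuid. A minimal standalone sketch of that counting pattern follows, assuming one reply per worker; the module and function names are illustrative only and are not part of fabric.

    %% quorum_sketch.erl -- illustrative only, not fabric's implementation.
    %% Tracks ack counts per key and stops once W acks have been seen.
    -module(quorum_sketch).
    -export([new/1, handle_reply/3]).

    new(W) ->
        {W, #{}}.

    %% Reply is ok | {error, Reason}; Key identifies the uuid being purged.
    handle_reply(Key, ok, {W, Counts0}) ->
        N = maps:get(Key, Counts0, 0) + 1,
        Counts = Counts0#{Key => N},
        case N >= W of
            true -> {stop, {W, Counts}};
            false -> {ok, {W, Counts}}
        end;
    handle_reply(_Key, {error, _Reason}, Acc) ->
        {ok, Acc}.

The tests above additionally track which workers are still outstanding, so the fold can stop once every worker has replied even when the requested W exceeds the number of copies.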
diff --git a/src/fabric/src/fabric_doc_update.erl b/src/fabric/src/fabric_doc_update.erl
index 69babc14b..d670e3ccf 100644
--- a/src/fabric/src/fabric_doc_update.erl
+++ b/src/fabric/src/fabric_doc_update.erl
@@ -220,158 +220,158 @@ validate_atomic_update(_DbName, AllDocs, true) ->
throw({aborted, PreCommitFailures}).
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-
-setup_all() ->
- meck:new([couch_log, couch_stats]),
- meck:expect(couch_log, warning, fun(_,_) -> ok end),
- meck:expect(couch_stats, increment_counter, fun(_) -> ok end).
-
-
-teardown_all(_) ->
- meck:unload().
-
-
-doc_update_test_() ->
- {
- setup,
- fun setup_all/0,
- fun teardown_all/1,
- [
- fun doc_update1/0,
- fun doc_update2/0,
- fun doc_update3/0
- ]
- }.
-
-
-% eunits
-doc_update1() ->
- Doc1 = #doc{revs = {1,[<<"foo">>]}},
- Doc2 = #doc{revs = {1,[<<"bar">>]}},
- Docs = [Doc1],
- Docs2 = [Doc2, Doc1],
- Dict = dict:from_list([{Doc,[]} || Doc <- Docs]),
- Dict2 = dict:from_list([{Doc,[]} || Doc <- Docs2]),
-
- Shards =
- mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
- GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
-
-
- % test for W = 2
- AccW2 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
- Dict},
-
- {ok,{WaitingCountW2_1,_,_,_,_}=AccW2_1} =
- handle_message({ok, [{ok, Doc1}]},hd(Shards),AccW2),
- ?assertEqual(WaitingCountW2_1,2),
- {stop, FinalReplyW2 } =
- handle_message({ok, [{ok, Doc1}]},lists:nth(2,Shards),AccW2_1),
- ?assertEqual({ok, [{Doc1, {ok,Doc1}}]},FinalReplyW2),
-
- % test for W = 3
- AccW3 = {length(Shards), length(Docs), list_to_integer("3"), GroupedDocs,
- Dict},
-
- {ok,{WaitingCountW3_1,_,_,_,_}=AccW3_1} =
- handle_message({ok, [{ok, Doc1}]},hd(Shards),AccW3),
- ?assertEqual(WaitingCountW3_1,2),
-
- {ok,{WaitingCountW3_2,_,_,_,_}=AccW3_2} =
- handle_message({ok, [{ok, Doc1}]},lists:nth(2,Shards),AccW3_1),
- ?assertEqual(WaitingCountW3_2,1),
-
- {stop, FinalReplyW3 } =
- handle_message({ok, [{ok, Doc1}]},lists:nth(3,Shards),AccW3_2),
- ?assertEqual({ok, [{Doc1, {ok,Doc1}}]},FinalReplyW3),
-
- % test w quorum > # shards, which should fail immediately
-
- Shards2 = mem3_util:create_partition_map("foo",1,1,["node1"]),
- GroupedDocs2 = group_docs_by_shard_hack(<<"foo">>,Shards2,Docs),
-
- AccW4 =
- {length(Shards2), length(Docs), list_to_integer("2"), GroupedDocs2, Dict},
- Bool =
- case handle_message({ok, [{ok, Doc1}]},hd(Shards2),AccW4) of
- {stop, _Reply} ->
- true;
- _ -> false
- end,
- ?assertEqual(Bool,true),
-
- % Docs with no replies should end up as {error, internal_server_error}
- SA1 = #shard{node=a, range=1},
- SB1 = #shard{node=b, range=1},
- SA2 = #shard{node=a, range=2},
- SB2 = #shard{node=b, range=2},
- GroupedDocs3 = [{SA1,[Doc1]}, {SB1,[Doc1]}, {SA2,[Doc2]}, {SB2,[Doc2]}],
- StW5_0 = {length(GroupedDocs3), length(Docs2), 2, GroupedDocs3, Dict2},
- {ok, StW5_1} = handle_message({ok, [{ok, "A"}]}, SA1, StW5_0),
- {ok, StW5_2} = handle_message({rexi_EXIT, nil}, SB1, StW5_1),
- {ok, StW5_3} = handle_message({rexi_EXIT, nil}, SA2, StW5_2),
- {stop, ReplyW5} = handle_message({rexi_EXIT, nil}, SB2, StW5_3),
- ?assertEqual(
- {error, [{Doc1,{accepted,"A"}},{Doc2,{error,internal_server_error}}]},
- ReplyW5
- ).
-
-doc_update2() ->
- Doc1 = #doc{revs = {1,[<<"foo">>]}},
- Doc2 = #doc{revs = {1,[<<"bar">>]}},
- Docs = [Doc2, Doc1],
- Shards =
- mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
- GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
- Acc0 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
- dict:from_list([{Doc,[]} || Doc <- Docs])},
-
- {ok,{WaitingCount1,_,_,_,_}=Acc1} =
- handle_message({ok, [{ok, Doc1},{ok, Doc2}]},hd(Shards),Acc0),
- ?assertEqual(WaitingCount1,2),
-
- {ok,{WaitingCount2,_,_,_,_}=Acc2} =
- handle_message({rexi_EXIT, 1},lists:nth(2,Shards),Acc1),
- ?assertEqual(WaitingCount2,1),
-
- {stop, Reply} =
- handle_message({rexi_EXIT, 1},lists:nth(3,Shards),Acc2),
-
- ?assertEqual({accepted, [{Doc1,{accepted,Doc2}}, {Doc2,{accepted,Doc1}}]},
- Reply).
-
-doc_update3() ->
- Doc1 = #doc{revs = {1,[<<"foo">>]}},
- Doc2 = #doc{revs = {1,[<<"bar">>]}},
- Docs = [Doc2, Doc1],
- Shards =
- mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
- GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
- Acc0 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
- dict:from_list([{Doc,[]} || Doc <- Docs])},
-
- {ok,{WaitingCount1,_,_,_,_}=Acc1} =
- handle_message({ok, [{ok, Doc1},{ok, Doc2}]},hd(Shards),Acc0),
- ?assertEqual(WaitingCount1,2),
-
- {ok,{WaitingCount2,_,_,_,_}=Acc2} =
- handle_message({rexi_EXIT, 1},lists:nth(2,Shards),Acc1),
- ?assertEqual(WaitingCount2,1),
-
- {stop, Reply} =
- handle_message({ok, [{ok, Doc1},{ok, Doc2}]},lists:nth(3,Shards),Acc2),
-
- ?assertEqual({ok, [{Doc1, {ok, Doc2}},{Doc2, {ok,Doc1}}]},Reply).
-
-% needed for testing to avoid having to start the mem3 application
-group_docs_by_shard_hack(_DbName, Shards, Docs) ->
- dict:to_list(lists:foldl(fun(#doc{id=_Id} = Doc, D0) ->
- lists:foldl(fun(Shard, D1) ->
- dict:append(Shard, Doc, D1)
- end, D0, Shards)
- end, dict:new(), Docs)).
-
--endif.
+%% -ifdef(TEST).
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%%
+%% setup_all() ->
+%% meck:new([couch_log, couch_stats]),
+%% meck:expect(couch_log, warning, fun(_,_) -> ok end),
+%% meck:expect(couch_stats, increment_counter, fun(_) -> ok end).
+%%
+%%
+%% teardown_all(_) ->
+%% meck:unload().
+%%
+%%
+%% doc_update_test_() ->
+%% {
+%% setup,
+%% fun setup_all/0,
+%% fun teardown_all/1,
+%% [
+%% fun doc_update1/0,
+%% fun doc_update2/0,
+%% fun doc_update3/0
+%% ]
+%% }.
+%%
+%%
+%% % eunits
+%% doc_update1() ->
+%% Doc1 = #doc{revs = {1,[<<"foo">>]}},
+%% Doc2 = #doc{revs = {1,[<<"bar">>]}},
+%% Docs = [Doc1],
+%% Docs2 = [Doc2, Doc1],
+%% Dict = dict:from_list([{Doc,[]} || Doc <- Docs]),
+%% Dict2 = dict:from_list([{Doc,[]} || Doc <- Docs2]),
+%%
+%% Shards =
+%% mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
+%% GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
+%%
+%%
+%% % test for W = 2
+%% AccW2 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
+%% Dict},
+%%
+%% {ok,{WaitingCountW2_1,_,_,_,_}=AccW2_1} =
+%% handle_message({ok, [{ok, Doc1}]},hd(Shards),AccW2),
+%% ?assertEqual(WaitingCountW2_1,2),
+%% {stop, FinalReplyW2 } =
+%% handle_message({ok, [{ok, Doc1}]},lists:nth(2,Shards),AccW2_1),
+%% ?assertEqual({ok, [{Doc1, {ok,Doc1}}]},FinalReplyW2),
+%%
+%% % test for W = 3
+%% AccW3 = {length(Shards), length(Docs), list_to_integer("3"), GroupedDocs,
+%% Dict},
+%%
+%% {ok,{WaitingCountW3_1,_,_,_,_}=AccW3_1} =
+%% handle_message({ok, [{ok, Doc1}]},hd(Shards),AccW3),
+%% ?assertEqual(WaitingCountW3_1,2),
+%%
+%% {ok,{WaitingCountW3_2,_,_,_,_}=AccW3_2} =
+%% handle_message({ok, [{ok, Doc1}]},lists:nth(2,Shards),AccW3_1),
+%% ?assertEqual(WaitingCountW3_2,1),
+%%
+%% {stop, FinalReplyW3 } =
+%% handle_message({ok, [{ok, Doc1}]},lists:nth(3,Shards),AccW3_2),
+%% ?assertEqual({ok, [{Doc1, {ok,Doc1}}]},FinalReplyW3),
+%%
+%% % test w quorum > # shards, which should fail immediately
+%%
+%% Shards2 = mem3_util:create_partition_map("foo",1,1,["node1"]),
+%% GroupedDocs2 = group_docs_by_shard_hack(<<"foo">>,Shards2,Docs),
+%%
+%% AccW4 =
+%% {length(Shards2), length(Docs), list_to_integer("2"), GroupedDocs2, Dict},
+%% Bool =
+%% case handle_message({ok, [{ok, Doc1}]},hd(Shards2),AccW4) of
+%% {stop, _Reply} ->
+%% true;
+%% _ -> false
+%% end,
+%% ?assertEqual(Bool,true),
+%%
+%% % Docs with no replies should end up as {error, internal_server_error}
+%% SA1 = #shard{node=a, range=1},
+%% SB1 = #shard{node=b, range=1},
+%% SA2 = #shard{node=a, range=2},
+%% SB2 = #shard{node=b, range=2},
+%% GroupedDocs3 = [{SA1,[Doc1]}, {SB1,[Doc1]}, {SA2,[Doc2]}, {SB2,[Doc2]}],
+%% StW5_0 = {length(GroupedDocs3), length(Docs2), 2, GroupedDocs3, Dict2},
+%% {ok, StW5_1} = handle_message({ok, [{ok, "A"}]}, SA1, StW5_0),
+%% {ok, StW5_2} = handle_message({rexi_EXIT, nil}, SB1, StW5_1),
+%% {ok, StW5_3} = handle_message({rexi_EXIT, nil}, SA2, StW5_2),
+%% {stop, ReplyW5} = handle_message({rexi_EXIT, nil}, SB2, StW5_3),
+%% ?assertEqual(
+%% {error, [{Doc1,{accepted,"A"}},{Doc2,{error,internal_server_error}}]},
+%% ReplyW5
+%% ).
+%%
+%% doc_update2() ->
+%% Doc1 = #doc{revs = {1,[<<"foo">>]}},
+%% Doc2 = #doc{revs = {1,[<<"bar">>]}},
+%% Docs = [Doc2, Doc1],
+%% Shards =
+%% mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
+%% GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
+%% Acc0 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
+%% dict:from_list([{Doc,[]} || Doc <- Docs])},
+%%
+%% {ok,{WaitingCount1,_,_,_,_}=Acc1} =
+%% handle_message({ok, [{ok, Doc1},{ok, Doc2}]},hd(Shards),Acc0),
+%% ?assertEqual(WaitingCount1,2),
+%%
+%% {ok,{WaitingCount2,_,_,_,_}=Acc2} =
+%% handle_message({rexi_EXIT, 1},lists:nth(2,Shards),Acc1),
+%% ?assertEqual(WaitingCount2,1),
+%%
+%% {stop, Reply} =
+%% handle_message({rexi_EXIT, 1},lists:nth(3,Shards),Acc2),
+%%
+%% ?assertEqual({accepted, [{Doc1,{accepted,Doc2}}, {Doc2,{accepted,Doc1}}]},
+%% Reply).
+%%
+%% doc_update3() ->
+%% Doc1 = #doc{revs = {1,[<<"foo">>]}},
+%% Doc2 = #doc{revs = {1,[<<"bar">>]}},
+%% Docs = [Doc2, Doc1],
+%% Shards =
+%% mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
+%% GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
+%% Acc0 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
+%% dict:from_list([{Doc,[]} || Doc <- Docs])},
+%%
+%% {ok,{WaitingCount1,_,_,_,_}=Acc1} =
+%% handle_message({ok, [{ok, Doc1},{ok, Doc2}]},hd(Shards),Acc0),
+%% ?assertEqual(WaitingCount1,2),
+%%
+%% {ok,{WaitingCount2,_,_,_,_}=Acc2} =
+%% handle_message({rexi_EXIT, 1},lists:nth(2,Shards),Acc1),
+%% ?assertEqual(WaitingCount2,1),
+%%
+%% {stop, Reply} =
+%% handle_message({ok, [{ok, Doc1},{ok, Doc2}]},lists:nth(3,Shards),Acc2),
+%%
+%% ?assertEqual({ok, [{Doc1, {ok, Doc2}},{Doc2, {ok,Doc1}}]},Reply).
+%%
+%% % needed for testing to avoid having to start the mem3 application
+%% group_docs_by_shard_hack(_DbName, Shards, Docs) ->
+%% dict:to_list(lists:foldl(fun(#doc{id=_Id} = Doc, D0) ->
+%% lists:foldl(fun(Shard, D1) ->
+%% dict:append(Shard, Doc, D1)
+%% end, D0, Shards)
+%% end, dict:new(), Docs)).
+%%
+%% -endif.
diff --git a/src/fabric/src/fabric_rpc.erl b/src/fabric/src/fabric_rpc.erl
index 85da3ff12..6fdc76595 100644
--- a/src/fabric/src/fabric_rpc.erl
+++ b/src/fabric/src/fabric_rpc.erl
@@ -643,22 +643,22 @@ uuid(Db) ->
uuid_prefix_len() ->
list_to_integer(config:get("fabric", "uuid_prefix_len", "7")).
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-maybe_filtered_json_doc_no_filter_test() ->
- Body = {[{<<"a">>, 1}]},
- Doc = #doc{id = <<"1">>, revs = {1, [<<"r1">>]}, body = Body},
- {JDocProps} = maybe_filtered_json_doc(Doc, [], x),
- ExpectedProps = [{<<"_id">>, <<"1">>}, {<<"_rev">>, <<"1-r1">>}, {<<"a">>, 1}],
- ?assertEqual(lists:keysort(1, JDocProps), ExpectedProps).
-
-maybe_filtered_json_doc_with_filter_test() ->
- Body = {[{<<"a">>, 1}]},
- Doc = #doc{id = <<"1">>, revs = {1, [<<"r1">>]}, body = Body},
- Fields = [<<"a">>, <<"nonexistent">>],
- Filter = {selector, main_only, {some_selector, Fields}},
- {JDocProps} = maybe_filtered_json_doc(Doc, [], Filter),
- ?assertEqual(JDocProps, [{<<"a">>, 1}]).
-
--endif.
+%% -ifdef(TEST).
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%% maybe_filtered_json_doc_no_filter_test() ->
+%% Body = {[{<<"a">>, 1}]},
+%% Doc = #doc{id = <<"1">>, revs = {1, [<<"r1">>]}, body = Body},
+%% {JDocProps} = maybe_filtered_json_doc(Doc, [], x),
+%% ExpectedProps = [{<<"_id">>, <<"1">>}, {<<"_rev">>, <<"1-r1">>}, {<<"a">>, 1}],
+%% ?assertEqual(lists:keysort(1, JDocProps), ExpectedProps).
+%%
+%% maybe_filtered_json_doc_with_filter_test() ->
+%% Body = {[{<<"a">>, 1}]},
+%% Doc = #doc{id = <<"1">>, revs = {1, [<<"r1">>]}, body = Body},
+%% Fields = [<<"a">>, <<"nonexistent">>],
+%% Filter = {selector, main_only, {some_selector, Fields}},
+%% {JDocProps} = maybe_filtered_json_doc(Doc, [], Filter),
+%% ?assertEqual(JDocProps, [{<<"a">>, 1}]).
+%%
+%% -endif.
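
The fabric_rpc tests commented out above check that a document body can be narrowed to a whitelisted set of fields before it is returned. At its core that is a key filter over the body proplist; a tiny illustrative sketch is below (filter_fields/2 is a made-up name, not fabric's helper).

    %% Keep only the properties whose keys are listed in Fields.
    filter_fields({Props}, Fields) ->
        {[{K, V} || {K, V} <- Props, lists:member(K, Fields)]}.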
diff --git a/src/fabric/src/fabric_streams.erl b/src/fabric/src/fabric_streams.erl
index 59c8b8a6b..98e285081 100644
--- a/src/fabric/src/fabric_streams.erl
+++ b/src/fabric/src/fabric_streams.erl
@@ -192,82 +192,83 @@ add_worker_to_cleaner(CoordinatorPid, Worker) ->
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-worker_cleaner_test_() ->
- {
- "Fabric spawn_worker_cleaner test", {
- setup, fun setup/0, fun teardown/1,
- fun(_) -> [
- should_clean_workers(),
- does_not_fire_if_cleanup_called(),
- should_clean_additional_worker_too()
- ] end
- }
- }.
-
-
-should_clean_workers() ->
- ?_test(begin
- meck:reset(rexi),
- erase(?WORKER_CLEANER),
- Workers = [
- #shard{node = 'n1', ref = make_ref()},
- #shard{node = 'n2', ref = make_ref()}
- ],
- {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
- Cleaner = spawn_worker_cleaner(Coord, Workers),
- Ref = erlang:monitor(process, Cleaner),
- Coord ! die,
- receive {'DOWN', Ref, _, Cleaner, _} -> ok end,
- ?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
- end).
-
-
-does_not_fire_if_cleanup_called() ->
- ?_test(begin
- meck:reset(rexi),
- erase(?WORKER_CLEANER),
- Workers = [
- #shard{node = 'n1', ref = make_ref()},
- #shard{node = 'n2', ref = make_ref()}
- ],
- {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
- Cleaner = spawn_worker_cleaner(Coord, Workers),
- Ref = erlang:monitor(process, Cleaner),
- cleanup(Workers),
- Coord ! die,
- receive {'DOWN', Ref, _, _, _} -> ok end,
- % 2 calls would be from cleanup/1 function. If cleanup process fired
- % too it would have been 4 calls total.
- ?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
- end).
-
-
-should_clean_additional_worker_too() ->
- ?_test(begin
- meck:reset(rexi),
- erase(?WORKER_CLEANER),
- Workers = [
- #shard{node = 'n1', ref = make_ref()}
- ],
- {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
- Cleaner = spawn_worker_cleaner(Coord, Workers),
- add_worker_to_cleaner(Coord, #shard{node = 'n2', ref = make_ref()}),
- Ref = erlang:monitor(process, Cleaner),
- Coord ! die,
- receive {'DOWN', Ref, _, Cleaner, _} -> ok end,
- ?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
- end).
-
-
-setup() ->
- ok = meck:expect(rexi, kill_all, fun(_) -> ok end).
-
-
-teardown(_) ->
- meck:unload().
-
--endif.
+%% -ifdef(TEST).
+%%
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%% worker_cleaner_test_() ->
+%% {
+%% "Fabric spawn_worker_cleaner test", {
+%% setup, fun setup/0, fun teardown/1,
+%% fun(_) -> [
+%% should_clean_workers(),
+%% does_not_fire_if_cleanup_called(),
+%% should_clean_additional_worker_too()
+%% ] end
+%% }
+%% }.
+%%
+%%
+%% should_clean_workers() ->
+%% ?_test(begin
+%% meck:reset(rexi),
+%% erase(?WORKER_CLEANER),
+%% Workers = [
+%% #shard{node = 'n1', ref = make_ref()},
+%% #shard{node = 'n2', ref = make_ref()}
+%% ],
+%% {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
+%% Cleaner = spawn_worker_cleaner(Coord, Workers),
+%% Ref = erlang:monitor(process, Cleaner),
+%% Coord ! die,
+%% receive {'DOWN', Ref, _, Cleaner, _} -> ok end,
+%% ?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
+%% end).
+%%
+%%
+%% does_not_fire_if_cleanup_called() ->
+%% ?_test(begin
+%% meck:reset(rexi),
+%% erase(?WORKER_CLEANER),
+%% Workers = [
+%% #shard{node = 'n1', ref = make_ref()},
+%% #shard{node = 'n2', ref = make_ref()}
+%% ],
+%% {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
+%% Cleaner = spawn_worker_cleaner(Coord, Workers),
+%% Ref = erlang:monitor(process, Cleaner),
+%% cleanup(Workers),
+%% Coord ! die,
+%% receive {'DOWN', Ref, _, _, _} -> ok end,
+%% % 2 calls would be from cleanup/1 function. If cleanup process fired
+%% % too it would have been 4 calls total.
+%% ?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
+%% end).
+%%
+%%
+%% should_clean_additional_worker_too() ->
+%% ?_test(begin
+%% meck:reset(rexi),
+%% erase(?WORKER_CLEANER),
+%% Workers = [
+%% #shard{node = 'n1', ref = make_ref()}
+%% ],
+%% {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
+%% Cleaner = spawn_worker_cleaner(Coord, Workers),
+%% add_worker_to_cleaner(Coord, #shard{node = 'n2', ref = make_ref()}),
+%% Ref = erlang:monitor(process, Cleaner),
+%% Coord ! die,
+%% receive {'DOWN', Ref, _, Cleaner, _} -> ok end,
+%% ?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
+%% end).
+%%
+%%
+%% setup() ->
+%% ok = meck:expect(rexi, kill_all, fun(_) -> ok end).
+%%
+%%
+%% teardown(_) ->
+%% meck:unload().
+%%
+%% -endif.
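
The worker-cleaner tests commented out above all rely on one small pattern: a helper process monitors the coordinator and tears the remote workers down when the coordinator exits. A rough standalone sketch of that pattern follows, where KillFun stands in for whatever cleanup call is appropriate (the tests above stub rexi:kill_all/1 via meck).

    %% Spawn a cleaner that outlives the coordinator and runs
    %% KillFun(Workers) once the coordinator exits for any reason.
    spawn_cleaner(Coordinator, Workers, KillFun) ->
        spawn(fun() ->
            Ref = erlang:monitor(process, Coordinator),
            receive
                {'DOWN', Ref, process, Coordinator, _Reason} ->
                    KillFun(Workers)
            end
        end).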
diff --git a/src/fabric/src/fabric_util.erl b/src/fabric/src/fabric_util.erl
index 8aa14e73a..1c1ee80b7 100644
--- a/src/fabric/src/fabric_util.erl
+++ b/src/fabric/src/fabric_util.erl
@@ -192,30 +192,30 @@ create_monitors(Shards) ->
]),
rexi_monitor:start(MonRefs).
-%% verify only id and rev are used in key.
-update_counter_test() ->
- Reply = {ok, #doc{id = <<"id">>, revs = <<"rev">>,
- body = <<"body">>, atts = <<"atts">>}},
- ?assertEqual([{{<<"id">>,<<"rev">>}, {Reply, 1}}],
- update_counter(Reply, 1, [])).
-
-remove_ancestors_test() ->
- Foo1 = {ok, #doc{revs = {1, [<<"foo">>]}}},
- Foo2 = {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}},
- Bar1 = {ok, #doc{revs = {1, [<<"bar">>]}}},
- Bar2 = {not_found, {1,<<"bar">>}},
- ?assertEqual(
- [kv(Bar1,1), kv(Foo1,1)],
- remove_ancestors([kv(Bar1,1), kv(Foo1,1)], [])
- ),
- ?assertEqual(
- [kv(Bar1,1), kv(Foo2,2)],
- remove_ancestors([kv(Bar1,1), kv(Foo1,1), kv(Foo2,1)], [])
- ),
- ?assertEqual(
- [kv(Bar1,2)],
- remove_ancestors([kv(Bar2,1), kv(Bar1,1)], [])
- ).
+%% %% verify only id and rev are used in key.
+%% update_counter_test() ->
+%% Reply = {ok, #doc{id = <<"id">>, revs = <<"rev">>,
+%% body = <<"body">>, atts = <<"atts">>}},
+%% ?assertEqual([{{<<"id">>,<<"rev">>}, {Reply, 1}}],
+%% update_counter(Reply, 1, [])).
+%%
+%% remove_ancestors_test() ->
+%% Foo1 = {ok, #doc{revs = {1, [<<"foo">>]}}},
+%% Foo2 = {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}},
+%% Bar1 = {ok, #doc{revs = {1, [<<"bar">>]}}},
+%% Bar2 = {not_found, {1,<<"bar">>}},
+%% ?assertEqual(
+%% [kv(Bar1,1), kv(Foo1,1)],
+%% remove_ancestors([kv(Bar1,1), kv(Foo1,1)], [])
+%% ),
+%% ?assertEqual(
+%% [kv(Bar1,1), kv(Foo2,2)],
+%% remove_ancestors([kv(Bar1,1), kv(Foo1,1), kv(Foo2,1)], [])
+%% ),
+%% ?assertEqual(
+%% [kv(Bar1,2)],
+%% remove_ancestors([kv(Bar2,1), kv(Bar1,1)], [])
+%% ).
is_replicator_db(DbName) ->
path_ends_with(DbName, <<"_replicator">>).
diff --git a/src/fabric/src/fabric_view.erl b/src/fabric/src/fabric_view.erl
index 425f864c4..6c33e1e32 100644
--- a/src/fabric/src/fabric_view.erl
+++ b/src/fabric/src/fabric_view.erl
@@ -413,66 +413,66 @@ remove_finalizer(Args) ->
couch_mrview_util:set_extra(Args, finalizer, null).
-remove_overlapping_shards_test() ->
- Cb = undefined,
-
- Shards = mk_cnts([[0, 10], [11, 20], [21, ?RING_END]], 3),
-
- % Simple (exact) overlap
- Shard1 = mk_shard("node-3", [11, 20]),
- Shards1 = fabric_dict:store(Shard1, nil, Shards),
- R1 = remove_overlapping_shards(Shard1, Shards1, Cb),
- ?assertEqual([{0, 10}, {11, 20}, {21, ?RING_END}],
- fabric_util:worker_ranges(R1)),
- ?assert(fabric_dict:is_key(Shard1, R1)),
-
- % Split overlap (shard overlap multiple workers)
- Shard2 = mk_shard("node-3", [0, 20]),
- Shards2 = fabric_dict:store(Shard2, nil, Shards),
- R2 = remove_overlapping_shards(Shard2, Shards2, Cb),
- ?assertEqual([{0, 20}, {21, ?RING_END}],
- fabric_util:worker_ranges(R2)),
- ?assert(fabric_dict:is_key(Shard2, R2)).
-
-
-get_shard_replacements_test() ->
- Unused = [mk_shard(N, [B, E]) || {N, B, E} <- [
- {"n1", 11, 20}, {"n1", 21, ?RING_END},
- {"n2", 0, 4}, {"n2", 5, 10}, {"n2", 11, 20},
- {"n3", 0, 21, ?RING_END}
- ]],
- Used = [mk_shard(N, [B, E]) || {N, B, E} <- [
- {"n2", 21, ?RING_END},
- {"n3", 0, 10}, {"n3", 11, 20}
- ]],
- Res = lists:sort(get_shard_replacements_int(Unused, Used)),
- % Notice that [0, 10] range can be replaced by spawning the [0, 4] and [5,
- % 10] workers on n1
- Expect = [
- {[0, 10], [mk_shard("n2", [0, 4]), mk_shard("n2", [5, 10])]},
- {[11, 20], [mk_shard("n1", [11, 20]), mk_shard("n2", [11, 20])]},
- {[21, ?RING_END], [mk_shard("n1", [21, ?RING_END])]}
- ],
- ?assertEqual(Expect, Res).
-
-
-mk_cnts(Ranges, NoNodes) ->
- orddict:from_list([{Shard,nil}
- || Shard <-
- lists:flatten(lists:map(
- fun(Range) ->
- mk_shards(NoNodes,Range,[])
- end, Ranges))]
- ).
-
-mk_shards(0,_Range,Shards) ->
- Shards;
-mk_shards(NoNodes,Range,Shards) ->
- Name ="node-" ++ integer_to_list(NoNodes),
- mk_shards(NoNodes-1,Range, [mk_shard(Name, Range) | Shards]).
-
-
-mk_shard(Name, Range) ->
- Node = list_to_atom(Name),
- BName = list_to_binary(Name),
- #shard{name = BName, node = Node, range = Range}.
+%% remove_overlapping_shards_test() ->
+%% Cb = undefined,
+%%
+%% Shards = mk_cnts([[0, 10], [11, 20], [21, ?RING_END]], 3),
+%%
+%% % Simple (exact) overlap
+%% Shard1 = mk_shard("node-3", [11, 20]),
+%% Shards1 = fabric_dict:store(Shard1, nil, Shards),
+%% R1 = remove_overlapping_shards(Shard1, Shards1, Cb),
+%% ?assertEqual([{0, 10}, {11, 20}, {21, ?RING_END}],
+%% fabric_util:worker_ranges(R1)),
+%% ?assert(fabric_dict:is_key(Shard1, R1)),
+%%
+%% % Split overlap (shard overlap multiple workers)
+%% Shard2 = mk_shard("node-3", [0, 20]),
+%% Shards2 = fabric_dict:store(Shard2, nil, Shards),
+%% R2 = remove_overlapping_shards(Shard2, Shards2, Cb),
+%% ?assertEqual([{0, 20}, {21, ?RING_END}],
+%% fabric_util:worker_ranges(R2)),
+%% ?assert(fabric_dict:is_key(Shard2, R2)).
+%%
+%%
+%% get_shard_replacements_test() ->
+%% Unused = [mk_shard(N, [B, E]) || {N, B, E} <- [
+%% {"n1", 11, 20}, {"n1", 21, ?RING_END},
+%% {"n2", 0, 4}, {"n2", 5, 10}, {"n2", 11, 20},
+%% {"n3", 0, 21, ?RING_END}
+%% ]],
+%% Used = [mk_shard(N, [B, E]) || {N, B, E} <- [
+%% {"n2", 21, ?RING_END},
+%% {"n3", 0, 10}, {"n3", 11, 20}
+%% ]],
+%% Res = lists:sort(get_shard_replacements_int(Unused, Used)),
+%% % Notice that [0, 10] range can be replaced by spawning the [0, 4] and [5,
+%% % 10] workers on n1
+%% Expect = [
+%% {[0, 10], [mk_shard("n2", [0, 4]), mk_shard("n2", [5, 10])]},
+%% {[11, 20], [mk_shard("n1", [11, 20]), mk_shard("n2", [11, 20])]},
+%% {[21, ?RING_END], [mk_shard("n1", [21, ?RING_END])]}
+%% ],
+%% ?assertEqual(Expect, Res).
+%%
+%%
+%% mk_cnts(Ranges, NoNodes) ->
+%% orddict:from_list([{Shard,nil}
+%% || Shard <-
+%% lists:flatten(lists:map(
+%% fun(Range) ->
+%% mk_shards(NoNodes,Range,[])
+%% end, Ranges))]
+%% ).
+%%
+%% mk_shards(0,_Range,Shards) ->
+%% Shards;
+%% mk_shards(NoNodes,Range,Shards) ->
+%% Name ="node-" ++ integer_to_list(NoNodes),
+%% mk_shards(NoNodes-1,Range, [mk_shard(Name, Range) | Shards]).
+%%
+%%
+%% mk_shard(Name, Range) ->
+%% Node = list_to_atom(Name),
+%% BName = list_to_binary(Name),
+%% #shard{name = BName, node = Node, range = Range}.
diff --git a/src/fabric/src/fabric_view_changes.erl b/src/fabric/src/fabric_view_changes.erl
index febbd3169..3f684a3cc 100644
--- a/src/fabric/src/fabric_view_changes.erl
+++ b/src/fabric/src/fabric_view_changes.erl
@@ -637,184 +637,184 @@ increment_changes_epoch() ->
application:set_env(fabric, changes_epoch, os:timestamp()).
-unpack_seq_setup() ->
- meck:new(mem3),
- meck:new(fabric_view),
- meck:expect(mem3, get_shard, fun(_, _, _) -> {ok, #shard{}} end),
- meck:expect(fabric_ring, is_progress_possible, fun(_) -> true end),
- ok.
-
-
-unpack_seqs_test_() ->
- {
- setup,
- fun unpack_seq_setup/0,
- fun (_) -> meck:unload() end,
- [
- t_unpack_seqs()
- ]
- }.
-
-
-t_unpack_seqs() ->
- ?_test(begin
- % BigCouch 0.3 style.
- assert_shards("23423-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
- "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
- "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA"),
-
- % BigCouch 0.4 style.
- assert_shards([23423,<<"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
- "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
- "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA">>]),
-
- % BigCouch 0.4 style (as string).
- assert_shards("[23423,\"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
- "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
- "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
- assert_shards("[23423 ,\"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
- "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
- "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
- assert_shards("[23423, \"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
- "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
- "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
- assert_shards("[23423 , \"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
- "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
- "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
-
-        % with internal hyphen
- assert_shards("651-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwNDLXMwBCwxygOFMiQ"
- "5L8____sxJTcalIUgCSSfZgReE4FTmAFMWDFYXgVJQAUlQPVuSKS1EeC5BkaABSQHXz8"
- "VgJUbgAonB_VqIPfoUHIArvE7T6AUQh0I1-WQAzp1XB"),
- assert_shards([651,"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwNDLXMwBCwxygOFMiQ"
- "5L8____sxJTcalIUgCSSfZgReE4FTmAFMWDFYXgVJQAUlQPVuSKS1EeC5BkaABSQHXz8"
- "VgJUbgAonB_VqIPfoUHIArvE7T6AUQh0I1-WQAzp1XB"]),
-
- % CouchDB 1.2 style
- assert_shards("\"23423-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
- "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
- "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"")
- end).
-
-
-assert_shards(Packed) ->
- ?assertMatch([{#shard{},_}|_], unpack_seqs(Packed, <<"foo">>)).
-
-
-find_replacements_test() ->
-    % None of the workers are in the live list of shards but there is a
- % replacement on n3 for the full range. It should get picked instead of
-    % the two smaller ones on n2.
- Workers1 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}]),
- AllShards1 = [
- mk_shard("n1", 11, ?RING_END),
- mk_shard("n2", 0, 4),
- mk_shard("n2", 5, 10),
- mk_shard("n3", 0, ?RING_END)
- ],
- {WorkersRes1, Dead1, Reps1} = find_replacements(Workers1, AllShards1),
- ?assertEqual([], WorkersRes1),
- ?assertEqual(Workers1, Dead1),
- ?assertEqual([mk_shard("n3", 0, ?RING_END)], Reps1),
-
- % None of the workers are in the live list of shards and there is a
- % split replacement from n2 (range [0, 10] replaced with [0, 4], [5, 10])
- Workers2 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}]),
- AllShards2 = [
- mk_shard("n1", 11, ?RING_END),
- mk_shard("n2", 0, 4),
- mk_shard("n2", 5, 10)
- ],
- {WorkersRes2, Dead2, Reps2} = find_replacements(Workers2, AllShards2),
- ?assertEqual([], WorkersRes2),
- ?assertEqual(Workers2, Dead2),
- ?assertEqual([
- mk_shard("n1", 11, ?RING_END),
- mk_shard("n2", 0, 4),
- mk_shard("n2", 5, 10)
- ], lists:sort(Reps2)),
-
- % One worker is available and one needs to be replaced. Replacement will be
- % from two split shards
- Workers3 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}]),
- AllShards3 = [
- mk_shard("n1", 11, ?RING_END),
- mk_shard("n2", 0, 4),
- mk_shard("n2", 5, 10),
- mk_shard("n2", 11, ?RING_END)
- ],
- {WorkersRes3, Dead3, Reps3} = find_replacements(Workers3, AllShards3),
- ?assertEqual(mk_workers([{"n2", 11, ?RING_END}]), WorkersRes3),
- ?assertEqual(mk_workers([{"n1", 0, 10}]), Dead3),
- ?assertEqual([
- mk_shard("n2", 0, 4),
- mk_shard("n2", 5, 10)
- ], lists:sort(Reps3)),
-
- % All workers are available. Make sure they are not killed even if there is
- % a longer (single) shard to replace them.
- Workers4 = mk_workers([{"n1", 0, 10}, {"n1", 11, ?RING_END}]),
- AllShards4 = [
- mk_shard("n1", 0, 10),
- mk_shard("n1", 11, ?RING_END),
- mk_shard("n2", 0, 4),
- mk_shard("n2", 5, 10),
- mk_shard("n3", 0, ?RING_END)
- ],
- {WorkersRes4, Dead4, Reps4} = find_replacements(Workers4, AllShards4),
- ?assertEqual(Workers4, WorkersRes4),
- ?assertEqual([], Dead4),
- ?assertEqual([], Reps4).
-
-
-mk_workers(NodesRanges) ->
- mk_workers(NodesRanges, nil).
-
-mk_workers(NodesRanges, Val) ->
- orddict:from_list([{mk_shard(N, B, E), Val} || {N, B, E} <- NodesRanges]).
-
-
-mk_shard(Name, B, E) ->
- Node = list_to_atom(Name),
- BName = list_to_binary(Name),
- #shard{name = BName, node = Node, range = [B, E]}.
-
-
-find_split_shard_replacements_test() ->
-    % One worker can be replaced and one can't
- Dead1 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}], 42),
- Shards1 = [
- mk_shard("n1", 0, 4),
- mk_shard("n1", 5, 10),
- mk_shard("n3", 11, ?RING_END)
- ],
- {Workers1, ShardsLeft1} = find_split_shard_replacements(Dead1, Shards1),
- ?assertEqual(mk_workers([{"n1", 0, 4}, {"n1", 5, 10}], 42), Workers1),
- ?assertEqual([mk_shard("n3", 11, ?RING_END)], ShardsLeft1),
-
- % All workers can be replaced - one by 1 shard, another by 3 smaller shards
- Dead2 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}], 42),
- Shards2 = [
- mk_shard("n1", 0, 10),
- mk_shard("n2", 11, 12),
- mk_shard("n2", 13, 14),
- mk_shard("n2", 15, ?RING_END)
- ],
- {Workers2, ShardsLeft2} = find_split_shard_replacements(Dead2, Shards2),
- ?assertEqual(mk_workers([
- {"n1", 0, 10},
- {"n2", 11, 12},
- {"n2", 13, 14},
- {"n2", 15, ?RING_END}
- ], 42), Workers2),
- ?assertEqual([], ShardsLeft2),
-
- % No workers can be replaced. Ranges match but they are on different nodes
- Dead3 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}], 42),
- Shards3 = [
- mk_shard("n2", 0, 10),
- mk_shard("n3", 11, ?RING_END)
- ],
- {Workers3, ShardsLeft3} = find_split_shard_replacements(Dead3, Shards3),
- ?assertEqual([], Workers3),
- ?assertEqual(Shards3, ShardsLeft3).
+%% unpack_seq_setup() ->
+%% meck:new(mem3),
+%% meck:new(fabric_view),
+%% meck:expect(mem3, get_shard, fun(_, _, _) -> {ok, #shard{}} end),
+%% meck:expect(fabric_ring, is_progress_possible, fun(_) -> true end),
+%% ok.
+%%
+%%
+%% unpack_seqs_test_() ->
+%% {
+%% setup,
+%% fun unpack_seq_setup/0,
+%% fun (_) -> meck:unload() end,
+%% [
+%% t_unpack_seqs()
+%% ]
+%% }.
+%%
+%%
+%% t_unpack_seqs() ->
+%% ?_test(begin
+%% % BigCouch 0.3 style.
+%% assert_shards("23423-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+%% "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+%% "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA"),
+%%
+%% % BigCouch 0.4 style.
+%% assert_shards([23423,<<"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+%% "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+%% "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA">>]),
+%%
+%% % BigCouch 0.4 style (as string).
+%% assert_shards("[23423,\"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+%% "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+%% "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
+%% assert_shards("[23423 ,\"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+%% "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+%% "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
+%% assert_shards("[23423, \"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+%% "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+%% "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
+%% assert_shards("[23423 , \"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+%% "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+%% "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
+%%
+%%         % with internal hyphen
+%% assert_shards("651-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwNDLXMwBCwxygOFMiQ"
+%% "5L8____sxJTcalIUgCSSfZgReE4FTmAFMWDFYXgVJQAUlQPVuSKS1EeC5BkaABSQHXz8"
+%% "VgJUbgAonB_VqIPfoUHIArvE7T6AUQh0I1-WQAzp1XB"),
+%% assert_shards([651,"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwNDLXMwBCwxygOFMiQ"
+%% "5L8____sxJTcalIUgCSSfZgReE4FTmAFMWDFYXgVJQAUlQPVuSKS1EeC5BkaABSQHXz8"
+%% "VgJUbgAonB_VqIPfoUHIArvE7T6AUQh0I1-WQAzp1XB"]),
+%%
+%% % CouchDB 1.2 style
+%% assert_shards("\"23423-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+%% "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+%% "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"")
+%% end).
+%%
+%%
+%% assert_shards(Packed) ->
+%% ?assertMatch([{#shard{},_}|_], unpack_seqs(Packed, <<"foo">>)).
+%%
+%%
+%% find_replacements_test() ->
+%%     % None of the workers are in the live list of shards but there is a
+%% % replacement on n3 for the full range. It should get picked instead of
+%%     % the two smaller ones on n2.
+%% Workers1 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}]),
+%% AllShards1 = [
+%% mk_shard("n1", 11, ?RING_END),
+%% mk_shard("n2", 0, 4),
+%% mk_shard("n2", 5, 10),
+%% mk_shard("n3", 0, ?RING_END)
+%% ],
+%% {WorkersRes1, Dead1, Reps1} = find_replacements(Workers1, AllShards1),
+%% ?assertEqual([], WorkersRes1),
+%% ?assertEqual(Workers1, Dead1),
+%% ?assertEqual([mk_shard("n3", 0, ?RING_END)], Reps1),
+%%
+%% % None of the workers are in the live list of shards and there is a
+%% % split replacement from n2 (range [0, 10] replaced with [0, 4], [5, 10])
+%% Workers2 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}]),
+%% AllShards2 = [
+%% mk_shard("n1", 11, ?RING_END),
+%% mk_shard("n2", 0, 4),
+%% mk_shard("n2", 5, 10)
+%% ],
+%% {WorkersRes2, Dead2, Reps2} = find_replacements(Workers2, AllShards2),
+%% ?assertEqual([], WorkersRes2),
+%% ?assertEqual(Workers2, Dead2),
+%% ?assertEqual([
+%% mk_shard("n1", 11, ?RING_END),
+%% mk_shard("n2", 0, 4),
+%% mk_shard("n2", 5, 10)
+%% ], lists:sort(Reps2)),
+%%
+%% % One worker is available and one needs to be replaced. Replacement will be
+%% % from two split shards
+%% Workers3 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}]),
+%% AllShards3 = [
+%% mk_shard("n1", 11, ?RING_END),
+%% mk_shard("n2", 0, 4),
+%% mk_shard("n2", 5, 10),
+%% mk_shard("n2", 11, ?RING_END)
+%% ],
+%% {WorkersRes3, Dead3, Reps3} = find_replacements(Workers3, AllShards3),
+%% ?assertEqual(mk_workers([{"n2", 11, ?RING_END}]), WorkersRes3),
+%% ?assertEqual(mk_workers([{"n1", 0, 10}]), Dead3),
+%% ?assertEqual([
+%% mk_shard("n2", 0, 4),
+%% mk_shard("n2", 5, 10)
+%% ], lists:sort(Reps3)),
+%%
+%% % All workers are available. Make sure they are not killed even if there is
+%% % a longer (single) shard to replace them.
+%% Workers4 = mk_workers([{"n1", 0, 10}, {"n1", 11, ?RING_END}]),
+%% AllShards4 = [
+%% mk_shard("n1", 0, 10),
+%% mk_shard("n1", 11, ?RING_END),
+%% mk_shard("n2", 0, 4),
+%% mk_shard("n2", 5, 10),
+%% mk_shard("n3", 0, ?RING_END)
+%% ],
+%% {WorkersRes4, Dead4, Reps4} = find_replacements(Workers4, AllShards4),
+%% ?assertEqual(Workers4, WorkersRes4),
+%% ?assertEqual([], Dead4),
+%% ?assertEqual([], Reps4).
+%%
+%%
+%% mk_workers(NodesRanges) ->
+%% mk_workers(NodesRanges, nil).
+%%
+%% mk_workers(NodesRanges, Val) ->
+%% orddict:from_list([{mk_shard(N, B, E), Val} || {N, B, E} <- NodesRanges]).
+%%
+%%
+%% mk_shard(Name, B, E) ->
+%% Node = list_to_atom(Name),
+%% BName = list_to_binary(Name),
+%% #shard{name = BName, node = Node, range = [B, E]}.
+%%
+%%
+%% find_split_shard_replacements_test() ->
+%%     % One worker can be replaced and one can't
+%% Dead1 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}], 42),
+%% Shards1 = [
+%% mk_shard("n1", 0, 4),
+%% mk_shard("n1", 5, 10),
+%% mk_shard("n3", 11, ?RING_END)
+%% ],
+%% {Workers1, ShardsLeft1} = find_split_shard_replacements(Dead1, Shards1),
+%% ?assertEqual(mk_workers([{"n1", 0, 4}, {"n1", 5, 10}], 42), Workers1),
+%% ?assertEqual([mk_shard("n3", 11, ?RING_END)], ShardsLeft1),
+%%
+%% % All workers can be replaced - one by 1 shard, another by 3 smaller shards
+%% Dead2 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}], 42),
+%% Shards2 = [
+%% mk_shard("n1", 0, 10),
+%% mk_shard("n2", 11, 12),
+%% mk_shard("n2", 13, 14),
+%% mk_shard("n2", 15, ?RING_END)
+%% ],
+%% {Workers2, ShardsLeft2} = find_split_shard_replacements(Dead2, Shards2),
+%% ?assertEqual(mk_workers([
+%% {"n1", 0, 10},
+%% {"n2", 11, 12},
+%% {"n2", 13, 14},
+%% {"n2", 15, ?RING_END}
+%% ], 42), Workers2),
+%% ?assertEqual([], ShardsLeft2),
+%%
+%% % No workers can be replaced. Ranges match but they are on different nodes
+%% Dead3 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}], 42),
+%% Shards3 = [
+%% mk_shard("n2", 0, 10),
+%% mk_shard("n3", 11, ?RING_END)
+%% ],
+%% {Workers3, ShardsLeft3} = find_split_shard_replacements(Dead3, Shards3),
+%% ?assertEqual([], Workers3),
+%% ?assertEqual(Shards3, ShardsLeft3).
diff --git a/src/fabric/test/eunit/fabric_rpc_purge_tests.erl b/src/fabric/test/eunit/fabric_rpc_purge_tests.erl
deleted file mode 100644
index 6db6a70aa..000000000
--- a/src/fabric/test/eunit/fabric_rpc_purge_tests.erl
+++ /dev/null
@@ -1,307 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_rpc_purge_tests).
-
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
--define(TDEF(A), {A, fun A/1}).
-
-% TODO: Add tests:
-% - filter some updates
-% - allow for an update that was filtered by a node
-% - ignore lagging nodes
-
-main_test_() ->
- {
- setup,
- spawn,
- fun setup_all/0,
- fun teardown_all/1,
- [
- {
- foreach,
- fun setup_no_purge/0,
- fun teardown_no_purge/1,
- lists:map(fun wrap/1, [
- ?TDEF(t_no_purge_no_filter)
- ])
- },
- {
- foreach,
- fun setup_single_purge/0,
- fun teardown_single_purge/1,
- lists:map(fun wrap/1, [
- ?TDEF(t_filter),
- ?TDEF(t_filter_unknown_node),
- ?TDEF(t_filter_local_node),
- ?TDEF(t_no_filter_old_node),
- ?TDEF(t_no_filter_different_node),
- ?TDEF(t_no_filter_after_repl)
- ])
- },
- {
- foreach,
- fun setup_multi_purge/0,
- fun teardown_multi_purge/1,
- lists:map(fun wrap/1, [
- ?TDEF(t_filter),
- ?TDEF(t_filter_unknown_node),
- ?TDEF(t_filter_local_node),
- ?TDEF(t_no_filter_old_node),
- ?TDEF(t_no_filter_different_node),
- ?TDEF(t_no_filter_after_repl)
- ])
- }
- ]
- }.
-
-
-setup_all() ->
- test_util:start_couch().
-
-
-teardown_all(Ctx) ->
- test_util:stop_couch(Ctx).
-
-
-setup_no_purge() ->
- {ok, Db} = create_db(),
- populate_db(Db),
- couch_db:name(Db).
-
-
-teardown_no_purge(DbName) ->
- ok = couch_server:delete(DbName, []).
-
-
-setup_single_purge() ->
- DbName = setup_no_purge(),
- DocId = <<"0003">>,
- {ok, OldDoc} = open_doc(DbName, DocId),
- purge_doc(DbName, DocId),
- {DbName, DocId, OldDoc, 1}.
-
-
-teardown_single_purge({DbName, _, _, _}) ->
- teardown_no_purge(DbName).
-
-
-setup_multi_purge() ->
- DbName = setup_no_purge(),
- DocId = <<"0003">>,
- {ok, OldDoc} = open_doc(DbName, DocId),
- lists:foreach(fun(I) ->
- PDocId = iolist_to_binary(io_lib:format("~4..0b", [I])),
- purge_doc(DbName, PDocId)
- end, lists:seq(1, 5)),
- {DbName, DocId, OldDoc, 3}.
-
-
-teardown_multi_purge(Ctx) ->
- teardown_single_purge(Ctx).
-
-
-t_no_purge_no_filter(DbName) ->
- DocId = <<"0003">>,
-
- {ok, OldDoc} = open_doc(DbName, DocId),
- NewDoc = create_update(OldDoc, 2),
-
- rpc_update_doc(DbName, NewDoc),
-
- {ok, CurrDoc} = open_doc(DbName, DocId),
- ?assert(CurrDoc /= OldDoc),
- ?assert(CurrDoc == NewDoc).
-
-
-t_filter({DbName, DocId, OldDoc, _PSeq}) ->
- ?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
- create_purge_checkpoint(DbName, 0),
-
- rpc_update_doc(DbName, OldDoc),
-
- ?assertEqual({not_found, missing}, open_doc(DbName, DocId)).
-
-
-t_filter_unknown_node({DbName, DocId, OldDoc, _PSeq}) ->
- % Unknown nodes are assumed to start at PurgeSeq = 0
- ?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
- create_purge_checkpoint(DbName, 0),
-
- {Pos, [Rev | _]} = OldDoc#doc.revs,
- RROpt = {read_repair, [{'blargh@127.0.0.1', [{Pos, Rev}]}]},
- rpc_update_doc(DbName, OldDoc, [RROpt]),
-
- ?assertEqual({not_found, missing}, open_doc(DbName, DocId)).
-
-
-t_no_filter_old_node({DbName, DocId, OldDoc, PSeq}) ->
- ?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
- create_purge_checkpoint(DbName, PSeq),
-
- % The random UUID is to generate a badarg exception when
- % we try and convert it to an existing atom.
- create_purge_checkpoint(DbName, 0, couch_uuids:random()),
-
- rpc_update_doc(DbName, OldDoc),
-
- ?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)).
-
-
-t_no_filter_different_node({DbName, DocId, OldDoc, PSeq}) ->
- ?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
- create_purge_checkpoint(DbName, PSeq),
-
- % Create a valid purge for a different node
- TgtNode = list_to_binary(atom_to_list('notfoo@127.0.0.1')),
- create_purge_checkpoint(DbName, 0, TgtNode),
-
- rpc_update_doc(DbName, OldDoc),
-
- ?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)).
-
-
-t_filter_local_node({DbName, DocId, OldDoc, PSeq}) ->
- ?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
- create_purge_checkpoint(DbName, PSeq),
-
- % Create a valid purge for a different node
- TgtNode = list_to_binary(atom_to_list('notfoo@127.0.0.1')),
- create_purge_checkpoint(DbName, 0, TgtNode),
-
- % Add a local node rev to the list of node revs. It should
- % be filtered out
- {Pos, [Rev | _]} = OldDoc#doc.revs,
- RROpts = [{read_repair, [
- {tgt_node(), [{Pos, Rev}]},
- {node(), [{1, <<"123">>}]}
- ]}],
- rpc_update_doc(DbName, OldDoc, RROpts),
-
- ?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)).
-
-
-t_no_filter_after_repl({DbName, DocId, OldDoc, PSeq}) ->
- ?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
- create_purge_checkpoint(DbName, PSeq),
-
- rpc_update_doc(DbName, OldDoc),
-
- ?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)).
-
-
-wrap({Name, Fun}) ->
- fun(Arg) ->
- {timeout, 60, {atom_to_list(Name), fun() ->
- process_flag(trap_exit, true),
- Fun(Arg)
- end}}
- end.
-
-
-create_db() ->
- DbName = ?tempdb(),
- couch_db:create(DbName, [?ADMIN_CTX]).
-
-
-populate_db(Db) ->
- Docs = lists:map(fun(Idx) ->
- DocId = lists:flatten(io_lib:format("~4..0b", [Idx])),
- #doc{
- id = list_to_binary(DocId),
- body = {[{<<"int">>, Idx}, {<<"vsn">>, 2}]}
- }
- end, lists:seq(1, 100)),
- {ok, _} = couch_db:update_docs(Db, Docs).
-
-
-open_doc(DbName, DocId) ->
- couch_util:with_db(DbName, fun(Db) ->
- couch_db:open_doc(Db, DocId, [])
- end).
-
-
-create_update(Doc, NewVsn) ->
- #doc{
- id = DocId,
- revs = {Pos, [Rev | _] = Revs},
- body = {Props}
- } = Doc,
- NewProps = lists:keyreplace(<<"vsn">>, 1, Props, {<<"vsn">>, NewVsn}),
- NewRev = couch_hash:md5_hash(term_to_binary({DocId, Rev, {NewProps}})),
- Doc#doc{
- revs = {Pos + 1, [NewRev | Revs]},
- body = {NewProps}
- }.
-
-
-purge_doc(DbName, DocId) ->
- {ok, Doc} = open_doc(DbName, DocId),
- {Pos, [Rev | _]} = Doc#doc.revs,
- PInfo = {couch_uuids:random(), DocId, [{Pos, Rev}]},
- Resp = couch_util:with_db(DbName, fun(Db) ->
- couch_db:purge_docs(Db, [PInfo], [])
- end),
- ?assertEqual({ok, [{ok, [{Pos, Rev}]}]}, Resp).
-
-
-create_purge_checkpoint(DbName, PurgeSeq) ->
- create_purge_checkpoint(DbName, PurgeSeq, tgt_node_bin()).
-
-
-create_purge_checkpoint(DbName, PurgeSeq, TgtNode) when is_binary(TgtNode) ->
- Resp = couch_util:with_db(DbName, fun(Db) ->
- SrcUUID = couch_db:get_uuid(Db),
- TgtUUID = couch_uuids:random(),
- CPDoc = #doc{
- id = mem3_rep:make_purge_id(SrcUUID, TgtUUID),
- body = {[
- {<<"target_node">>, TgtNode},
- {<<"purge_seq">>, PurgeSeq}
- ]}
- },
- couch_db:update_docs(Db, [CPDoc], [])
- end),
- ?assertMatch({ok, [_]}, Resp).
-
-
-rpc_update_doc(DbName, Doc) ->
- {Pos, [Rev | _]} = Doc#doc.revs,
- RROpt = {read_repair, [{tgt_node(), [{Pos, Rev}]}]},
- rpc_update_doc(DbName, Doc, [RROpt]).
-
-
-rpc_update_doc(DbName, Doc, Opts) ->
- Ref = erlang:make_ref(),
- put(rexi_from, {self(), Ref}),
- fabric_rpc:update_docs(DbName, [Doc], Opts),
- Reply = test_util:wait(fun() ->
- receive
- {Ref, Reply} ->
- Reply
- after 0 ->
- wait
- end
- end),
- ?assertEqual({ok, []}, Reply).
-
-
-tgt_node() ->
- 'foo@127.0.0.1'.
-
-
-tgt_node_bin() ->
- iolist_to_binary(atom_to_list(tgt_node())).
diff --git a/src/fabric/test/fabric2_active_tasks_tests.erl b/src/fabric/test/fabric2_active_tasks_tests.erl
new file mode 100644
index 000000000..891450027
--- /dev/null
+++ b/src/fabric/test/fabric2_active_tasks_tests.erl
@@ -0,0 +1,120 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_active_tasks_tests).
+
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include("fabric2_test.hrl").
+
+
+-define(JOB_TYPE, <<"fabric2_active_tasks_tests_type">>).
+-define(JOB_ID, <<"job_id">>).
+
+
+active_tasks_test_() ->
+ {
+        "Test fabric2 active tasks",
+ {
+ setup,
+ fun setup_all/0,
+ fun cleanup_all/1,
+ {
+ foreach,
+ fun setup/0,
+ fun cleanup/1,
+ [
+ ?TDEF_FE(no_active_tasks_defined),
+ ?TDEF_FE(empty_map_info),
+ ?TDEF_FE(can_read_active_tasks),
+ ?TDEF_FE(only_running_tasks_appear)
+ ]
+ }
+ }
+ }.
+
+
+setup_all() ->
+ Ctx = test_util:start_couch([fabric, couch_jobs]),
+ couch_jobs:set_type_timeout(?JOB_TYPE, 5000),
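+    % couch_jobs:get_types/1 is mocked so that active task collection only
+    % scans this test's job type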
+ meck:new(couch_jobs, [passthrough]),
+ meck:expect(couch_jobs, get_types, 1, [?JOB_TYPE]),
+ Ctx.
+
+
+cleanup_all(Ctx) ->
+ meck:unload(),
+ test_util:stop_couch(Ctx).
+
+
+setup() ->
+ ok = couch_jobs:add(undefined, ?JOB_TYPE, ?JOB_ID, #{}),
+ ok.
+
+
+cleanup(_) ->
+ meck:reset(couch_jobs),
+ couch_jobs:remove(undefined, ?JOB_TYPE, ?JOB_ID).
+
+
+no_active_tasks_defined(_) ->
+ {ok, Job1, #{}} = couch_jobs:accept(?JOB_TYPE),
+ ?assertEqual([], fabric2_active_tasks:get_active_tasks()),
+ ok = couch_jobs:finish(undefined, Job1).
+
+
+empty_map_info(_) ->
+ {ok, Job1, Data} = couch_jobs:accept(?JOB_TYPE),
+
+ Data1 = fabric2_active_tasks:update_active_task_info(Data, #{}),
+ {ok, Job2} = couch_jobs:update(undefined, Job1, Data1),
+ ?assertEqual([], fabric2_active_tasks:get_active_tasks()),
+ ok = couch_jobs:finish(undefined, Job2).
+
+
+can_read_active_tasks(_) ->
+ {ok, Job1, Data} = couch_jobs:accept(?JOB_TYPE),
+
+ Info = #{<<"x">> => 1},
+ Data1 = fabric2_active_tasks:update_active_task_info(Data, Info),
+ {ok, Job2} = couch_jobs:update(undefined, Job1, Data1),
+ ?assertEqual([#{<<"x">> => 1}], fabric2_active_tasks:get_active_tasks()),
+
+ Info1 = fabric2_active_tasks:get_active_task_info(Data1),
+ Info2 = Info1#{<<"y">> => 2},
+ Data2 = fabric2_active_tasks:update_active_task_info(Data1, Info2),
+ {ok, Job3} = couch_jobs:update(undefined, Job2, Data2),
+ ?assertEqual([#{<<"x">> => 1, <<"y">> => 2}],
+ fabric2_active_tasks:get_active_tasks()),
+ ok = couch_jobs:finish(undefined, Job3).
+
+
+only_running_tasks_appear(_) ->
+ {ok, Job1, Data} = couch_jobs:accept(?JOB_TYPE),
+
+ Info = #{<<"x">> => 1},
+ Data1 = fabric2_active_tasks:update_active_task_info(Data, Info),
+ {ok, Job2} = couch_jobs:update(undefined, Job1, Data1),
+
+ ?assertEqual([#{<<"x">> => 1}], fabric2_active_tasks:get_active_tasks()),
+ {ok, _} = couch_jobs:resubmit(undefined, Job2),
+
+ ok = couch_jobs:finish(undefined, Job2),
+
+ ?assertEqual([], fabric2_active_tasks:get_active_tasks()),
+ {ok, Job3, #{}} = couch_jobs:accept(?JOB_TYPE),
+ ?assertEqual([#{<<"x">> => 1}], fabric2_active_tasks:get_active_tasks()),
+
+ ok = couch_jobs:finish(undefined, Job3),
+ ?assertEqual([], fabric2_active_tasks:get_active_tasks()).
diff --git a/src/fabric/test/fabric2_changes_fold_tests.erl b/src/fabric/test/fabric2_changes_fold_tests.erl
new file mode 100644
index 000000000..8541d973c
--- /dev/null
+++ b/src/fabric/test/fabric2_changes_fold_tests.erl
@@ -0,0 +1,241 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_changes_fold_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("fabric2_test.hrl").
+
+
+-define(DOC_COUNT, 25).
+
+
+changes_fold_test_() ->
+ {
+ "Test changes fold operations",
+ {
+ setup,
+ fun setup_all/0,
+ fun teardown_all/1,
+ {
+ foreach,
+ fun setup/0,
+ fun cleanup/1,
+ [
+ ?TDEF_FE(fold_changes_basic),
+ ?TDEF_FE(fold_changes_since_now),
+ ?TDEF_FE(fold_changes_since_seq),
+ ?TDEF_FE(fold_changes_basic_rev),
+ ?TDEF_FE(fold_changes_since_now_rev),
+ ?TDEF_FE(fold_changes_since_seq_rev),
+ ?TDEF_FE(fold_changes_basic_tx_too_old),
+ ?TDEF_FE(fold_changes_reverse_tx_too_old),
+ ?TDEF_FE(fold_changes_tx_too_old_with_single_row_emits),
+ ?TDEF_FE(fold_changes_since_seq_tx_too_old),
+ ?TDEF_FE(fold_changes_not_progressing)
+ ]
+ }
+ }
+ }.
+
+
+setup_all() ->
+ Ctx = test_util:start_couch([fabric]),
+ meck:new(erlfdb, [passthrough]),
+ Ctx.
+
+
+teardown_all(Ctx) ->
+ meck:unload(),
+ test_util:stop_couch(Ctx).
+
+
+setup() ->
+ fabric2_test_util:tx_too_old_mock_erlfdb(),
+ {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+ Rows = lists:map(fun(Val) ->
+ DocId = fabric2_util:uuid(),
+ Doc = #doc{
+ id = DocId,
+ body = {[{<<"value">>, Val}]}
+ },
+ {ok, RevId} = fabric2_db:update_doc(Db, Doc, []),
+ UpdateSeq = fabric2_db:get_update_seq(Db),
+ #{
+ id => DocId,
+ sequence => UpdateSeq,
+ deleted => false,
+ rev_id => RevId
+ }
+ end, lists:seq(1, ?DOC_COUNT)),
+ {Db, Rows}.
+
+
+cleanup({Db, _DocIdRevs}) ->
+ fabric2_test_util:tx_too_old_reset_errors(),
+ ok = fabric2_db:delete(fabric2_db:name(Db), []).
+
+
+fold_changes_basic({Db, DocRows}) ->
+ ?assertEqual(lists:reverse(DocRows), changes(Db)).
+
+
+fold_changes_since_now({Db, _}) ->
+ ?assertEqual([], changes(Db, now, [])).
+
+
+fold_changes_since_seq({_, []}) ->
+ ok;
+
+fold_changes_since_seq({Db, [Row | RestRows]}) ->
+ #{sequence := Since} = Row,
+ ?assertEqual(lists:reverse(RestRows), changes(Db, Since, [])),
+ fold_changes_since_seq({Db, RestRows}).
+
+
+fold_changes_basic_rev({Db, _}) ->
+ ?assertEqual([], changes(Db, 0, [{dir, rev}])).
+
+
+fold_changes_since_now_rev({Db, DocRows}) ->
+ ?assertEqual(DocRows, changes(Db, now, [{dir, rev}])).
+
+
+fold_changes_since_seq_rev({_, []}) ->
+ ok;
+
+fold_changes_since_seq_rev({Db, DocRows}) ->
+ #{sequence := Since} = lists:last(DocRows),
+ Opts = [{dir, rev}],
+ ?assertEqual(DocRows, changes(Db, Since, Opts)),
+ RestRows = lists:sublist(DocRows, length(DocRows) - 1),
+ fold_changes_since_seq_rev({Db, RestRows}).
+
+
+fold_changes_basic_tx_too_old({Db, DocRows0}) ->
+ DocRows = lists:reverse(DocRows0),
+
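+    % tx_too_old_setup_errors(UserFunErrs, FoldRangeErrs) arms error injection;
+    % each argument is either a count of errors to raise or a tuple of
+    % {SuccessfulCallsFirst, ErrorsAfter} (semantics inferred from usage below)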
+ fabric2_test_util:tx_too_old_setup_errors(0, 1),
+ ?assertEqual(DocRows, changes(Db)),
+
+ fabric2_test_util:tx_too_old_setup_errors(1, 0),
+ ?assertEqual(DocRows, changes(Db)),
+
+ % Blow up in user fun but after emitting one row successfully.
+ fabric2_test_util:tx_too_old_setup_errors({1, 1}, 0),
+ ?assertEqual(DocRows, changes(Db)),
+
+ % Blow up before last document
+ fabric2_test_util:tx_too_old_setup_errors({?DOC_COUNT - 1, 1}, 0),
+ ?assertEqual(DocRows, changes(Db)),
+
+    % Emit one value, then blow up in the user function, then blow up twice
+    % in fold_range. That is still not enough to stop the iteration.
+ fabric2_test_util:tx_too_old_setup_errors({1, 1}, {1, 2}),
+ ?assertEqual(DocRows, changes(Db)).
+
+
+fold_changes_reverse_tx_too_old({Db, DocRows}) ->
+ Opts = [{dir, rev}],
+
+ fabric2_test_util:tx_too_old_setup_errors(0, 1),
+ ?assertEqual([], changes(Db, 0, Opts)),
+
+ fabric2_test_util:tx_too_old_setup_errors(1, 0),
+ ?assertEqual([], changes(Db, 0, Opts)),
+
+ fabric2_test_util:tx_too_old_setup_errors(1, 0),
+ ?assertEqual(DocRows, changes(Db, now, Opts)),
+
+ fabric2_test_util:tx_too_old_setup_errors(1, 0),
+ ?assertEqual(DocRows, changes(Db, now, Opts)),
+
+ % Blow up in user fun but after emitting one row successfully.
+ fabric2_test_util:tx_too_old_setup_errors({1, 1}, 0),
+ ?assertEqual(DocRows, changes(Db, now, Opts)),
+
+ % Blow up before last document
+ fabric2_test_util:tx_too_old_setup_errors({?DOC_COUNT - 1, 1}, 0),
+ ?assertEqual(DocRows, changes(Db, now, Opts)),
+
+ % Emit value, blow up in user function, and twice in fold_range
+ fabric2_test_util:tx_too_old_setup_errors({1, 1}, {1, 2}),
+ ?assertEqual(DocRows, changes(Db, now, Opts)).
+
+
+fold_changes_tx_too_old_with_single_row_emits({Db, DocRows0}) ->
+ % This test does a few basic operations while forcing erlfdb range fold to
+ % emit a single row at a time, thus forcing it to use continuations while
+ % also inducing tx errors
+ Opts = [{target_bytes, 1}],
+ DocRows = lists:reverse(DocRows0),
+
+ fabric2_test_util:tx_too_old_setup_errors(0, 1),
+ ?assertEqual(DocRows, changes(Db, 0, Opts)),
+
+ fabric2_test_util:tx_too_old_setup_errors(1, 0),
+ ?assertEqual(DocRows, changes(Db, 0, Opts)),
+
+ % Blow up in user fun but after emitting one row successfully.
+ fabric2_test_util:tx_too_old_setup_errors({1, 1}, 0),
+ ?assertEqual(DocRows, changes(Db, 0, Opts)),
+
+ % Blow up before last document
+ fabric2_test_util:tx_too_old_setup_errors({?DOC_COUNT - 1, 1}, 0),
+ ?assertEqual(DocRows, changes(Db, 0, Opts)).
+
+
+fold_changes_since_seq_tx_too_old({Db, Rows}) ->
+    % Blow up after a successful emit, then twice in the range fold call.
+    % Re-use the existing fold_changes_since_seq test function.
+ fabric2_test_util:tx_too_old_setup_errors({1, 1}, {1, 2}),
+ fold_changes_since_seq({Db, Rows}).
+
+
+fold_changes_not_progressing({Db, _}) ->
+ % Fail in first fold range call.
+ fabric2_test_util:tx_too_old_setup_errors(5, 0),
+ ?assertError(fold_range_not_progressing, changes(Db)),
+
+ % Fail in first user fun call.
+ fabric2_test_util:tx_too_old_setup_errors(0, 5),
+ ?assertError(fold_range_not_progressing, changes(Db)),
+
+ % Blow up in last user fun call
+ fabric2_test_util:tx_too_old_setup_errors({?DOC_COUNT - 1, 5}, 0),
+ ?assertError(fold_range_not_progressing, changes(Db)),
+
+ % Blow up in user function after one success.
+ fabric2_test_util:tx_too_old_setup_errors({1, 5}, 0),
+ ?assertError(fold_range_not_progressing, changes(Db)),
+
+ % Emit value, blow up in user function, then keep blowing up in fold_range.
+ fabric2_test_util:tx_too_old_setup_errors({1, 1}, {1, 4}),
+ ?assertError(fold_range_not_progressing, changes(Db)).
+
+
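+% Accumulate each change (newest first); raises tx_too_old when the test has
+% armed user fun errors via fabric2_test_util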
+fold_fun(#{} = Change, Acc) ->
+ fabric2_test_util:tx_too_old_raise_in_user_fun(),
+ {ok, [Change | Acc]}.
+
+
+changes(Db) ->
+ changes(Db, 0, []).
+
+
+changes(Db, Since, Opts) ->
+ {ok, Rows} = fabric2_db:fold_changes(Db, Since, fun fold_fun/2, [], Opts),
+ Rows.
diff --git a/src/fabric/test/fabric2_db_crud_tests.erl b/src/fabric/test/fabric2_db_crud_tests.erl
new file mode 100644
index 000000000..3d90c65b5
--- /dev/null
+++ b/src/fabric/test/fabric2_db_crud_tests.erl
@@ -0,0 +1,750 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_db_crud_tests).
+
+
+-include_lib("fabric/include/fabric2.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("fabric2_test.hrl").
+
+
+-define(PDICT_RAISE_IN_ERLFDB_WAIT, '$pdict_raise_in_erlfdb_wait').
+
+
+crud_test_() ->
+ {
+ "Test database CRUD operations",
+ {
+ setup,
+ fun setup_all/0,
+ fun teardown_all/1,
+ {
+ foreach,
+ fun setup/0,
+ fun cleanup/1,
+ [
+ ?TDEF_FE(create_db),
+ ?TDEF_FE(open_db),
+ ?TDEF_FE(delete_db),
+ ?TDEF_FE(recreate_db),
+ ?TDEF_FE(recreate_db_interactive),
+ ?TDEF_FE(recreate_db_non_interactive),
+ ?TDEF_FE(undelete_db),
+ ?TDEF_FE(remove_deleted_db),
+ ?TDEF_FE(scheduled_remove_deleted_db, 15),
+ ?TDEF_FE(scheduled_remove_deleted_dbs, 15),
+ ?TDEF_FE(old_db_handle),
+ ?TDEF_FE(list_dbs),
+ ?TDEF_FE(list_dbs_user_fun),
+ ?TDEF_FE(list_dbs_user_fun_partial),
+ ?TDEF_FE(list_dbs_info),
+ ?TDEF_FE(list_dbs_info_partial),
+ ?TDEF_FE(list_dbs_tx_too_old),
+ ?TDEF_FE(list_dbs_info_tx_too_old, 15),
+ ?TDEF_FE(list_deleted_dbs_info),
+ ?TDEF_FE(list_deleted_dbs_info_user_fun),
+ ?TDEF_FE(list_deleted_dbs_info_user_fun_partial),
+ ?TDEF_FE(list_deleted_dbs_info_with_timestamps),
+ ?TDEF_FE(get_info_wait_retry_on_tx_too_old),
+ ?TDEF_FE(get_info_wait_retry_on_tx_abort)
+ ]
+ }
+ }
+ }.
+
+
+scheduled_db_remove_error_test_() ->
+ {
+ "Test scheduled database remove operations",
+ {
+ setup,
+ fun setup_all/0,
+ fun teardown_all/1,
+ {
+ foreach,
+ fun setup/0,
+ fun cleanup/1,
+ [
+ ?TDEF_FE(scheduled_remove_deleted_dbs_with_error)
+ ]
+ }
+ }
+ }.
+
+
+setup_all() ->
+ meck:new(config, [passthrough]),
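+    % Speed up db expiration for these tests: look for expired dbs every two
+    % seconds and remove soft-deleted dbs immediately (zero retention)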
+ meck:expect(config, get_integer, fun
+ ("couchdb", "db_expiration_schedule_sec", _) -> 2;
+ ("couchdb", "db_expiration_retention_sec", _) -> 0;
+ (_, _, Default) -> Default
+ end),
+ Ctx = test_util:start_couch([fabric, couch_jobs]),
+ meck:new(erlfdb, [passthrough]),
+ meck:new(fabric2_db_expiration, [passthrough]),
+ Ctx.
+
+
+teardown_all(Ctx) ->
+ meck:unload(),
+ test_util:stop_couch(Ctx).
+
+
+setup() ->
+ fabric2_test_util:tx_too_old_mock_erlfdb().
+
+
+cleanup(_) ->
+ ok = config:set("couchdb", "db_expiration_enabled", "false", false),
+ ok = config:set("couchdb", "enable_database_recovery", "false", false),
+ fabric2_test_util:tx_too_old_reset_errors(),
+    reset_fail_erlfdb_wait(),
+ meck:reset([fabric2_db_expiration]),
+ meck:reset([config]),
+ meck:reset([erlfdb]).
+
+
+create_db(_) ->
+ DbName = ?tempdb(),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+ ?assertEqual(true, ets:member(fabric2_server, DbName)),
+ ?assertEqual({error, file_exists}, fabric2_db:create(DbName, [])).
+
+
+open_db(_) ->
+ DbName = ?tempdb(),
+ ?assertError(database_does_not_exist, fabric2_db:open(DbName, [])),
+
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+ ?assertEqual(true, ets:member(fabric2_server, DbName)),
+
+ % Opening the cached version
+ ?assertMatch({ok, _}, fabric2_db:open(DbName, [])),
+
+ % Remove from cache and re-open
+ true = ets:delete(fabric2_server, DbName),
+ ?assertMatch({ok, _}, fabric2_db:open(DbName, [])).
+
+
+delete_db(_) ->
+ DbName = ?tempdb(),
+ ?assertError(database_does_not_exist, fabric2_db:delete(DbName, [])),
+
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+ ?assertEqual(true, ets:member(fabric2_server, DbName)),
+
+ ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+ ?assertEqual(false, ets:member(fabric2_server, DbName)),
+
+ ?assertError(database_does_not_exist, fabric2_db:open(DbName, [])).
+
+
+recreate_db(_) ->
+ DbName = ?tempdb(),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+
+ {ok, Db1} = fabric2_db:open(DbName, []),
+
+ ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+
+ ?assertError(database_does_not_exist, fabric2_db:get_db_info(Db1)),
+
+ ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+
+ {ok, Db2} = fabric2_db:open(DbName, []),
+
+ CurOpts = [{uuid, fabric2_db:get_uuid(Db2)}],
+ ?assertMatch({ok, #{}}, fabric2_db:open(DbName, CurOpts)),
+
+ % Remove from cache to force it to open through fabric2_fdb:open
+ fabric2_server:remove(DbName),
+ ?assertMatch({ok, #{}}, fabric2_db:open(DbName, CurOpts)),
+
+ BadOpts = [{uuid, fabric2_util:uuid()}],
+ ?assertError(database_does_not_exist, fabric2_db:open(DbName, BadOpts)),
+
+ % Remove from cache to force it to open through fabric2_fdb:open
+ fabric2_server:remove(DbName),
+ ?assertError(database_does_not_exist, fabric2_db:open(DbName, BadOpts)).
+
+
+recreate_db_interactive(_) ->
+ DbName = ?tempdb(),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+
+ {ok, Db1} = fabric2_db:open(DbName, [{interactive, true}]),
+
+ ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+
+ ?assertMatch({ok, _}, fabric2_db:get_db_info(Db1)).
+
+
+recreate_db_non_interactive(_) ->
+    % This is also the default case, but check that the `false` value of the
+    % `interactive` open option is parsed correctly.
+ DbName = ?tempdb(),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+
+ {ok, Db1} = fabric2_db:open(DbName, [{interactive, false}]),
+
+ ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+
+ ?assertError(database_does_not_exist, fabric2_db:get_db_info(Db1)).
+
+
+undelete_db(_) ->
+ DbName = ?tempdb(),
+ ?assertError(database_does_not_exist, fabric2_db:delete(DbName, [])),
+
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+ ?assertEqual(true, ets:member(fabric2_server, DbName)),
+
+ ok = config:set("couchdb", "enable_database_recovery", "true", false),
+ ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+ ?assertEqual(false, ets:member(fabric2_server, DbName)),
+
+
+ {ok, Infos} = fabric2_db:list_deleted_dbs_info(),
+ [DeletedDbInfo] = [Info || Info <- Infos,
+ DbName == proplists:get_value(db_name, Info)
+ ],
+ Timestamp = proplists:get_value(timestamp, DeletedDbInfo),
+
+ OldTS = <<"2020-01-01T12:00:00Z">>,
+ ?assertEqual(not_found, fabric2_db:undelete(DbName, DbName, OldTS, [])),
+ BadDbName = <<"bad_dbname">>,
+ ?assertEqual(not_found,
+ fabric2_db:undelete(BadDbName, BadDbName, Timestamp, [])),
+
+ ok = fabric2_db:undelete(DbName, DbName, Timestamp, []),
+ {ok, AllDbInfos} = fabric2_db:list_dbs_info(),
+ ?assert(is_db_info_member(DbName, AllDbInfos)).
+
+
+remove_deleted_db(_) ->
+ DbName = ?tempdb(),
+ ?assertError(database_does_not_exist, fabric2_db:delete(DbName, [])),
+
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+ ?assertEqual(true, ets:member(fabric2_server, DbName)),
+
+ ok = config:set("couchdb", "enable_database_recovery", "true", false),
+ ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+ ?assertEqual(false, ets:member(fabric2_server, DbName)),
+
+ {ok, Infos} = fabric2_db:list_deleted_dbs_info(),
+ [DeletedDbInfo] = [Info || Info <- Infos,
+ DbName == proplists:get_value(db_name, Info)
+ ],
+ Timestamp = proplists:get_value(timestamp, DeletedDbInfo),
+ OldTS = <<"2020-01-01T12:00:00Z">>,
+ ?assertEqual(not_found,
+ fabric2_db:delete(DbName, [{deleted_at, OldTS}])),
+ BadDbName = <<"bad_dbname">>,
+ ?assertEqual(not_found,
+ fabric2_db:delete(BadDbName, [{deleted_at, Timestamp}])),
+
+ ok = fabric2_db:delete(DbName, [{deleted_at, Timestamp}]),
+ {ok, Infos2} = fabric2_db:list_deleted_dbs_info(),
+ DeletedDbs = [proplists:get_value(db_name, Info) || Info <- Infos2],
+ ?assert(not lists:member(DbName, DeletedDbs)).
+
+
+scheduled_remove_deleted_db(_) ->
+ ok = config:set("couchdb", "db_expiration_enabled", "true", false),
+ ok = config:set("couchdb", "enable_database_recovery", "true", false),
+ DbName = ?tempdb(),
+ ?assertError(database_does_not_exist, fabric2_db:delete(DbName, [])),
+
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+ ?assertEqual(true, ets:member(fabric2_server, DbName)),
+
+ ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+ ?assertEqual(false, ets:member(fabric2_server, DbName)),
+
+ meck:reset(fabric2_db_expiration),
+ meck:wait(fabric2_db_expiration, process_expirations, '_', 7000),
+
+ ?assertEqual(ok, test_util:wait(fun() ->
+ {ok, Infos} = fabric2_db:list_deleted_dbs_info(),
+ DeletedDbs = [proplists:get_value(db_name, Info) || Info <- Infos],
+ case lists:member(DbName, DeletedDbs) of
+ true -> wait;
+ false -> ok
+ end
+ end)).
+
+
+scheduled_remove_deleted_dbs(_) ->
+ ok = config:set("couchdb", "db_expiration_enabled", "true", false),
+ ok = config:set("couchdb", "db_expiration_batch", "2", false),
+ ok = config:set("couchdb", "enable_database_recovery", "true", false),
+ DbNameList = [create_and_delete_db() || _I <- lists:seq(1, 5)],
+ meck:reset(fabric2_db_expiration),
+ meck:wait(fabric2_db_expiration, process_expirations, '_', 7000),
+
+ {ok, Infos} = fabric2_db:list_deleted_dbs_info(),
+ DeletedDbs = [proplists:get_value(db_name, Info) || Info <- Infos],
+ lists:map(fun(DbName) ->
+ ?assert(not lists:member(DbName, DeletedDbs))
+ end, DbNameList).
+
+
+scheduled_remove_deleted_dbs_with_error(_) ->
+ meck:expect(fabric2_db_expiration, process_expirations, fun(_, _) ->
+ throw(process_expirations_error)
+ end),
+
+ {Pid, Ref} = spawn_monitor(fun() ->
+ fabric2_db_expiration:cleanup(true)
+ end),
+ receive
+ {'DOWN', Ref, process, Pid, Error} ->
+ ?assertMatch({job_error, process_expirations_error, _}, Error)
+ end,
+ JobType = <<"db_expiration">>,
+ JobId = <<"db_expiration_job">>,
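+    % The fully qualified job id is the base id plus a 16-bit integer suffix,
+    % assumed here to identify the first expiration worker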
+ FQJobId = <<JobId/binary, "-", 1:16/integer>>,
+
+ ?assertMatch({ok, _}, couch_jobs:get_job_data(undefined, JobType, FQJobId)),
+ {ok, JobState} = couch_jobs:get_job_state(undefined, JobType, FQJobId),
+ ?assert(lists:member(JobState, [pending, running])).
+
+
+old_db_handle(_) ->
+ % db hard deleted
+ DbName1 = ?tempdb(),
+ ?assertError(database_does_not_exist, fabric2_db:delete(DbName1, [])),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName1, [])),
+ {ok, Db1} = fabric2_db:open(DbName1, []),
+ ?assertMatch({ok, _}, fabric2_db:get_db_info(Db1)),
+ ?assertEqual(ok, fabric2_db:delete(DbName1, [])),
+ ?assertError(database_does_not_exist, fabric2_db:get_db_info(Db1)),
+
+ % db soft deleted
+ DbName2 = ?tempdb(),
+ ?assertError(database_does_not_exist, fabric2_db:delete(DbName2, [])),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName2, [])),
+ {ok, Db2} = fabric2_db:open(DbName2, []),
+ ?assertMatch({ok, _}, fabric2_db:get_db_info(Db2)),
+ ok = config:set("couchdb", "enable_database_recovery", "true", false),
+ ?assertEqual(ok, fabric2_db:delete(DbName2, [])),
+ ?assertError(database_does_not_exist, fabric2_db:get_db_info(Db2)),
+
+ % db soft deleted and re-created
+ DbName3 = ?tempdb(),
+ ?assertError(database_does_not_exist, fabric2_db:delete(DbName3, [])),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName3, [])),
+ {ok, Db3} = fabric2_db:open(DbName3, []),
+ ?assertMatch({ok, _}, fabric2_db:get_db_info(Db3)),
+ ok = config:set("couchdb", "enable_database_recovery", "true", false),
+ ?assertEqual(ok, fabric2_db:delete(DbName3, [])),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName3, [])),
+ ?assertError(database_does_not_exist, fabric2_db:get_db_info(Db3)),
+
+ % db soft deleted and undeleted
+ DbName4 = ?tempdb(),
+ ?assertError(database_does_not_exist, fabric2_db:delete(DbName4, [])),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName4, [])),
+ {ok, Db4} = fabric2_db:open(DbName4, []),
+ ?assertMatch({ok, _}, fabric2_db:get_db_info(Db4)),
+ ok = config:set("couchdb", "enable_database_recovery", "true", false),
+ ?assertEqual(ok, fabric2_db:delete(DbName4, [])),
+ {ok, Infos} = fabric2_db:list_deleted_dbs_info(),
+ [DeletedDbInfo] = [Info || Info <- Infos,
+ DbName4 == proplists:get_value(db_name, Info)
+ ],
+ Timestamp = proplists:get_value(timestamp, DeletedDbInfo),
+ ok = fabric2_db:undelete(DbName4, DbName4, Timestamp, []),
+ ?assertMatch({ok, _}, fabric2_db:get_db_info(Db4)),
+
+ % db hard deleted and re-created
+ DbName5 = ?tempdb(),
+ ?assertError(database_does_not_exist, fabric2_db:delete(DbName5, [])),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName5, [])),
+ {ok, Db5} = fabric2_db:open(DbName5, []),
+ ?assertMatch({ok, _}, fabric2_db:get_db_info(Db5)),
+ ok = config:set("couchdb", "enable_database_recovery", "false", false),
+ ?assertEqual(ok, fabric2_db:delete(DbName5, [])),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName5, [])),
+ ?assertError(database_does_not_exist, fabric2_db:get_db_info(Db5)).
+
+
+list_dbs(_) ->
+ DbName = ?tempdb(),
+ AllDbs1 = fabric2_db:list_dbs(),
+
+ ?assert(is_list(AllDbs1)),
+ ?assert(not lists:member(DbName, AllDbs1)),
+
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+ AllDbs2 = fabric2_db:list_dbs(),
+ ?assert(lists:member(DbName, AllDbs2)),
+
+ ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+ AllDbs3 = fabric2_db:list_dbs(),
+ ?assert(not lists:member(DbName, AllDbs3)).
+
+
+list_dbs_user_fun(_) ->
+ ?assertMatch({ok, _}, fabric2_db:create(?tempdb(), [])),
+
+ UserFun = fun(Row, Acc) -> {ok, [Row | Acc]} end,
+ {ok, UserAcc} = fabric2_db:list_dbs(UserFun, [], []),
+
+ Base = lists:foldl(fun(DbName, Acc) ->
+ [{row, [{id, DbName}]} | Acc]
+ end, [{meta, []}], fabric2_db:list_dbs()),
+ Expect = lists:reverse(Base, [complete]),
+
+ ?assertEqual(Expect, lists:reverse(UserAcc)).
+
+
+list_dbs_user_fun_partial(_) ->
+ UserFun = fun(Row, Acc) -> {stop, [Row | Acc]} end,
+ {ok, UserAcc} = fabric2_db:list_dbs(UserFun, [], []),
+ ?assertEqual([{meta, []}], UserAcc).
+
+
+list_dbs_info(_) ->
+ DbName = ?tempdb(),
+ {ok, AllDbInfos1} = fabric2_db:list_dbs_info(),
+
+ ?assert(is_list(AllDbInfos1)),
+ ?assert(not is_db_info_member(DbName, AllDbInfos1)),
+
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+ {ok, AllDbInfos2} = fabric2_db:list_dbs_info(),
+ ?assert(is_db_info_member(DbName, AllDbInfos2)),
+
+ ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+ {ok, AllDbInfos3} = fabric2_db:list_dbs_info(),
+ ?assert(not is_db_info_member(DbName, AllDbInfos3)).
+
+
+list_dbs_info_partial(_) ->
+ UserFun = fun(Row, Acc) -> {stop, [Row | Acc]} end,
+ {ok, UserAcc} = fabric2_db:list_dbs_info(UserFun, [], []),
+ ?assertEqual([{meta, []}], UserAcc).
+
+
+list_dbs_tx_too_old(_) ->
+ DbName1 = ?tempdb(),
+ DbName2 = ?tempdb(),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName1, [])),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName2, [])),
+
+ UserFun = fun(Row, Acc) ->
+ fabric2_test_util:tx_too_old_raise_in_user_fun(),
+ {ok, [Row | Acc]}
+ end,
+
+    % Get the expected output without any transactions timing out
+ Dbs = fabric2_db:list_dbs(UserFun, [], []),
+
+ % Blow up in fold range
+ fabric2_test_util:tx_too_old_setup_errors(0, 1),
+ ?assertEqual(Dbs, fabric2_db:list_dbs(UserFun, [], [])),
+
+ % Blow up in fold_range after emitting one row
+ fabric2_test_util:tx_too_old_setup_errors(0, {1, 1}),
+ ?assertEqual(Dbs, fabric2_db:list_dbs(UserFun, [], [])),
+
+ % Blow up in user fun
+ fabric2_test_util:tx_too_old_setup_errors(1, 0),
+ ?assertEqual(Dbs, fabric2_db:list_dbs(UserFun, [], [])),
+
+ % Blow up in user fun after emitting one row
+ fabric2_test_util:tx_too_old_setup_errors({1, 1}, 0),
+ ?assertEqual(Dbs, fabric2_db:list_dbs(UserFun, [], [])),
+
+    % Blow up in user fun and fold range
+ fabric2_test_util:tx_too_old_setup_errors(1, {1, 1}),
+ ?assertEqual(Dbs, fabric2_db:list_dbs(UserFun, [], [])),
+
+ ok = fabric2_db:delete(DbName1, []),
+ ok = fabric2_db:delete(DbName2, []).
+
+
+list_dbs_info_tx_too_old(_) ->
+    % list_dbs_info uses a queue of 100 futures to fetch db infos in parallel,
+    % so create more than 100 dbs to make sure that limit is exercised
+
+ DbCount = 101,
+ DbNames = fabric2_util:pmap(fun(_) ->
+ DbName = ?tempdb(),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+ DbName
+ end, lists:seq(1, DbCount)),
+
+ UserFun = fun(Row, Acc) ->
+ fabric2_test_util:tx_too_old_raise_in_user_fun(),
+ {ok, [Row | Acc]}
+ end,
+
+ % This is the expected return with no tx timeouts
+ {ok, DbInfos} = fabric2_db:list_dbs_info(UserFun, [], []),
+
+ % Blow up in fold range on the first call
+ fabric2_test_util:tx_too_old_setup_errors(0, 1),
+ ?assertEqual({ok, DbInfos}, fabric2_db:list_dbs_info(UserFun, [], [])),
+
+ % Blow up in fold_range after emitting one row
+ fabric2_test_util:tx_too_old_setup_errors(0, {1, 1}),
+ ?assertEqual({ok, DbInfos}, fabric2_db:list_dbs_info(UserFun, [], [])),
+
+ % Blow up in fold_range after emitting 99 rows
+ fabric2_test_util:tx_too_old_setup_errors(0, {DbCount - 2, 1}),
+ ?assertEqual({ok, DbInfos}, fabric2_db:list_dbs_info(UserFun, [], [])),
+
+ % Blow up in fold_range after emitting 100 rows
+ fabric2_test_util:tx_too_old_setup_errors(0, {DbCount - 1, 1}),
+ ?assertEqual({ok, DbInfos}, fabric2_db:list_dbs_info(UserFun, [], [])),
+
+ % Blow up in user fun
+ fabric2_test_util:tx_too_old_setup_errors(1, 0),
+ ?assertEqual({ok, DbInfos}, fabric2_db:list_dbs_info(UserFun, [], [])),
+
+ % Blow up in user fun after emitting one row
+ fabric2_test_util:tx_too_old_setup_errors({1, 1}, 0),
+ ?assertEqual({ok, DbInfos}, fabric2_db:list_dbs_info(UserFun, [], [])),
+
+ % Blow up in user fun after emitting 99 rows
+ fabric2_test_util:tx_too_old_setup_errors({DbCount - 2, 1}, 0),
+ ?assertEqual({ok, DbInfos}, fabric2_db:list_dbs_info(UserFun, [], [])),
+
+ % Blow up in user fun after emitting 100 rows
+ fabric2_test_util:tx_too_old_setup_errors({DbCount - 1, 1}, 0),
+ ?assertEqual({ok, DbInfos}, fabric2_db:list_dbs_info(UserFun, [], [])),
+
+    % Blow up in user fun and fold range
+ fabric2_test_util:tx_too_old_setup_errors(1, {1, 1}),
+ ?assertEqual({ok, DbInfos}, fabric2_db:list_dbs_info(UserFun, [], [])),
+
+ fabric2_util:pmap(fun(DbName) ->
+ ?assertEqual(ok, fabric2_db:delete(DbName, []))
+ end, DbNames).
+
+
+list_deleted_dbs_info(_) ->
+ DbName = ?tempdb(),
+ AllDbs1 = fabric2_db:list_dbs(),
+
+ ?assert(is_list(AllDbs1)),
+ ?assert(not lists:member(DbName, AllDbs1)),
+
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+ AllDbs2 = fabric2_db:list_dbs(),
+ ?assert(lists:member(DbName, AllDbs2)),
+
+ ok = config:set("couchdb", "enable_database_recovery", "true", false),
+ ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+
+ AllDbs3 = fabric2_db:list_dbs(),
+ ?assert(not lists:member(DbName, AllDbs3)),
+ {ok, DeletedDbsInfo} = fabric2_db:list_deleted_dbs_info(),
+ DeletedDbs4 = get_deleted_dbs(DeletedDbsInfo),
+ ?assert(lists:member(DbName, DeletedDbs4)).
+
+
+list_deleted_dbs_info_user_fun(_) ->
+ DbName = ?tempdb(),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+ ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+
+ UserFun = fun(Row, Acc) -> {ok, [Row | Acc]} end,
+ {ok, UserAcc} = fabric2_db:list_deleted_dbs_info(UserFun, [], []),
+ {ok, DeletedDbsInfo} = fabric2_db:list_deleted_dbs_info(),
+
+ Base = lists:foldl(fun(DbInfo, Acc) ->
+ [{row, DbInfo} | Acc]
+ end, [{meta, []}], DeletedDbsInfo),
+ Expect = lists:reverse(Base, [complete]),
+
+ ?assertEqual(Expect, lists:reverse(UserAcc)).
+
+
+list_deleted_dbs_info_user_fun_partial(_) ->
+ UserFun = fun(Row, Acc) -> {stop, [Row | Acc]} end,
+ {ok, UserAcc} = fabric2_db:list_deleted_dbs_info(UserFun, [], []),
+ ?assertEqual([{meta, []}], UserAcc).
+
+
+list_deleted_dbs_info_with_timestamps(_) ->
+ ok = config:set("couchdb", "enable_database_recovery", "true", false),
+
+ % Cycle our database three times to get multiple entries
+ DbName = ?tempdb(),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+ ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+ timer:sleep(1100),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+ ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+ timer:sleep(1100),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+ ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+
+ UserFun = fun(Row, Acc) ->
+ case Row of
+ {row, Info} -> {ok, [Info | Acc]};
+ _ -> {ok, Acc}
+ end
+ end,
+
+ Options1 = [{start_key, DbName}, {end_key, <<DbName/binary, 255>>}],
+ {ok, Infos1} = fabric2_db:list_deleted_dbs_info(UserFun, [], Options1),
+ TimeStamps1 = [fabric2_util:get_value(timestamp, Info) || Info <- Infos1],
+ ?assertEqual(3, length(TimeStamps1)),
+
+ [FirstTS, MiddleTS, LastTS] = lists:sort(TimeStamps1),
+
+ % Check we can skip over the FirstTS
+ Options2 = [{start_key, [DbName, MiddleTS]}, {end_key, [DbName, LastTS]}],
+ {ok, Infos2} = fabric2_db:list_deleted_dbs_info(UserFun, [], Options2),
+ TimeStamps2 = [fabric2_util:get_value(timestamp, Info) || Info <- Infos2],
+ ?assertEqual(2, length(TimeStamps2)),
+ ?assertEqual([LastTS, MiddleTS], TimeStamps2), % because foldl reverses
+
+    % Check we can end before LastTS
+ Options3 = [{start_key, DbName}, {end_key, [DbName, MiddleTS]}],
+ {ok, Infos3} = fabric2_db:list_deleted_dbs_info(UserFun, [], Options3),
+ TimeStamps3 = [fabric2_util:get_value(timestamp, Info) || Info <- Infos3],
+ ?assertEqual([MiddleTS, FirstTS], TimeStamps3),
+
+ % Check that {dir, rev} works without timestamps
+ Options4 = [{start_key, DbName}, {end_key, DbName}, {dir, rev}],
+ {ok, Infos4} = fabric2_db:list_deleted_dbs_info(UserFun, [], Options4),
+ TimeStamps4 = [fabric2_util:get_value(timestamp, Info) || Info <- Infos4],
+ ?assertEqual([FirstTS, MiddleTS, LastTS], TimeStamps4),
+
+ % Check that reverse with keys returns correctly
+ Options5 = [
+ {start_key, [DbName, MiddleTS]},
+ {end_key, [DbName, FirstTS]},
+ {dir, rev}
+ ],
+ {ok, Infos5} = fabric2_db:list_deleted_dbs_info(UserFun, [], Options5),
+ TimeStamps5 = [fabric2_util:get_value(timestamp, Info) || Info <- Infos5],
+ ?assertEqual([FirstTS, MiddleTS], TimeStamps5).
+
+
+get_info_wait_retry_on_tx_too_old(_) ->
+ DbName = ?tempdb(),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+
+ {ok, Db} = fabric2_db:open(DbName, []),
+
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = TxDb,
+
+ % Simulate being in a list_dbs_info callback
+ ok = erlfdb:set_option(Tx, disallow_writes),
+
+ InfoF = fabric2_fdb:get_info_future(Tx, DbPrefix),
+ {info_future, _, _, ChangesF, _, _, _} = InfoF,
+
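+        % 1007 is FoundationDB's transaction_too_old error; get_info_wait/1 is
+        % expected to retry it, but only a limited number of times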
+ raise_in_erlfdb_wait(ChangesF, {erlfdb_error, 1007}, 3),
+ ?assertError({erlfdb_error, 1007}, fabric2_fdb:get_info_wait(InfoF)),
+
+ raise_in_erlfdb_wait(ChangesF, {erlfdb_error, 1007}, 2),
+ ?assertMatch([{_, _} | _], fabric2_fdb:get_info_wait(InfoF)),
+
+ ?assertEqual(ok, fabric2_db:delete(DbName, []))
+ end).
+
+
+get_info_wait_retry_on_tx_abort(_) ->
+ DbName = ?tempdb(),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+
+ {ok, Db} = fabric2_db:open(DbName, []),
+
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ #{
+ tx := Tx,
+ db_prefix := DbPrefix
+ } = TxDb,
+
+ % Simulate being in a list_dbs_info callback
+ ok = erlfdb:set_option(Tx, disallow_writes),
+
+ InfoF = fabric2_fdb:get_info_future(Tx, DbPrefix),
+ {info_future, _, _, ChangesF, _, _, _} = InfoF,
+
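+        % 1025 is FoundationDB's transaction_cancelled error; aborted
+        % transactions should be retried the same way as tx_too_old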
+ raise_in_erlfdb_wait(ChangesF, {erlfdb_error, 1025}, 3),
+ ?assertError({erlfdb_error, 1025}, fabric2_fdb:get_info_wait(InfoF)),
+
+ raise_in_erlfdb_wait(ChangesF, {erlfdb_error, 1025}, 2),
+ ?assertMatch([{_, _} | _], fabric2_fdb:get_info_wait(InfoF)),
+
+ ?assertEqual(ok, fabric2_db:delete(DbName, []))
+ end).
+
+
+reset_fail_erlfdb_wait() ->
+ erase(?PDICT_RAISE_IN_ERLFDB_WAIT),
+ meck:expect(erlfdb, wait, fun(F) -> meck:passthrough([F]) end).
+
+
+raise_in_erlfdb_wait(Future, Error, Count) ->
+ put(?PDICT_RAISE_IN_ERLFDB_WAIT, Count),
+ meck:expect(erlfdb, wait, fun
+ (F) when F =:= Future ->
+ case get(?PDICT_RAISE_IN_ERLFDB_WAIT) of
+ N when is_integer(N), N > 0 ->
+ put(?PDICT_RAISE_IN_ERLFDB_WAIT, N - 1),
+ error(Error);
+ _ ->
+ meck:passthrough([F])
+ end;
+ (F) ->
+ meck:passthrough([F])
+ end).
+
+
+is_db_info_member(_, []) ->
+ false;
+
+is_db_info_member(DbName, [DbInfo | RestInfos]) ->
+ case lists:keyfind(db_name, 1, DbInfo) of
+ {db_name, DbName} ->
+ true;
+ _E ->
+ is_db_info_member(DbName, RestInfos)
+ end.
+
+get_deleted_dbs(DeletedDbInfos) ->
+ lists:foldl(fun(DbInfo, Acc) ->
+ DbName = fabric2_util:get_value(db_name, DbInfo),
+ [DbName | Acc]
+ end, [], DeletedDbInfos).
+
+
+create_and_delete_db() ->
+ DbName = ?tempdb(),
+ ?assertError(database_does_not_exist, fabric2_db:delete(DbName, [])),
+
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+ ?assertEqual(true, ets:member(fabric2_server, DbName)),
+
+ ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+ ?assertEqual(false, ets:member(fabric2_server, DbName)),
+ DbName.
diff --git a/src/fabric/test/fabric2_db_fold_doc_docids_tests.erl b/src/fabric/test/fabric2_db_fold_doc_docids_tests.erl
new file mode 100644
index 000000000..b55da5363
--- /dev/null
+++ b/src/fabric/test/fabric2_db_fold_doc_docids_tests.erl
@@ -0,0 +1,150 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_db_fold_doc_docids_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("fabric2_test.hrl").
+
+doc_fold_test_() ->
+ {
+ "Test document fold operations",
+ {
+ setup,
+ fun setup_all/0,
+ fun teardown_all/1,
+ {
+ foreach,
+ fun setup/0,
+ fun cleanup/1,
+ [
+ ?TDEF_FE(fold_docs_simple),
+ ?TDEF_FE(fold_docs_lots),
+ ?TDEF_FE(fold_docs_local),
+ ?TDEF_FE(fold_docs_mixed)
+            ]
+ }
+ }
+ }.
+
+
+setup_all() ->
+ test_util:start_couch([fabric]).
+
+
+teardown_all(Ctx) ->
+ test_util:stop_couch(Ctx).
+
+
+setup() ->
+ {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+ Db.
+
+
+cleanup(Db) ->
+ ok = fabric2_db:delete(fabric2_db:name(Db), []).
+
+
+fold_docs_simple(Db) ->
+ Docs = create_docs(Db, 10),
+ run_fold(Db, Docs).
+
+
+fold_docs_lots(Db) ->
+ Docs = create_docs(Db, 110),
+ run_fold(Db, Docs).
+
+
+fold_docs_local(Db) ->
+ Docs = create_local_docs(Db, 10),
+ run_fold(Db, Docs).
+
+
+fold_docs_mixed(Db) ->
+ Docs = create_mixed_docs(Db, 200),
+ run_fold(Db, Docs).
+
+
+run_fold(Db, Docs) ->
+ SortedIds = get_ids(Docs),
+ Ids = shuffle(SortedIds),
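+    % fold_docs/5 with an explicit list of ids is expected to return results
+    % in the same (shuffled) order the ids were requested in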
+ Returned = fabric2_fdb:transactional(Db, fun (TxDb) ->
+ fold_docs_return_ids(TxDb, Ids)
+ end),
+ ?assertEqual(Returned, Ids).
+
+
+fold_docs_return_ids(TxDb, Ids) ->
+ CB = fun(DocId, _Doc, Acc) ->
+ {ok, Acc ++ [DocId]}
+ end,
+ {ok, Acc} = fabric2_db:fold_docs(TxDb, Ids, CB, [], []),
+ Acc.
+
+get_ids(Docs) ->
+ lists:map(fun (#doc{id = Id}) -> Id end, Docs).
+
+
+create_mixed_docs(Db, Size) ->
+ fabric2_fdb:transactional(Db, fun (TxDb) ->
+ Docs = lists:map(fun (Id) ->
+ case Id rem 3 == 0 of
+ true -> create_local_doc(Id);
+ false -> create_doc(Id)
+ end
+ end, lists:seq(0, Size)),
+ {ok, _} = fabric2_db:update_docs(TxDb, Docs),
+ Docs
+ end).
+
+
+create_local_docs(Db, Size) ->
+ fabric2_fdb:transactional(Db, fun (TxDb) ->
+ Docs = lists:map(fun (Id) ->
+ create_local_doc(Id)
+ end, lists:seq(0, Size)),
+ {ok, _} = fabric2_db:update_docs(TxDb, Docs),
+ Docs
+ end).
+
+
+create_docs(Db, Size) ->
+ fabric2_fdb:transactional(Db, fun (TxDb) ->
+ Docs = lists:map(fun (Id) ->
+ create_doc(Id)
+ end, lists:seq(0, Size)),
+ {ok, _} = fabric2_db:update_docs(TxDb, Docs),
+ Docs
+ end).
+
+
+create_doc(Id) ->
+ couch_doc:from_json_obj({[
+ {<<"_id">>, list_to_binary([<<"doc-">>, integer_to_binary(Id)])},
+ {<<"value">>, 1}
+ ]}).
+
+
+create_local_doc(Id) ->
+ couch_doc:from_json_obj({[
+ {<<"_id">>, list_to_binary([<<"_local/doc-">>, integer_to_binary(Id)])},
+ {<<"value">>, 1}
+ ]}).
+
+
+shuffle(List) when is_list(List) ->
+ Tagged = [{rand:uniform(), Item} || Item <- List],
+ {_, Randomized} = lists:unzip(lists:sort(Tagged)),
+ Randomized.
diff --git a/src/fabric/test/fabric2_db_misc_tests.erl b/src/fabric/test/fabric2_db_misc_tests.erl
new file mode 100644
index 000000000..23532144d
--- /dev/null
+++ b/src/fabric/test/fabric2_db_misc_tests.erl
@@ -0,0 +1,445 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_db_misc_tests).
+
+
+% Used in events_listener test
+-export([
+ event_listener_callback/3
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("fabric2.hrl").
+-include("fabric2_test.hrl").
+
+
+misc_test_() ->
+ {
+ "Test database miscellaney",
+ {
+ setup,
+ fun setup/0,
+ fun cleanup/1,
+ with([
+ ?TDEF(empty_db_info),
+ ?TDEF(accessors),
+ ?TDEF(set_revs_limit),
+ ?TDEF(set_security),
+ ?TDEF(get_security_cached),
+ ?TDEF(is_system_db),
+ ?TDEF(validate_dbname),
+ ?TDEF(validate_doc_ids),
+ ?TDEF(get_doc_info),
+ ?TDEF(get_doc_info_not_found),
+ ?TDEF(get_full_doc_info),
+ ?TDEF(get_full_doc_info_not_found),
+ ?TDEF(get_full_doc_infos),
+ ?TDEF(ensure_full_commit),
+ ?TDEF(metadata_bump),
+ ?TDEF(db_version_bump),
+ ?TDEF(db_cache_doesnt_evict_newer_handles),
+ ?TDEF(events_listener)
+ ])
+ }
+ }.
+
+
+setup() ->
+ Ctx = test_util:start_couch([fabric]),
+ DbName = ?tempdb(),
+ {ok, Db} = fabric2_db:create(DbName, [{user_ctx, ?ADMIN_USER}]),
+ {DbName, Db, Ctx}.
+
+
+cleanup({_DbName, Db, Ctx}) ->
+ meck:unload(),
+ ok = fabric2_db:delete(fabric2_db:name(Db), []),
+ test_util:stop_couch(Ctx).
+
+
+empty_db_info({DbName, Db, _}) ->
+ {ok, Info} = fabric2_db:get_db_info(Db),
+ ?assertEqual(DbName, fabric2_util:get_value(db_name, Info)),
+ ?assertEqual(0, fabric2_util:get_value(doc_count, Info)),
+ ?assertEqual(0, fabric2_util:get_value(doc_del_count, Info)),
+ ?assert(is_binary(fabric2_util:get_value(update_seq, Info))),
+ InfoUUID = fabric2_util:get_value(uuid, Info),
+ UUID = fabric2_db:get_uuid(Db),
+ ?assertEqual(UUID, InfoUUID).
+
+
+accessors({DbName, Db, _}) ->
+ SeqZero = fabric2_fdb:vs_to_seq(fabric2_util:seq_zero_vs()),
+ ?assertEqual(DbName, fabric2_db:name(Db)),
+ ?assertEqual(0, fabric2_db:get_instance_start_time(Db)),
+ ?assertEqual(nil, fabric2_db:get_pid(Db)),
+ ?assertEqual(undefined, fabric2_db:get_before_doc_update_fun(Db)),
+ ?assertEqual(undefined, fabric2_db:get_after_doc_read_fun(Db)),
+ ?assertEqual(SeqZero, fabric2_db:get_committed_update_seq(Db)),
+ ?assertEqual(SeqZero, fabric2_db:get_compacted_seq(Db)),
+ ?assertEqual(SeqZero, fabric2_db:get_update_seq(Db)),
+ ?assertEqual(nil, fabric2_db:get_compactor_pid(Db)),
+ ?assertEqual(1000, fabric2_db:get_revs_limit(Db)),
+ ?assertMatch(<<_:32/binary>>, fabric2_db:get_uuid(Db)),
+ ?assertEqual(true, fabric2_db:is_db(Db)),
+ ?assertEqual(false, fabric2_db:is_db(#{})),
+ ?assertEqual(false, fabric2_db:is_partitioned(Db)),
+ ?assertEqual(false, fabric2_db:is_clustered(Db)).
+
+
+set_revs_limit({DbName, Db, _}) ->
+ ?assertEqual(ok, fabric2_db:set_revs_limit(Db, 500)),
+ {ok, Db2} = fabric2_db:open(DbName, []),
+ ?assertEqual(500, fabric2_db:get_revs_limit(Db2)).
+
+
+set_security({DbName, Db, _}) ->
+ SecObj = {[
+ {<<"admins">>, {[
+ {<<"names">>, []},
+ {<<"roles">>, []}
+ ]}}
+ ]},
+ ?assertEqual(ok, fabric2_db:set_security(Db, SecObj)),
+ {ok, Db2} = fabric2_db:open(DbName, []),
+ ?assertEqual(SecObj, fabric2_db:get_security(Db2)).
+
+
+get_security_cached({DbName, Db, _}) ->
+ OldSecObj = fabric2_db:get_security(Db),
+ SecObj = {[
+ {<<"admins">>, {[
+ {<<"names">>, [<<"foo1">>]},
+ {<<"roles">>, []}
+ ]}}
+ ]},
+
+ % Set directly so we don't auto-update the local cache
+ {ok, Db1} = fabric2_db:open(DbName, [?ADMIN_CTX]),
+ ?assertMatch({ok, #{}}, fabric2_fdb:transactional(Db1, fun(TxDb) ->
+ fabric2_fdb:set_config(TxDb, security_doc, SecObj)
+ end)),
+
+ {ok, Db2} = fabric2_db:open(DbName, [?ADMIN_CTX]),
+ ?assertEqual(OldSecObj, fabric2_db:get_security(Db2, [{max_age, 1000}])),
+
+ timer:sleep(100),
+ ?assertEqual(SecObj, fabric2_db:get_security(Db2, [{max_age, 50}])),
+
+ ?assertEqual(ok, fabric2_db:set_security(Db2, OldSecObj)).
+
+
+is_system_db({DbName, Db, _}) ->
+ ?assertEqual(false, fabric2_db:is_system_db(Db)),
+ ?assertEqual(false, fabric2_db:is_system_db_name("foo")),
+ ?assertEqual(false, fabric2_db:is_system_db_name(DbName)),
+ ?assertEqual(true, fabric2_db:is_system_db_name(<<"_replicator">>)),
+ ?assertEqual(true, fabric2_db:is_system_db_name("_replicator")),
+ ?assertEqual(true, fabric2_db:is_system_db_name(<<"foo/_replicator">>)),
+ ?assertEqual(false, fabric2_db:is_system_db_name(<<"f.o/_replicator">>)),
+ ?assertEqual(false, fabric2_db:is_system_db_name(<<"foo/bar">>)).
+
+
+validate_dbname(_) ->
+ Tests = [
+ {ok, <<"foo">>},
+ {ok, "foo"},
+ {ok, <<"_replicator">>},
+ {error, illegal_database_name, <<"Foo">>},
+ {error, illegal_database_name, <<"foo|bar">>},
+ {error, illegal_database_name, <<"Foo">>},
+ {error, database_name_too_long, <<
+ "0123456789012345678901234567890123456789"
+ "0123456789012345678901234567890123456789"
+ "0123456789012345678901234567890123456789"
+ "0123456789012345678901234567890123456789"
+ "0123456789012345678901234567890123456789"
+ "0123456789012345678901234567890123456789"
+ >>}
+ ],
+ CheckFun = fun
+ ({ok, DbName}) ->
+ ?assertEqual(ok, fabric2_db:validate_dbname(DbName));
+ ({error, Reason, DbName}) ->
+ Expect = {error, {Reason, DbName}},
+ ?assertEqual(Expect, fabric2_db:validate_dbname(DbName))
+ end,
+ try
+ % Don't allow epi plugins to interfere with test results
+ meck:new(couch_epi, [passthrough]),
+ meck:expect(couch_epi, decide, 5, no_decision),
+ lists:foreach(CheckFun, Tests)
+ after
+ % Unload within the test to minimize interference with other tests
+ meck:unload()
+ end.
+
+
+validate_doc_ids(_) ->
+ % Basic test with default max infinity length
+ ?assertEqual(ok, fabric2_db:validate_docid(<<"foo">>)),
+
+ Tests = [
+ {ok, <<"_local/foo">>},
+ {ok, <<"_design/foo">>},
+ {ok, <<"0123456789012345">>},
+ {illegal_docid, <<"">>},
+ {illegal_docid, <<"_design/">>},
+ {illegal_docid, <<"_local/">>},
+ {illegal_docid, <<"01234567890123456">>},
+ {illegal_docid, <<16#FF>>},
+ {illegal_docid, <<"_bad">>},
+ {illegal_docid, null}
+ ],
+ CheckFun = fun
+ ({ok, DocId}) ->
+ ?assertEqual(ok, fabric2_db:validate_docid(DocId));
+ ({illegal_docid, DocId}) ->
+ ?assertThrow({illegal_docid, _}, fabric2_db:validate_docid(DocId))
+ end,
+
+ try
+ meck:new(config, [passthrough]),
+ meck:expect(
+ config,
+ get,
+ ["couchdb", "max_document_id_length", "infinity"],
+ "16"
+ ),
+ lists:foreach(CheckFun, Tests),
+
+    % Check that fabric2_db_plugin can allow
+    % underscore prefixed doc ids
+ meck:new(fabric2_db_plugin, [passthrough]),
+ meck:expect(fabric2_db_plugin, validate_docid, ['_'], true),
+ ?assertEqual(ok, fabric2_db:validate_docid(<<"_wheee">>))
+ after
+ % Unloading within the test as the config mock
+ % interferes with the db version bump test.
+ meck:unload()
+ end.
+
+
+get_doc_info({_, Db, _}) ->
+ DocId = couch_uuids:random(),
+ InsertDoc = #doc{
+ id = DocId,
+ body = {[{<<"foo">>, true}]}
+ },
+ {ok, {Pos, Rev}} = fabric2_db:update_doc(Db, InsertDoc, []),
+
+ DI = fabric2_db:get_doc_info(Db, DocId),
+ ?assert(is_record(DI, doc_info)),
+ #doc_info{
+ id = DIDocId,
+ high_seq = HighSeq,
+ revs = Revs
+ } = DI,
+
+ ?assertEqual(DocId, DIDocId),
+ ?assert(is_binary(HighSeq)),
+ ?assertMatch([#rev_info{}], Revs),
+
+ [#rev_info{
+ rev = DIRev,
+ seq = Seq,
+ deleted = Deleted,
+ body_sp = BodySp
+ }] = Revs,
+
+ ?assertEqual({Pos, Rev}, DIRev),
+ ?assert(is_binary(Seq)),
+ ?assert(not Deleted),
+ ?assertMatch(undefined, BodySp).
+
+
+get_doc_info_not_found({_, Db, _}) ->
+ DocId = couch_uuids:random(),
+ ?assertEqual(not_found, fabric2_db:get_doc_info(Db, DocId)).
+
+
+get_full_doc_info({_, Db, _}) ->
+ DocId = couch_uuids:random(),
+ InsertDoc = #doc{
+ id = DocId,
+ body = {[{<<"foo">>, true}]}
+ },
+ {ok, {Pos, Rev}} = fabric2_db:update_doc(Db, InsertDoc, []),
+ FDI = fabric2_db:get_full_doc_info(Db, DocId),
+
+ ?assert(is_record(FDI, full_doc_info)),
+ #full_doc_info{
+ id = FDIDocId,
+ update_seq = UpdateSeq,
+ deleted = Deleted,
+ rev_tree = RevTree,
+ sizes = SizeInfo
+ } = FDI,
+
+ ?assertEqual(DocId, FDIDocId),
+ ?assert(is_binary(UpdateSeq)),
+ ?assert(not Deleted),
+ ?assertMatch([{Pos, {Rev, _, []}}], RevTree),
+ ?assertEqual(#size_info{}, SizeInfo).
+
+
+get_full_doc_info_not_found({_, Db, _}) ->
+ DocId = couch_uuids:random(),
+ ?assertEqual(not_found, fabric2_db:get_full_doc_info(Db, DocId)).
+
+
+get_full_doc_infos({_, Db, _}) ->
+ DocIds = lists:map(fun(_) ->
+ DocId = couch_uuids:random(),
+ Doc = #doc{id = DocId},
+ {ok, _} = fabric2_db:update_doc(Db, Doc, []),
+ DocId
+ end, lists:seq(1, 5)),
+
+ FDIs = fabric2_db:get_full_doc_infos(Db, DocIds),
+ lists:zipwith(fun(DocId, FDI) ->
+ ?assertEqual(DocId, FDI#full_doc_info.id)
+ end, DocIds, FDIs).
+
+
+ensure_full_commit({_, Db, _}) ->
+ ?assertEqual({ok, 0}, fabric2_db:ensure_full_commit(Db)),
+ ?assertEqual({ok, 0}, fabric2_db:ensure_full_commit(Db, 5)).
+
+
+metadata_bump({DbName, _, _}) ->
+ % Call open again here to make sure we have a version in the cache
+ % as we'll be checking if that version gets its metadata bumped
+ {ok, Db} = fabric2_db:open(DbName, [{user_ctx, ?ADMIN_USER}]),
+
+    % Emulate a remote client bumping the metadata version
+ {ok, Fdb} = application:get_env(fabric, db),
+ erlfdb:transactional(Fdb, fun(Tx) ->
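+        % The metadata version key takes a 14 byte (<<0:112>>) value; FDB is
+        % expected to fill its leading bytes with the commit versionstamp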
+ erlfdb:set_versionstamped_value(Tx, ?METADATA_VERSION_KEY, <<0:112>>)
+ end),
+ NewMDVersion = erlfdb:transactional(Fdb, fun(Tx) ->
+ erlfdb:wait(erlfdb:get(Tx, ?METADATA_VERSION_KEY))
+ end),
+
+    % Save the timestamp before ensure_current/1 is called
+ TsBeforeEnsureCurrent = erlang:monotonic_time(millisecond),
+
+ % Perform a random operation which calls ensure_current
+ {ok, _} = fabric2_db:get_db_info(Db),
+
+ % Check that db handle in the cache got the new metadata version
+ % and that check_current_ts was updated
+ CachedDb = fabric2_server:fetch(DbName, undefined),
+ ?assertMatch(#{
+ md_version := NewMDVersion,
+ check_current_ts := Ts
+ } when Ts >= TsBeforeEnsureCurrent, CachedDb).
+
+
+db_version_bump({DbName, _, _}) ->
+ % Call open again here to make sure we have a version in the cache
+ % as we'll be checking if that version gets its metadata bumped
+ {ok, Db} = fabric2_db:open(DbName, [{user_ctx, ?ADMIN_USER}]),
+
+    % Emulate a remote client bumping the db version. Write directly to FDB
+    % instead of going through a regular db open + security doc update so that
+    % the local cache is not touched
+ #{db_prefix := DbPrefix} = Db,
+ DbVersionKey = erlfdb_tuple:pack({?DB_VERSION}, DbPrefix),
+ {ok, Fdb} = application:get_env(fabric, db),
+ NewDbVersion = fabric2_util:uuid(),
+ erlfdb:transactional(Fdb, fun(Tx) ->
+ erlfdb:set(Tx, DbVersionKey, NewDbVersion),
+ erlfdb:set_versionstamped_value(Tx, ?METADATA_VERSION_KEY, <<0:112>>)
+ end),
+
+ % Perform a random operation which calls ensure_current
+ {ok, _} = fabric2_db:get_db_info(Db),
+
+ % After previous operation, the cache should have been cleared
+ ?assertMatch(undefined, fabric2_server:fetch(DbName, undefined)),
+
+ % Call open again and check that we have the latest db version
+ {ok, Db2} = fabric2_db:open(DbName, [{user_ctx, ?ADMIN_USER}]),
+
+ % Check that db handle in the cache got the new metadata version
+ ?assertMatch(#{db_version := NewDbVersion}, Db2).
+
+
+db_cache_doesnt_evict_newer_handles({DbName, _, _}) ->
+ {ok, Db} = fabric2_db:open(DbName, [{user_ctx, ?ADMIN_USER}]),
+ CachedDb = fabric2_server:fetch(DbName, undefined),
+
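+    % Build a handle with an older metadata version than the cached one; the
+    % cache should refuse to replace its newer entry with it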
+ StaleDb = Db#{md_version := <<0>>},
+
+ ok = fabric2_server:store(StaleDb),
+ ?assertEqual(CachedDb, fabric2_server:fetch(DbName, undefined)),
+
+ ?assert(not fabric2_server:maybe_update(StaleDb)),
+ ?assertEqual(CachedDb, fabric2_server:fetch(DbName, undefined)),
+
+ ?assert(not fabric2_server:maybe_remove(StaleDb)),
+ ?assertEqual(CachedDb, fabric2_server:fetch(DbName, undefined)).
+
+
+events_listener({DbName, Db, _}) ->
+ Opts = [
+ {dbname, DbName},
+ {uuid, fabric2_db:get_uuid(Db)},
+ {timeout, 100}
+ ],
+
+ Fun = event_listener_callback,
+ {ok, Pid} = fabric2_events:link_listener(?MODULE, Fun, self(), Opts),
+ unlink(Pid),
+ Ref = monitor(process, Pid),
+
+ NextEvent = fun(Timeout) ->
+ receive
+ {Pid, Evt} when is_pid(Pid) -> Evt;
+ {'DOWN', Ref, _, _, normal} -> exited_normal
+ after Timeout ->
+ timeout
+ end
+ end,
+
+ Doc1 = #doc{id = couch_uuids:random()},
+ {ok, _} = fabric2_db:update_doc(Db, Doc1, []),
+ ?assertEqual(updated, NextEvent(1000)),
+
+ % Just one update, then expect a timeout
+ ?assertEqual(timeout, NextEvent(500)),
+
+ Doc2 = #doc{id = couch_uuids:random()},
+ {ok, _} = fabric2_db:update_doc(Db, Doc2, []),
+ ?assertEqual(updated, NextEvent(1000)),
+
+ % Process is still alive
+ ?assert(is_process_alive(Pid)),
+
+ % Recreate db
+ ok = fabric2_db:delete(DbName, [?ADMIN_CTX]),
+ {ok, _} = fabric2_db:create(DbName, [?ADMIN_CTX]),
+ ?assertEqual(deleted, NextEvent(1000)),
+
+ % After db is deleted or re-created listener should die
+ ?assertEqual(exited_normal, NextEvent(1000)).
+
+
+% Callback for event_listener function
+event_listener_callback(_DbName, Event, TestPid) ->
+ TestPid ! {self(), Event},
+ {ok, TestPid}.
diff --git a/src/fabric/test/fabric2_db_security_tests.erl b/src/fabric/test/fabric2_db_security_tests.erl
new file mode 100644
index 000000000..3d7167a00
--- /dev/null
+++ b/src/fabric/test/fabric2_db_security_tests.erl
@@ -0,0 +1,219 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_db_security_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("fabric2_test.hrl").
+
+
+security_test_() ->
+ {
+ "Test database security operations",
+ {
+ setup,
+ fun setup/0,
+ fun cleanup/1,
+ with([
+ ?TDEF(check_is_admin),
+ ?TDEF(check_is_not_admin),
+ ?TDEF(check_is_admin_role),
+ ?TDEF(check_is_not_admin_role),
+ ?TDEF(check_is_member_name),
+ ?TDEF(check_is_not_member_name),
+ ?TDEF(check_is_member_role),
+ ?TDEF(check_is_not_member_role),
+ ?TDEF(check_admin_is_member),
+ ?TDEF(check_is_member_of_public_db),
+ ?TDEF(check_set_user_ctx),
+ ?TDEF(check_forbidden),
+ ?TDEF(check_fail_no_opts),
+ ?TDEF(check_fail_name_null),
+ ?TDEF(check_forbidden_with_interactive_reopen)
+ ])
+ }
+ }.
+
+
+setup() ->
+ Ctx = test_util:start_couch([fabric]),
+ DbName = ?tempdb(),
+ PubDbName = ?tempdb(),
+ {ok, Db1} = fabric2_db:create(DbName, [?ADMIN_CTX]),
+ ok = set_test_security(Db1),
+ {ok, _} = fabric2_db:create(PubDbName, [?ADMIN_CTX]),
+ {DbName, PubDbName, Ctx}.
+
+
+cleanup({DbName, PubDbName, Ctx}) ->
+ ok = fabric2_db:delete(DbName, []),
+ ok = fabric2_db:delete(PubDbName, []),
+ test_util:stop_couch(Ctx).
+
+
+set_test_security(Db) ->
+ SecProps = {[
+ {<<"admins">>, {[
+ {<<"names">>, [<<"admin_name1">>, <<"admin_name2">>]},
+ {<<"roles">>, [<<"admin_role1">>, <<"admin_role2">>]}
+ ]}},
+ {<<"members">>, {[
+ {<<"names">>, [<<"member_name1">>, <<"member_name2">>]},
+ {<<"roles">>, [<<"member_role1">>, <<"member_role2">>]}
+ ]}}
+ ]},
+ ok = fabric2_db:set_security(Db, SecProps).
+
+
+check_is_admin({DbName, _, _}) ->
+ UserCtx = #user_ctx{name = <<"admin_name1">>},
+ {ok, Db} = fabric2_db:open(DbName, [{user_ctx, UserCtx}]),
+ ?assertEqual(ok, fabric2_db:check_is_admin(Db)).
+
+
+check_is_not_admin({DbName, _, _}) ->
+ {ok, Db1} = fabric2_db:open(DbName, [{user_ctx, #user_ctx{}}]),
+ ?assertThrow(
+ {unauthorized, <<"You are not authorized", _/binary>>},
+ fabric2_db:check_is_admin(Db1)
+ ),
+
+ UserCtx = #user_ctx{name = <<"member_name1">>},
+ {ok, Db2} = fabric2_db:open(DbName, [{user_ctx, UserCtx}]),
+ ?assertThrow(
+ {forbidden, <<"You are not a db or server admin.">>},
+ fabric2_db:check_is_admin(Db2)
+ ).
+
+
+check_is_admin_role({DbName, _, _}) ->
+ UserCtx = #user_ctx{roles = [<<"admin_role1">>]},
+ {ok, Db} = fabric2_db:open(DbName, [{user_ctx, UserCtx}]),
+ ?assertEqual(ok, fabric2_db:check_is_admin(Db)).
+
+
+check_is_not_admin_role({DbName, _, _}) ->
+ UserCtx = #user_ctx{
+ name = <<"member_name1">>,
+ roles = [<<"member_role1">>]
+ },
+ {ok, Db} = fabric2_db:open(DbName, [{user_ctx, UserCtx}]),
+ ?assertThrow(
+ {forbidden, <<"You are not a db or server admin.">>},
+ fabric2_db:check_is_admin(Db)
+ ).
+
+
+check_is_member_name({DbName, _, _}) ->
+ UserCtx = #user_ctx{name = <<"member_name1">>},
+ {ok, Db} = fabric2_db:open(DbName, [{user_ctx, UserCtx}]),
+ ?assertEqual(ok, fabric2_db:check_is_member(Db)).
+
+
+check_is_not_member_name({DbName, _, _}) ->
+ {ok, Db1} = fabric2_db:open(DbName, [{user_ctx, #user_ctx{}}]),
+ ?assertThrow(
+ {unauthorized, <<"You are not authorized", _/binary>>},
+ fabric2_db:check_is_member(Db1)
+ ),
+
+ UserCtx = #user_ctx{name = <<"foo">>},
+ {ok, Db2} = fabric2_db:open(DbName, [{user_ctx, UserCtx}]),
+ ?assertThrow(
+ {forbidden, <<"You are not allowed to access", _/binary>>},
+ fabric2_db:check_is_member(Db2)
+ ).
+
+
+check_is_member_role({DbName, _, _}) ->
+ UserCtx = #user_ctx{name = <<"foo">>, roles = [<<"member_role1">>]},
+ {ok, Db} = fabric2_db:open(DbName, [{user_ctx, UserCtx}]),
+ ?assertEqual(ok, fabric2_db:check_is_member(Db)).
+
+
+check_is_not_member_role({DbName, _, _}) ->
+ UserCtx = #user_ctx{name = <<"foo">>, roles = [<<"bar">>]},
+ {ok, Db} = fabric2_db:open(DbName, [{user_ctx, UserCtx}]),
+ ?assertThrow(
+ {forbidden, <<"You are not allowed to access", _/binary>>},
+ fabric2_db:check_is_member(Db)
+ ).
+
+
+check_admin_is_member({DbName, _, _}) ->
+ UserCtx = #user_ctx{name = <<"admin_name1">>},
+ {ok, Db} = fabric2_db:open(DbName, [{user_ctx, UserCtx}]),
+ ?assertEqual(ok, fabric2_db:check_is_member(Db)).
+
+
+check_is_member_of_public_db({_, PubDbName, _}) ->
+ {ok, Db1} = fabric2_db:open(PubDbName, [{user_ctx, #user_ctx{}}]),
+ ?assertEqual(ok, fabric2_db:check_is_member(Db1)),
+
+ UserCtx = #user_ctx{name = <<"foo">>, roles = [<<"bar">>]},
+ {ok, Db2} = fabric2_db:open(PubDbName, [{user_ctx, UserCtx}]),
+ ?assertEqual(ok, fabric2_db:check_is_member(Db2)).
+
+
+check_set_user_ctx({DbName, _, _}) ->
+ UserCtx = #user_ctx{name = <<"foo">>, roles = [<<"admin_role1">>]},
+ {ok, Db1} = fabric2_db:open(DbName, [{user_ctx, UserCtx}]),
+ ?assertEqual(UserCtx, fabric2_db:get_user_ctx(Db1)).
+
+
+check_forbidden({DbName, _, _}) ->
+ UserCtx = #user_ctx{name = <<"foo">>, roles = [<<"bar">>]},
+ {ok, Db} = fabric2_db:open(DbName, [{user_ctx, UserCtx}]),
+ ?assertThrow({forbidden, _}, fabric2_db:get_db_info(Db)).
+
+
+check_fail_no_opts({DbName, _, _}) ->
+ {ok, Db} = fabric2_db:open(DbName, []),
+ ?assertThrow({unauthorized, _}, fabric2_db:get_db_info(Db)).
+
+
+check_fail_name_null({DbName, _, _}) ->
+ UserCtx = #user_ctx{name = null},
+ {ok, Db} = fabric2_db:open(DbName, [{user_ctx, UserCtx}]),
+ ?assertThrow({unauthorized, _}, fabric2_db:get_db_info(Db)).
+
+
+check_forbidden_with_interactive_reopen({DbName, _, _}) ->
+ UserCtx = #user_ctx{name = <<"foo">>},
+ Options = [{user_ctx, UserCtx}, {interactive, true}],
+
+ {ok, Db1} = fabric2_db:open(DbName, Options),
+
+ % Verify foo is forbidden by default
+ ?assertThrow({forbidden, _}, fabric2_db:get_db_info(Db1)),
+
+ % Allow foo
+ {ok, Db2} = fabric2_db:open(DbName, [?ADMIN_CTX]),
+ AllowFoo = {[
+ {<<"members">>, {[
+ {<<"names">>, [<<"foo">>]}
+ ]}}
+ ]},
+ ok = fabric2_db:set_security(Db2, AllowFoo),
+
+ ?assertMatch({ok, _}, fabric2_db:get_db_info(Db1)),
+
+    % Recreate the test db with the original test security
+ ok = fabric2_db:delete(DbName, [?ADMIN_CTX]),
+ {ok, Db3} = fabric2_db:create(DbName, [?ADMIN_CTX]),
+ ok = set_test_security(Db3),
+
+    % The original handle is forbidden again
+ ?assertThrow({forbidden, _}, fabric2_db:get_db_info(Db1)).
diff --git a/src/fabric/test/fabric2_db_size_tests.erl b/src/fabric/test/fabric2_db_size_tests.erl
new file mode 100644
index 000000000..0bb9c7a8e
--- /dev/null
+++ b/src/fabric/test/fabric2_db_size_tests.erl
@@ -0,0 +1,918 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_db_size_tests).
+
+-export([
+ random_body/0
+]).
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("fabric2_test.hrl").
+
+
+db_size_test_() ->
+ {
+ "Test database size calculations",
+ {
+ setup,
+ fun setup/0,
+ fun cleanup/1,
+ with([
+ ?TDEF(new_doc),
+ ?TDEF(replicate_new_doc),
+ ?TDEF(edit_doc),
+ ?TDEF(delete_doc),
+ ?TDEF(create_conflict),
+ ?TDEF(replicate_new_winner),
+ ?TDEF(replicate_deep_deleted),
+ ?TDEF(delete_winning_revision),
+ ?TDEF(delete_conflict_revision),
+ ?TDEF(replicate_existing_revision),
+ ?TDEF(replicate_shared_history),
+ ?TDEF(create_doc_with_attachment),
+ ?TDEF(add_attachment_in_update),
+ ?TDEF(add_second_attachment),
+ ?TDEF(delete_attachment),
+ ?TDEF(delete_one_attachment),
+ ?TDEF(delete_all_attachments),
+ ?TDEF(re_add_attachment),
+ ?TDEF(update_and_remove_attachment),
+ ?TDEF(replicate_new_doc_with_attachment),
+ ?TDEF(replicate_remove_attachment),
+ ?TDEF(replicate_stub_attachment),
+ ?TDEF(replicate_stub_and_new_attachment),
+ ?TDEF(replicate_new_att_to_winner),
+ ?TDEF(replicate_change_att_to_winner),
+ ?TDEF(replicate_rem_att_from_winner),
+ ?TDEF(replicate_stub_to_winner),
+ ?TDEF(replicate_new_att_to_conflict),
+ ?TDEF(replicate_change_att_to_conflict),
+ ?TDEF(replicate_rem_att_from_conflict),
+ ?TDEF(replicate_stub_to_conflict),
+ ?TDEF(create_local_doc),
+ ?TDEF(update_local_doc),
+ ?TDEF(delete_local_doc),
+ ?TDEF(recreate_local_doc)
+ ])
+ }
+ }.
+
+
+setup() ->
+ Ctx = test_util:start_couch([fabric]),
+ {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+ {Db, Ctx}.
+
+
+cleanup({Db, Ctx}) ->
+ ok = fabric2_db:delete(fabric2_db:name(Db), []),
+ test_util:stop_couch(Ctx).
+
+
+new_doc({Db, _}) ->
+ check(Db, [
+ {create, #{tgt => rev1}}
+ ]).
+
+
+replicate_new_doc({Db, _}) ->
+ check(Db, [
+ {replicate, #{tgt => rev1}}
+ ]).
+
+
+edit_doc({Db, _}) ->
+ check(Db, [
+ {create, #{tgt => rev1}},
+ {update, #{src => rev1, tgt => rev2}}
+ ]).
+
+
+delete_doc({Db, _}) ->
+ check(Db, [
+ {create, #{tgt => rev1}},
+ {delete, #{src => rev1, tgt => rev2}}
+ ]).
+
+
+create_conflict({Db, _}) ->
+ check(Db, [
+ {create, #{tgt => rev1}},
+ {replicate, #{tgt => rev2}}
+ ]).
+
+
+replicate_new_winner({Db, _}) ->
+ check(Db, [
+ {create, #{tgt => rev1}},
+ {replicate, #{tgt => rev2, depth => 3}}
+ ]).
+
+
+replicate_deep_deleted({Db, _}) ->
+ check(Db, [
+ {create, #{tgt => rev1, depth => 2}},
+ {replicate, #{tgt => rev2, depth => 5, deleted => true}}
+ ]).
+
+
+delete_winning_revision({Db, _}) ->
+ check(Db, [
+ {create, #{tgt => rev1}},
+ {replicate, #{tgt => rev2}},
+ {delete, #{src => {winner, [rev1, rev2]}, tgt => rev3}}
+ ]).
+
+
+delete_conflict_revision({Db, _}) ->
+ check(Db, [
+ {create, #{tgt => rev1}},
+ {replicate, #{tgt => rev2}},
+ {delete, #{src => {conflict, [rev1, rev2]}, tgt => rev3}}
+ ]).
+
+
+replicate_existing_revision({Db, _}) ->
+ check(Db, [
+ {create, #{tgt => rev1}},
+ {replicate, #{src => rev1, tgt => rev2, depth => 0}}
+ ]).
+
+
+replicate_shared_history({Db, _}) ->
+ check(Db, [
+ {create, #{tgt => rev1, depth => 5}},
+ {update, #{src => rev1, tgt => rev2, depth => 5}},
+ {replicate, #{
+ src => rev1,
+ src_exists => false,
+ tgt => rev3,
+ depth => 5
+ }}
+ ]).
+
+
+create_doc_with_attachment({Db, _}) ->
+ check(Db, [
+ {mk_att, #{tgt => att1}},
+ {create, #{tgt => rev1, atts => [att1]}}
+ ]).
+
+
+add_attachment_in_update({Db, _}) ->
+ check(Db, [
+ {mk_att, #{tgt => att1}},
+ {create, #{tgt => rev1}},
+ {update, #{src => rev1, tgt => rev2, atts => [att1]}}
+ ]).
+
+
+add_second_attachment({Db, _}) ->
+ check(Db, [
+ {mk_att, #{tgt => att1}},
+ {mk_att, #{tgt => att2}},
+ {create, #{tgt => rev1, atts => [att1]}},
+ {update, #{src => rev1, tgt => rev2, atts => [att1, att2]}}
+ ]).
+
+
+delete_attachment({Db, _}) ->
+ check(Db, [
+ {mk_att, #{tgt => att1}},
+ {create, #{tgt => rev1, atts => [att1]}},
+ {update, #{src => rev1, tgt => rev2}}
+ ]).
+
+
+delete_one_attachment({Db, _}) ->
+ check(Db, [
+ {mk_att, #{tgt => att1}},
+ {mk_att, #{tgt => att2}},
+ {mk_att, #{tgt => att3, stub => att1, revpos => 1}},
+ {create, #{tgt => rev1, atts => [att1, att2]}},
+ {update, #{src => rev1, tgt => rev2, atts => [att3]}}
+ ]).
+
+
+delete_all_attachments({Db, _}) ->
+ check(Db, [
+ {mk_att, #{tgt => att1}},
+ {mk_att, #{tgt => att2}},
+ {create, #{tgt => rev1, atts => [att1, att2]}},
+ {update, #{src => rev1, tgt => rev2, atts => []}}
+ ]).
+
+
+re_add_attachment({Db, _}) ->
+ check(Db, [
+ {mk_att, #{tgt => att1}},
+ {create, #{tgt => rev1, atts => [att1]}},
+ {update, #{src => rev1, tgt => rev2}},
+ {update, #{src => rev2, tgt => rev3, atts => [att1]}}
+ ]).
+
+
+update_and_remove_attachment({Db, _}) ->
+ check(Db, [
+ {mk_att, #{tgt => att1}},
+ {mk_att, #{tgt => att2}},
+ {mk_att, #{tgt => att3, stub => att1, revpos => 1}},
+ {mk_att, #{tgt => att4}},
+ {create, #{tgt => rev1, atts => [att1, att2]}},
+ {update, #{src => rev1, tgt => rev2, atts => [att3, att4]}}
+ ]).
+
+
+replicate_new_doc_with_attachment({Db, _}) ->
+ check(Db, [
+ {mk_att, #{tgt => att1}},
+ {replicate, #{tgt => rev1, atts => [att1]}}
+ ]).
+
+
+replicate_remove_attachment({Db, _}) ->
+ check(Db, [
+ {mk_att, #{tgt => att1}},
+ {create, #{tgt => rev1, atts => [att1]}},
+ {replicate, #{src => rev1, tgt => rev2}}
+ ]).
+
+
+replicate_stub_attachment({Db, _}) ->
+ check(Db, [
+ {mk_att, #{tgt => att1}},
+ {mk_att, #{tgt => att2, stub => att1, revpos => 1}},
+ {create, #{tgt => rev1, atts => [att1]}},
+ {replicate, #{src => rev1, tgt => rev2, atts => [att2]}}
+ ]).
+
+
+replicate_stub_and_new_attachment({Db, _}) ->
+ check(Db, [
+ {mk_att, #{tgt => att1}},
+ {mk_att, #{tgt => att2, stub => att1, revpos => 1}},
+ {mk_att, #{tgt => att3}},
+ {create, #{tgt => rev1, atts => [att1]}},
+ {replicate, #{src => rev1, tgt => rev2, atts => [att2, att3]}}
+ ]).
+
+
+replicate_new_att_to_winner({Db, _}) ->
+ check(Db, [
+ {mk_att, #{tgt => att1}},
+ {create, #{tgt => rev1}},
+ {replicate, #{tgt => rev2}},
+ {replicate, #{
+ src => {winner, [rev1, rev2]},
+ tgt => rev3,
+ atts => [att1]}
+ }
+ ]).
+
+
+replicate_change_att_to_winner({Db, _}) ->
+ check(Db, [
+ {mk_att, #{tgt => att1}},
+ {mk_att, #{tgt => att2}},
+ {create, #{tgt => rev1, atts => [att1]}},
+ {replicate, #{tgt => rev2, atts => [att1]}},
+ {replicate, #{
+ src => {winner, [rev1, rev2]},
+ tgt => rev3,
+ atts => [att2]}
+ }
+ ]).
+
+
+replicate_rem_att_from_winner({Db, _}) ->
+ check(Db, [
+ {mk_att, #{tgt => att1}},
+ {create, #{tgt => rev1, atts => [att1]}},
+ {replicate, #{tgt => rev2, atts => [att1]}},
+ {replicate, #{src => {winner, [rev1, rev2]}, tgt => rev3}}
+ ]).
+
+
+replicate_stub_to_winner({Db, _}) ->
+ check(Db, [
+ {mk_att, #{tgt => att1}},
+ {mk_att, #{tgt => att2, stub => att1, revpos => 1}},
+ {create, #{tgt => rev1, atts => [att1]}},
+ {replicate, #{tgt => rev2, atts => [att1]}},
+ {replicate, #{
+ src => {winner, [rev1, rev2]},
+ tgt => rev3,
+ atts => [att2]}}
+ ]).
+
+
+replicate_new_att_to_conflict({Db, _}) ->
+ check(Db, [
+ {mk_att, #{tgt => att1}},
+ {create, #{tgt => rev1}},
+ {replicate, #{tgt => rev2}},
+ {replicate, #{
+ src => {conflict, [rev1, rev2]},
+ tgt => rev3,
+ atts => [att1]}
+ }
+ ]).
+
+
+replicate_change_att_to_conflict({Db, _}) ->
+ check(Db, [
+ {mk_att, #{tgt => att1}},
+ {mk_att, #{tgt => att2}},
+ {create, #{tgt => rev1, atts => [att1]}},
+ {replicate, #{tgt => rev2, atts => [att1]}},
+ {replicate, #{
+ src => {conflict, [rev1, rev2]},
+ tgt => rev3,
+ atts => [att2]}
+ }
+ ]).
+
+
+replicate_rem_att_from_conflict({Db, _}) ->
+ check(Db, [
+ {mk_att, #{tgt => att1}},
+ {create, #{tgt => rev1, atts => [att1]}},
+ {replicate, #{tgt => rev2, atts => [att1]}},
+ {replicate, #{src => {conflict, [rev1, rev2]}, tgt => rev3}}
+ ]).
+
+
+replicate_stub_to_conflict({Db, _}) ->
+ check(Db, [
+ {mk_att, #{tgt => att1}},
+ {mk_att, #{tgt => att2, stub => att1, revpos => 1}},
+ {create, #{tgt => rev1, atts => [att1]}},
+ {replicate, #{tgt => rev2, atts => [att1]}},
+ {replicate, #{
+ src => {conflict, [rev1, rev2]},
+ tgt => rev3,
+ atts => [att2]}}
+ ]).
+
+
+create_local_doc({Db, _}) ->
+ check(Db, #{local => true}, [
+ {create, #{tgt => rev1}}
+ ]).
+
+
+update_local_doc({Db, _}) ->
+ check(Db, #{local => true}, [
+ {create, #{tgt => rev1}},
+ {update, #{src => rev1, tgt => rev2}}
+ ]).
+
+
+delete_local_doc({Db, _}) ->
+ check(Db, #{local => true}, [
+ {create, #{tgt => rev1}},
+ {update, #{src => rev1, tgt => rev2}},
+ {delete, #{src => rev2, tgt => rev3}}
+ ]).
+
+
+recreate_local_doc({Db, _}) ->
+ check(Db, #{local => true}, [
+ {create, #{tgt => rev1}},
+ {update, #{src => rev1, tgt => rev2}},
+ {delete, #{src => rev2, tgt => rev3}},
+ {create, #{tgt => rev4}}
+ ]).
+
+
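+% Drives a size-accounting scenario. Each {Action, Opts} tuple creates,
+% updates, deletes or replicates the test document (or builds an attachment
+% via mk_att) and then asserts that the database's external size changed by
+% exactly the delta in the document's calculated size.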
+check(Db, Actions) ->
+ check(Db, #{}, Actions).
+
+
+check(Db, CheckOpts, Actions) ->
+ DocId = case maps:get(local, CheckOpts, false) of
+ true ->
+ Base = couch_uuids:random(),
+ <<"_local/", Base/binary>>;
+ false ->
+ couch_uuids:random()
+ end,
+ InitSt = #{
+ doc_id => DocId,
+ revs => #{},
+ atts => #{},
+ size => db_size(Db)
+ },
+ lists:foldl(fun({Action, Opts}, StAcc) ->
+ case Action of
+ create -> create_doc(Db, Opts, StAcc);
+ update -> update_doc(Db, Opts, StAcc);
+ delete -> delete_doc(Db, Opts, StAcc);
+ replicate -> replicate_doc(Db, Opts, StAcc);
+ mk_att -> make_attachment(Opts, StAcc);
+ log_state -> log_state(Opts, StAcc)
+ end
+ end, InitSt, Actions).
+
+
+create_doc(Db, Opts, St) ->
+ #{
+ doc_id := DocId,
+ revs := Revs,
+ atts := Atts,
+ size := InitDbSize
+ } = St,
+
+ ?assert(maps:is_key(tgt, Opts)),
+
+ Tgt = maps:get(tgt, Opts),
+ AttKeys = maps:get(atts, Opts, []),
+ Depth = maps:get(depth, Opts, 1),
+
+ ?assert(not maps:is_key(Tgt, Revs)),
+ lists:foreach(fun(AttKey) ->
+ ?assert(maps:is_key(AttKey, Atts))
+ end, AttKeys),
+ ?assert(Depth >= 1),
+
+ AttRecords = lists:map(fun(AttKey) ->
+ maps:get(AttKey, Atts)
+ end, AttKeys),
+
+ InitDoc = #doc{id = DocId},
+ FinalDoc = lists:foldl(fun(Iter, Doc0) ->
+ #doc{
+ revs = {_OldStart, OldRevs}
+ } = Doc1 = randomize_doc(Doc0),
+ Doc2 = if Iter < Depth -> Doc1; true ->
+ Doc1#doc{atts = AttRecords}
+ end,
+ {ok, {Pos, Rev}} = fabric2_db:update_doc(Db, Doc2),
+ Doc2#doc{revs = {Pos, [Rev | OldRevs]}}
+ end, InitDoc, lists:seq(1, Depth)),
+
+ FinalDocSize = doc_size(FinalDoc),
+ FinalDbSize = db_size(Db),
+
+ ?assertEqual(FinalDbSize - InitDbSize, FinalDocSize),
+
+ store_rev(Db, St, FinalDbSize, Tgt, FinalDoc).
+
+
+update_doc(Db, Opts, St) ->
+ #{
+ doc_id := DocId,
+ revs := Revs,
+ atts := Atts,
+ size := InitDbSize
+ } = St,
+
+ IsLocal = case DocId of
+ <<"_local/", _/binary>> -> true;
+ _ -> false
+ end,
+
+ ?assert(maps:is_key(src, Opts)),
+ ?assert(maps:is_key(tgt, Opts)),
+
+ Src = pick_rev(Revs, maps:get(src, Opts)),
+ Tgt = maps:get(tgt, Opts),
+ AttKeys = maps:get(atts, Opts, []),
+ Depth = maps:get(depth, Opts, 1),
+
+ ?assert(maps:is_key(Src, Revs)),
+ ?assert(not maps:is_key(Tgt, Revs)),
+ lists:foreach(fun(AttKey) ->
+ ?assert(maps:is_key(AttKey, Atts))
+ end, AttKeys),
+ ?assert(Depth >= 1),
+
+ AttRecords = lists:map(fun(AttKey) ->
+ maps:get(AttKey, Atts)
+ end, AttKeys),
+
+ InitDoc = maps:get(Src, Revs),
+ FinalDoc = lists:foldl(fun(Iter, Doc0) ->
+ #doc{
+ revs = {_OldStart, OldRevs}
+ } = Doc1 = randomize_doc(Doc0),
+ Doc2 = if Iter < Depth -> Doc1; true ->
+ Doc1#doc{atts = AttRecords}
+ end,
+ {ok, {Pos, Rev}} = fabric2_db:update_doc(Db, Doc2),
+ case IsLocal of
+ true -> Doc2#doc{revs = {Pos, [Rev]}};
+ false -> Doc2#doc{revs = {Pos, [Rev | OldRevs]}}
+ end
+ end, InitDoc, lists:seq(1, Depth)),
+
+ InitDocSize = doc_size(InitDoc),
+ FinalDocSize = doc_size(FinalDoc),
+ FinalDbSize = db_size(Db),
+
+ ?assertEqual(FinalDbSize - InitDbSize, FinalDocSize - InitDocSize),
+
+ store_rev(Db, St, FinalDbSize, Tgt, FinalDoc).
+
+
+delete_doc(Db, Opts, St) ->
+ #{
+ doc_id := DocId,
+ revs := Revs,
+ size := InitDbSize
+ } = St,
+
+ IsLocal = case DocId of
+ <<"_local/", _/binary>> -> true;
+ _ -> false
+ end,
+
+ ?assert(maps:is_key(src, Opts)),
+ ?assert(maps:is_key(tgt, Opts)),
+
+ Src = pick_rev(Revs, maps:get(src, Opts)),
+ Tgt = maps:get(tgt, Opts),
+
+ ?assert(maps:is_key(Src, Revs)),
+ ?assert(not maps:is_key(Tgt, Revs)),
+
+ InitDoc = maps:get(Src, Revs),
+ #doc{
+ revs = {_OldStart, OldRevs}
+ } = UpdateDoc = randomize_deleted_doc(InitDoc),
+
+ {ok, {Pos, Rev}} = fabric2_db:update_doc(Db, UpdateDoc),
+
+ FinalDoc = case IsLocal of
+ true -> UpdateDoc#doc{revs = {Pos, [Rev]}};
+ false -> UpdateDoc#doc{revs = {Pos, [Rev | OldRevs]}}
+ end,
+
+ InitDocSize = doc_size(InitDoc),
+ FinalDocSize = doc_size(FinalDoc),
+ FinalDbSize = db_size(Db),
+
+ ?assertEqual(FinalDbSize - InitDbSize, FinalDocSize - InitDocSize),
+
+ store_rev(Db, St, FinalDbSize, Tgt, FinalDoc).
+
+
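+% Simulates an update arriving via replicated_changes: `depth` fabricated rev
+% ids are stacked on top of the source revision (if any), the body is
+% re-randomized or marked deleted, and non-stub attachments get the new
+% revpos while stubs keep their original one. The expected size change only
+% subtracts the source doc's size when that revision actually exists.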
+replicate_doc(Db, Opts, St) ->
+ #{
+ doc_id := DocId,
+ revs := Revs,
+ atts := Atts,
+ size := InitDbSize
+ } = St,
+
+ ?assert(maps:is_key(tgt, Opts)),
+
+ Src = pick_rev(Revs, maps:get(src, Opts, undefined)),
+ SrcExists = maps:get(src_exists, Opts, true),
+ Tgt = maps:get(tgt, Opts),
+ Deleted = maps:get(deleted, Opts, false),
+ AttKeys = maps:get(atts, Opts, []),
+ Depth = maps:get(depth, Opts, 1),
+
+ if Src == undefined -> ok; true ->
+ ?assert(maps:is_key(Src, Revs))
+ end,
+ ?assert(not maps:is_key(Tgt, Revs)),
+ ?assert(is_boolean(Deleted)),
+ lists:foreach(fun(AttKey) ->
+ ?assert(maps:is_key(AttKey, Atts))
+ end, AttKeys),
+ ?assert(Depth >= 0),
+
+ if Depth > 0 -> ok; true ->
+ ?assert(length(AttKeys) == 0)
+ end,
+
+ InitDoc = maps:get(Src, Revs, #doc{id = DocId}),
+ NewRevsDoc = lists:foldl(fun(_, Doc0) ->
+ #doc{
+ revs = {RevStart, RevIds}
+ } = Doc0,
+ NewRev = crypto:strong_rand_bytes(16),
+ Doc0#doc{
+ revs = {RevStart + 1, [NewRev | RevIds]}
+ }
+ end, InitDoc, lists:seq(1, Depth)),
+
+ FinalDoc = if NewRevsDoc == InitDoc -> NewRevsDoc; true ->
+ UpdateDoc = case Deleted of
+ true -> randomize_deleted_doc(NewRevsDoc);
+ false -> randomize_doc(NewRevsDoc)
+ end,
+ #doc{
+ revs = {RevPos, _}
+ } = UpdateDoc,
+ AttRecords = lists:map(fun(AttKey) ->
+ BaseAtt = maps:get(AttKey, Atts),
+ case couch_att:fetch(data, BaseAtt) of
+ stub -> BaseAtt;
+ <<_/binary>> -> couch_att:store(revpos, RevPos, BaseAtt)
+ end
+ end, AttKeys),
+ UpdateDoc#doc{atts = AttRecords}
+ end,
+
+ try
+ {ok, _} = fabric2_db:update_doc(Db, FinalDoc, [replicated_changes])
+ catch throw:{missing_stub, _} ->
+ log_state(#{}, St),
+ ?debugFmt("Replicated: ~p~n", [FinalDoc]),
+ ?assert(false)
+ end,
+
+ InitDocSize = doc_size(InitDoc),
+ FinalDocSize = doc_size(FinalDoc),
+ FinalDbSize = db_size(Db),
+
+ SizeChange = case {Src, SrcExists} of
+ {undefined, _} -> FinalDocSize;
+ {_, false} -> FinalDocSize;
+ {_, _} -> FinalDocSize - InitDocSize
+ end,
+ ?assertEqual(FinalDbSize - InitDbSize, SizeChange),
+
+ store_rev(Db, St, FinalDbSize, Tgt, FinalDoc).
+
+
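+% Builds a named attachment for later use by create/update/replicate actions.
+% `stub` bases the new entry on an existing attachment's data as a stub and
+% `revpos` optionally pins the revision position it claims to belong to.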
+make_attachment(Opts, St) ->
+ #{
+ atts := Atts
+ } = St,
+
+ ?assert(maps:is_key(tgt, Opts)),
+
+ Tgt = maps:get(tgt, Opts),
+ Stub = maps:get(stub, Opts, undefined),
+ RevPos = maps:get(revpos, Opts, undefined),
+ NameRaw = maps:get(name, Opts, undefined),
+
+ ?assert(not maps:is_key(Tgt, Atts)),
+ if Stub == undefined -> ok; true ->
+ ?assert(maps:is_key(Stub, Atts))
+ end,
+ ?assert(RevPos == undefined orelse RevPos >= 0),
+
+ Name = if
+ NameRaw == undefined -> undefined;
+ is_atom(NameRaw) -> atom_to_binary(NameRaw, utf8);
+ is_binary(NameRaw) -> NameRaw;
+ is_list(NameRaw) -> list_to_binary(NameRaw)
+ end,
+
+ Att0 = case Stub of
+ undefined ->
+ random_attachment(Name);
+ _ ->
+ SrcAtt = maps:get(Stub, Atts),
+ couch_att:store(data, stub, SrcAtt)
+ end,
+ Att1 = if RevPos == undefined -> Att0; true ->
+ couch_att:store(revpos, RevPos, Att0)
+ end,
+
+ St#{atts := maps:put(Tgt, Att1, Atts)}.
+
+
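+% Debugging helper: dumps the tracked document revisions, their sizes and the
+% attachment table. Only used from an explicit log_state action or when a
+% replicated update unexpectedly throws missing_stub.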
+log_state(_Opts, St) ->
+ #{
+ doc_id := DocId,
+ revs := Revs,
+ atts := Atts,
+ size := DbSize
+ } = St,
+
+ ?debugFmt("~nDocId: ~p~n", [DocId]),
+ ?debugFmt("Db Size: ~p~n~n", [DbSize]),
+
+ RevKeys = maps:keys(Revs),
+ lists:foreach(fun(RevKey) ->
+ #doc{
+ id = RevDocId,
+ revs = {Pos, [Rev | RestRevs]},
+ body = Body,
+ deleted = Deleted,
+ atts = DocAtts,
+ meta = Meta
+ } = Doc = maps:get(RevKey, Revs),
+ ?debugFmt("Doc: ~p (~p)~n", [RevKey, doc_size(Doc)]),
+ ?debugFmt("Id: ~p~n", [RevDocId]),
+ ?debugFmt("Rev: ~p ~w~n", [Pos, Rev]),
+ lists:foreach(fun(R) ->
+ ?debugFmt(" ~p~n", [R])
+ end, RestRevs),
+ ?debugFmt("Deleted: ~p~n", [Deleted]),
+ ?debugFmt("Atts:~n", []),
+ lists:foreach(fun(Att) ->
+ ?debugFmt(" ~p~n", [Att])
+ end, DocAtts),
+ ?debugFmt("Body: ~p~n", [Body]),
+ ?debugFmt("Meta: ~p~n", [Meta]),
+ ?debugFmt("~n", [])
+ end, lists:sort(RevKeys)),
+
+ AttKeys = maps:keys(Atts),
+ ?debugFmt("~n~nAtts:~n", []),
+ lists:foreach(fun(AttKey) ->
+ Att = maps:get(AttKey, Atts),
+ ?debugFmt("Att: ~p (~p)~n", [AttKey, couch_att:external_size(Att)]),
+ ?debugFmt(" ~p~n", [Att])
+ end, lists:sort(AttKeys)),
+
+ St.
+
+
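+% Resolves a revision reference: a bare atom names a stored revision directly,
+% while {winner, Revs} and {conflict, Revs} sort the candidates with
+% fabric2_util:sort_revinfos/1 and pick the head or a random non-winner.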
+pick_rev(_Revs, Rev) when is_atom(Rev) ->
+ Rev;
+pick_rev(Revs, {Op, RevList}) when Op == winner; Op == conflict ->
+ ChooseFrom = lists:map(fun(Rev) ->
+ #doc{
+ revs = {S, [R | _]},
+ deleted = Deleted
+ } = maps:get(Rev, Revs),
+ #{
+ deleted => Deleted,
+ rev_id => {S, R},
+ name => Rev
+ }
+ end, RevList),
+ Sorted = fabric2_util:sort_revinfos(ChooseFrom),
+ RetRev = case Op of
+ winner -> hd(Sorted);
+ conflict -> choose(tl(Sorted))
+ end,
+ maps:get(name, RetRev).
+
+
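+% Re-reads the updated document from the database (so the recorded copy is
+% exactly what fabric2 persisted) and stores it under the target name along
+% with the new database size.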
+store_rev(Db, St, DbSize, Tgt, #doc{id = <<"_local/", _/binary>>} = Doc) ->
+ DbDoc = case fabric2_db:open_doc(Db, Doc#doc.id) of
+ {ok, Found} -> Found;
+ {not_found, _} -> not_found
+ end,
+ store_rev(St, DbSize, Tgt, DbDoc);
+
+store_rev(Db, St, DbSize, Tgt, #doc{} = Doc) ->
+ #doc{
+ id = DocId,
+ revs = {Pos, [Rev | _]}
+ } = Doc,
+ RevId = {Pos, Rev},
+ {ok, [{ok, DbDoc}]} = fabric2_db:open_doc_revs(Db, DocId, [RevId], []),
+ store_rev(St, DbSize, Tgt, DbDoc).
+
+
+store_rev(St, DbSize, Tgt, Doc) ->
+ #{
+ revs := Revs
+ } = St,
+ ?assert(not maps:is_key(Tgt, Revs)),
+ St#{
+ revs := maps:put(Tgt, Doc, Revs),
+ size := DbSize
+ }.
+
+
+randomize_doc(#doc{} = Doc) ->
+ Doc#doc{
+ deleted = false,
+ body = random_body()
+ }.
+
+
+randomize_deleted_doc(Doc) ->
+ NewDoc = case rand:uniform() < 0.05 of
+ true -> randomize_doc(Doc);
+ false -> Doc#doc{body = {[]}}
+ end,
+ NewDoc#doc{deleted = true}.
+
+
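+% Reads the "external" size reported by get_db_info, which is what the size
+% assertions above compare against.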
+db_size(Info) when is_list(Info) ->
+ {sizes, {Sizes}} = lists:keyfind(sizes, 1, Info),
+ {<<"external">>, External} = lists:keyfind(<<"external">>, 1, Sizes),
+ External;
+db_size(Db) when is_map(Db) ->
+ {ok, Info} = fabric2_db:get_db_info(Db),
+ db_size(Info).
+
+
+doc_size(#doc{id = <<"_local/", _/binary>>} = Doc) ->
+ fabric2_util:ldoc_size(Doc);
+doc_size(#doc{} = Doc) ->
+ fabric2_util:rev_size(Doc).
+
+
+-define(MAX_JSON_ELEMENTS, 5).
+-define(MAX_STRING_LEN, 10).
+-define(MAX_INT, 4294967296).
+
+
+random_body() ->
+ Elems = rand:uniform(?MAX_JSON_ELEMENTS),
+ {Obj, _} = random_json_object(Elems),
+ Obj.
+
+
+random_json(MaxElems) ->
+ case choose([object, array, terminal]) of
+ object -> random_json_object(MaxElems);
+ array -> random_json_array(MaxElems);
+ terminal -> {random_json_terminal(), MaxElems}
+ end.
+
+
+random_json_object(MaxElems) ->
+ NumKeys = rand:uniform(MaxElems + 1) - 1,
+ {Props, RemElems} = lists:mapfoldl(fun(_, Acc1) ->
+ {Value, Acc2} = random_json(Acc1),
+ {{random_json_string(), Value}, Acc2}
+ end, MaxElems - NumKeys, lists:seq(1, NumKeys)),
+ {{Props}, RemElems}.
+
+
+random_json_array(MaxElems) ->
+ NumItems = rand:uniform(MaxElems + 1) - 1,
+ lists:mapfoldl(fun(_, Acc1) ->
+ random_json(Acc1)
+ end, MaxElems - NumItems, lists:seq(1, NumItems)).
+
+
+random_json_terminal() ->
+ case choose([null, true, false, number, string]) of
+ null -> null;
+ true -> true;
+ false -> false;
+ number -> random_json_number();
+ string -> random_json_string()
+ end.
+
+
+random_json_number() ->
+ AbsValue = case choose([integer, double]) of
+ integer -> rand:uniform(?MAX_INT);
+ double -> rand:uniform() * rand:uniform()
+ end,
+ case choose([pos, neg]) of
+ pos -> AbsValue;
+ neg -> -1 * AbsValue
+ end.
+
+
+random_json_string() ->
+ random_string(0, ?MAX_STRING_LEN).
+
+
+random_attachment(undefined) ->
+ random_attachment(random_string(1, 32));
+
+random_attachment(Name) when is_binary(Name) ->
+ Type = random_string(1, 32),
+ Data = random_string(1, 512),
+ Md5 = erlang:md5(Data),
+ couch_att:new([
+ {name, Name},
+ {type, Type},
+ {att_len, size(Data)},
+ {data, Data},
+ {encoding, identity},
+ {md5, Md5}
+ ]).
+
+
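+% Random strings draw from an alphabet that deliberately includes multi-byte
+% code points (U+0001, U+00A2, U+20AC, U+10348), presumably so size accounting
+% is also exercised against non-ASCII UTF-8 byte lengths.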
+random_string(MinLen, MaxLen) ->
+ Alphabet = [
+ $a, $b, $c, $d, $e, $f, $g, $h, $i, $j, $k, $l, $m,
+ $n, $o, $p, $q, $r, $s, $t, $u, $v, $w, $x, $y, $z,
+ $A, $B, $C, $D, $E, $F, $G, $H, $I, $J, $K, $L, $M,
+ $N, $O, $P, $Q, $R, $S, $T, $U, $V, $W, $Y, $X, $Z,
+ $1, $2, $3, $4, $5, $6, $7, $8, $9, $0,
+ $!, $@, $#, $$, $%, $^, $&, $*, $(, $),
+ $ , ${, $}, $[, $], $", $', $-, $_, $+, $=, $,, $.,
+ $\x{1}, $\x{a2}, $\x{20ac}, $\x{10348}
+ ],
+ Len = MinLen + rand:uniform(MaxLen - MinLen) - 1,
+ Str = lists:map(fun(_) ->
+ choose(Alphabet)
+ end, lists:seq(1, Len)),
+ unicode:characters_to_binary(Str).
+
+
+choose(Options) ->
+ Pos = rand:uniform(length(Options)),
+ lists:nth(Pos, Options).
diff --git a/src/fabric/test/fabric2_dir_prefix_tests.erl b/src/fabric/test/fabric2_dir_prefix_tests.erl
new file mode 100644
index 000000000..2943d6533
--- /dev/null
+++ b/src/fabric/test/fabric2_dir_prefix_tests.erl
@@ -0,0 +1,71 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_dir_prefix_tests).
+
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("fabric2_test.hrl").
+
+
+dir_prefix_test_() ->
+ {
+ "Test couchdb fdb directory prefix",
+ setup,
+ fun() ->
+            % erlfdb, rexi and mem3 are all dependencies of fabric. We make
+            % sure to start them so that when fabric is started during the
+            % test it already has its dependencies running
+ test_util:start_couch([erlfdb, rexi, mem3, ctrace, fabric])
+ end,
+ fun(Ctx) ->
+ config:delete("fabric", "fdb_directory"),
+ test_util:stop_couch(Ctx)
+ end,
+ with([
+ ?TDEF(default_prefix, 15),
+ ?TDEF(custom_prefix, 15)
+ ])
+ }.
+
+
+default_prefix(_) ->
+ erase(fdb_directory),
+ ok = config:delete("fabric", "fdb_directory", false),
+ ok = application:stop(fabric),
+ ok = application:start(fabric),
+
+ ?assertEqual([<<"couchdb">>], fabric2_server:fdb_directory()),
+
+ % Try again to test pdict caching code
+ ?assertEqual([<<"couchdb">>], fabric2_server:fdb_directory()),
+
+ % Check that we can create dbs
+ DbName = ?tempdb(),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])).
+
+
+custom_prefix(_) ->
+ erase(fdb_directory),
+ ok = config:set("fabric", "fdb_directory", "couchdb_foo", false),
+ ok = application:stop(fabric),
+ ok = application:start(fabric),
+
+ ?assertEqual([<<"couchdb_foo">>], fabric2_server:fdb_directory()),
+
+ % Try again to test pdict caching code
+ ?assertEqual([<<"couchdb_foo">>], fabric2_server:fdb_directory()),
+
+ % Check that we can create dbs
+ DbName = ?tempdb(),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])).
diff --git a/src/fabric/test/fabric2_doc_att_tests.erl b/src/fabric/test/fabric2_doc_att_tests.erl
new file mode 100644
index 000000000..5d28b6da0
--- /dev/null
+++ b/src/fabric/test/fabric2_doc_att_tests.erl
@@ -0,0 +1,331 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_doc_att_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("fabric2.hrl").
+-include("fabric2_test.hrl").
+
+
+doc_crud_test_() ->
+ {
+ "Test document CRUD operations",
+ {
+ setup,
+ fun setup/0,
+ fun cleanup/1,
+ with([
+ ?TDEF(create_att),
+ ?TDEF(create_att_already_compressed),
+ ?TDEF(delete_att),
+ ?TDEF(multiple_atts),
+ ?TDEF(delete_one_att),
+ ?TDEF(large_att),
+ ?TDEF(att_on_conflict_isolation)
+ ])
+ }
+ }.
+
+
+setup() ->
+ Ctx = test_util:start_couch([fabric]),
+ {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+ {Db, Ctx}.
+
+
+cleanup({Db, Ctx}) ->
+ ok = fabric2_db:delete(fabric2_db:name(Db), []),
+ test_util:stop_couch(Ctx).
+
+
+create_att({Db, _}) ->
+ DocId = fabric2_util:uuid(),
+ Att1 = couch_att:new([
+ {name, <<"foo.txt">>},
+ {type, <<"application/octet-stream">>},
+ {att_len, 6},
+ {data, <<"foobar">>},
+ {encoding, identity},
+ {md5, <<>>}
+ ]),
+ Doc1 = #doc{
+ id = DocId,
+ atts = [Att1]
+ },
+ {ok, _} = fabric2_db:update_doc(Db, Doc1),
+ {ok, Doc2} = fabric2_db:open_doc(Db, DocId),
+ #doc{
+ atts = [Att2]
+ } = Doc2,
+ {loc, _Db, DocId, AttId} = couch_att:fetch(data, Att2),
+ AttData = fabric2_db:read_attachment(Db, DocId, AttId),
+ ?assertEqual(<<"foobar">>, AttData),
+
+ % Check that the raw keys exist
+ #{
+ db_prefix := DbPrefix
+ } = Db,
+ IdKey = erlfdb_tuple:pack({?DB_ATT_NAMES, DocId, AttId}, DbPrefix),
+ AttKey = erlfdb_tuple:pack({?DB_ATTS, DocId, AttId}, DbPrefix),
+
+ fabric2_fdb:transactional(fun(Tx) ->
+ IdVal = erlfdb:wait(erlfdb:get(Tx, IdKey)),
+ AttVals = erlfdb:wait(erlfdb:get_range_startswith(Tx, AttKey)),
+
+ ?assertEqual(erlfdb_tuple:pack({0, true}), IdVal),
+ Opts = [{minor_version, 1}, {compressed, 6}],
+ Expect = term_to_binary(<<"foobar">>, Opts),
+ ?assertMatch([{_, Expect}], AttVals)
+ end).
+
+
+create_att_already_compressed({Db, _}) ->
+ DocId = fabric2_util:uuid(),
+ Att1 = couch_att:new([
+ {name, <<"foo.txt">>},
+ {type, <<"application/octet-stream">>},
+ {att_len, 6},
+ {data, <<"foobar">>},
+ {encoding, gzip},
+ {md5, <<>>}
+ ]),
+ Doc1 = #doc{
+ id = DocId,
+ atts = [Att1]
+ },
+ {ok, _} = fabric2_db:update_doc(Db, Doc1),
+ {ok, Doc2} = fabric2_db:open_doc(Db, DocId),
+ #doc{
+ atts = [Att2]
+ } = Doc2,
+ {loc, _Db, DocId, AttId} = couch_att:fetch(data, Att2),
+ AttData = fabric2_db:read_attachment(Db, DocId, AttId),
+ ?assertEqual(<<"foobar">>, AttData),
+
+ % Check that the raw keys exist
+ #{
+ db_prefix := DbPrefix
+ } = Db,
+ IdKey = erlfdb_tuple:pack({?DB_ATT_NAMES, DocId, AttId}, DbPrefix),
+ AttKey = erlfdb_tuple:pack({?DB_ATTS, DocId, AttId}, DbPrefix),
+
+ fabric2_fdb:transactional(fun(Tx) ->
+ IdVal = erlfdb:wait(erlfdb:get(Tx, IdKey)),
+ AttVals = erlfdb:wait(erlfdb:get_range_startswith(Tx, AttKey)),
+
+ ?assertEqual(erlfdb_tuple:pack({0, false}), IdVal),
+ ?assertMatch([{_, <<"foobar">>}], AttVals)
+ end).
+
+
+delete_att({Db, _}) ->
+ DocId = fabric2_util:uuid(),
+ Att1 = couch_att:new([
+ {name, <<"foo.txt">>},
+ {type, <<"application/octet-stream">>},
+ {att_len, 6},
+ {data, <<"foobar">>},
+ {encoding, identity},
+ {md5, <<>>}
+ ]),
+ Doc1 = #doc{
+ id = DocId,
+ atts = [Att1]
+ },
+ {ok, _} = fabric2_db:update_doc(Db, Doc1),
+ {ok, Doc2} = fabric2_db:open_doc(Db, DocId),
+ #doc{
+ atts = [Att2]
+ } = Doc2,
+ {loc, _Db, DocId, AttId} = couch_att:fetch(data, Att2),
+
+ Doc3 = Doc2#doc{atts = []},
+ {ok, _} = fabric2_db:update_doc(Db, Doc3),
+
+ {ok, Doc4} = fabric2_db:open_doc(Db, DocId),
+ ?assertEqual([], Doc4#doc.atts),
+
+ % Check that the raw keys were removed
+ #{
+ db_prefix := DbPrefix
+ } = Db,
+ IdKey = erlfdb_tuple:pack({?DB_ATT_NAMES, DocId, AttId}, DbPrefix),
+ AttKey = erlfdb_tuple:pack({?DB_ATTS, DocId, AttId}, DbPrefix),
+
+ fabric2_fdb:transactional(fun(Tx) ->
+ IdVal = erlfdb:wait(erlfdb:get(Tx, IdKey)),
+ AttVals = erlfdb:wait(erlfdb:get_range_startswith(Tx, AttKey)),
+
+ ?assertEqual(not_found, IdVal),
+ ?assertMatch([], AttVals)
+ end).
+
+
+multiple_atts({Db, _}) ->
+ DocId = fabric2_util:uuid(),
+ Atts = [
+ mk_att(<<"foo.txt">>, <<"foobar">>),
+ mk_att(<<"bar.txt">>, <<"barfoo">>),
+ mk_att(<<"baz.png">>, <<"blargh">>)
+ ],
+ {ok, _} = create_doc(Db, DocId, Atts),
+ ?assertEqual(
+ #{
+ <<"foo.txt">> => <<"foobar">>,
+ <<"bar.txt">> => <<"barfoo">>,
+ <<"baz.png">> => <<"blargh">>
+ },
+ read_atts(Db, DocId)
+ ).
+
+
+delete_one_att({Db, _}) ->
+ DocId = fabric2_util:uuid(),
+ Atts1 = [
+ mk_att(<<"foo.txt">>, <<"foobar">>),
+ mk_att(<<"bar.txt">>, <<"barfoo">>),
+ mk_att(<<"baz.png">>, <<"blargh">>)
+ ],
+ {ok, RevId} = create_doc(Db, DocId, Atts1),
+ Atts2 = tl(Atts1),
+ {ok, _} = update_doc(Db, DocId, RevId, stubify(RevId, Atts2)),
+ ?assertEqual(
+ #{
+ <<"bar.txt">> => <<"barfoo">>,
+ <<"baz.png">> => <<"blargh">>
+ },
+ read_atts(Db, DocId)
+ ).
+
+
+large_att({Db, _}) ->
+ DocId = fabric2_util:uuid(),
+ % Total size ~360,000 bytes
+ AttData = iolist_to_binary([
+ <<"foobar">> || _ <- lists:seq(1, 60000)
+ ]),
+ Att1 = mk_att(<<"long.txt">>, AttData, gzip),
+ {ok, _} = create_doc(Db, DocId, [Att1]),
+ ?assertEqual(#{<<"long.txt">> => AttData}, read_atts(Db, DocId)),
+
+ {ok, Doc} = fabric2_db:open_doc(Db, DocId),
+ #doc{atts = [Att2]} = Doc,
+ {loc, _Db, DocId, AttId} = couch_att:fetch(data, Att2),
+
+ #{db_prefix := DbPrefix} = Db,
+ AttKey = erlfdb_tuple:pack({?DB_ATTS, DocId, AttId}, DbPrefix),
+ fabric2_fdb:transactional(fun(Tx) ->
+ AttVals = erlfdb:wait(erlfdb:get_range_startswith(Tx, AttKey)),
+ ?assertEqual(4, length(AttVals))
+ end).
+
+
+att_on_conflict_isolation({Db, _}) ->
+ DocId = fabric2_util:uuid(),
+ [PosRevA1, PosRevB1] = create_conflicts(Db, DocId, []),
+ Att = mk_att(<<"happy_goat.tiff">>, <<":D>">>),
+ {ok, PosRevA2} = update_doc(Db, DocId, PosRevA1, [Att]),
+ ?assertEqual(
+ #{<<"happy_goat.tiff">> => <<":D>">>},
+ read_atts(Db, DocId, PosRevA2)
+ ),
+ ?assertEqual(#{}, read_atts(Db, DocId, PosRevB1)).
+
+
+mk_att(Name, Data) ->
+ mk_att(Name, Data, identity).
+
+
+mk_att(Name, Data, Encoding) ->
+ couch_att:new([
+ {name, Name},
+ {type, <<"application/octet-stream">>},
+ {att_len, size(Data)},
+ {data, Data},
+ {encoding, Encoding},
+ {md5, <<>>}
+ ]).
+
+
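+% Converts attachments into stubs anchored at the given revision position so
+% that a subsequent update can reference the already-stored attachment data.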
+stubify(RevId, Atts) when is_list(Atts) ->
+ lists:map(fun(Att) ->
+ stubify(RevId, Att)
+ end, Atts);
+
+stubify({Pos, _Rev}, Att) ->
+ couch_att:store([
+ {data, stub},
+ {revpos, Pos}
+ ], Att).
+
+
+create_doc(Db, DocId, Atts) ->
+ Doc = #doc{
+ id = DocId,
+ atts = Atts
+ },
+ fabric2_db:update_doc(Db, Doc).
+
+
+update_doc(Db, DocId, {Pos, Rev}, Atts) ->
+ Doc = #doc{
+ id = DocId,
+ revs = {Pos, [Rev]},
+ atts = Atts
+ },
+ fabric2_db:update_doc(Db, Doc).
+
+
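+% Creates an initial revision and then replicates two fabricated revisions on
+% top of it, producing a two-branch conflict. The returned rev ids are sorted
+% descending so the higher (winning) revision comes first.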
+create_conflicts(Db, DocId, Atts) ->
+ Base = #doc{
+ id = DocId,
+ atts = Atts
+ },
+ {ok, {_, Rev1} = PosRev} = fabric2_db:update_doc(Db, Base),
+ <<Rev2:16/binary, Rev3:16/binary>> = fabric2_util:uuid(),
+ Doc1 = #doc{
+ id = DocId,
+ revs = {2, [Rev2, Rev1]},
+ atts = stubify(PosRev, Atts)
+ },
+ Doc2 = #doc{
+ id = DocId,
+ revs = {2, [Rev3, Rev1]},
+ atts = stubify(PosRev, Atts)
+ },
+ {ok, _} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+ {ok, _} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+ lists:reverse(lists:sort([{2, Rev2}, {2, Rev3}])).
+
+
+read_atts(Db, DocId) ->
+ {ok, #doc{atts = Atts}} = fabric2_db:open_doc(Db, DocId),
+ atts_to_map(Db, DocId, Atts).
+
+
+read_atts(Db, DocId, PosRev) ->
+ {ok, Docs} = fabric2_db:open_doc_revs(Db, DocId, [PosRev], []),
+ [{ok, #doc{atts = Atts}}] = Docs,
+ atts_to_map(Db, DocId, Atts).
+
+
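+% Follows each attachment's {loc, Db, DocId, AttId} data pointer, reads the
+% attachment binary back out of the database, and returns a Name => Data map.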
+atts_to_map(Db, DocId, Atts) ->
+ lists:foldl(fun(Att, Acc) ->
+ [Name, Data] = couch_att:fetch([name, data], Att),
+ {loc, _Db, DocId, AttId} = Data,
+ AttBin = fabric2_db:read_attachment(Db, DocId, AttId),
+ maps:put(Name, AttBin, Acc)
+ end, #{}, Atts).
diff --git a/src/fabric/test/fabric2_doc_count_tests.erl b/src/fabric/test/fabric2_doc_count_tests.erl
new file mode 100644
index 000000000..7aaf288f4
--- /dev/null
+++ b/src/fabric/test/fabric2_doc_count_tests.erl
@@ -0,0 +1,278 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_doc_count_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("fabric2_test.hrl").
+
+
+-define(DOC_COUNT, 10).
+
+
+doc_count_test_() ->
+ {
+ "Test document counting operations",
+ {
+ setup,
+ fun setup/0,
+ fun cleanup/1,
+ with([
+ ?TDEF(normal_docs),
+ ?TDEF(replicated_docs),
+ ?TDEF(design_docs),
+ ?TDEF(local_docs)
+ ])
+ }
+ }.
+
+
+setup() ->
+ Ctx = test_util:start_couch([fabric]),
+ {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+ {Db, Ctx}.
+
+
+cleanup({Db, Ctx}) ->
+ ok = fabric2_db:delete(fabric2_db:name(Db), []),
+ test_util:stop_couch(Ctx).
+
+
+normal_docs({Db, _}) ->
+ {DocCount, DelDocCount, DDocCount, LDocCount} = get_doc_counts(Db),
+
+ Docs1 = lists:map(fun(Id) ->
+ Doc = #doc{
+ id = integer_to_binary(Id),
+ body = {[{<<"value">>, Id}]}
+ },
+ {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, Doc, []),
+ Doc#doc{revs = {RevPos, [Rev]}}
+ end, lists:seq(1, ?DOC_COUNT)),
+
+ check_doc_counts(
+ Db,
+ DocCount + ?DOC_COUNT,
+ DelDocCount,
+ DDocCount,
+ LDocCount
+ ),
+
+ Docs2 = lists:map(fun(Doc) ->
+ {[{<<"value">>, V}]} = Doc#doc.body,
+ NewDoc = case V rem 2 of
+ 0 -> Doc#doc{deleted = true};
+ 1 -> Doc
+ end,
+ {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, NewDoc, []),
+ NewDoc#doc{revs = {RevPos, [Rev]}}
+ end, Docs1),
+
+ check_doc_counts(
+ Db,
+ DocCount + ?DOC_COUNT div 2,
+ DelDocCount + ?DOC_COUNT div 2,
+ DDocCount,
+ LDocCount
+ ),
+
+ lists:map(fun(Doc) ->
+ case Doc#doc.deleted of
+ true ->
+ Undeleted = Doc#doc{
+ revs = {0, []},
+ deleted = false
+ },
+ {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, Undeleted, []),
+ Undeleted#doc{revs = {RevPos, [Rev]}};
+ false ->
+ Doc
+ end
+ end, Docs2),
+
+ check_doc_counts(
+ Db,
+ DocCount + ?DOC_COUNT,
+ DelDocCount,
+ DDocCount,
+ LDocCount
+ ).
+
+
+replicated_docs({Db, _}) ->
+ {DocCount, DelDocCount, DDocCount, LDocCount} = get_doc_counts(Db),
+
+ Opts = [replicated_changes],
+ {R1, R2, R3} = {<<"r1">>, <<"r2">>, <<"r3">>},
+
+ % First case is a simple replicated update
+ Doc1 = #doc{id = <<"rd1">>, revs = {1, [R1]}},
+ {ok, {1, R1}} = fabric2_db:update_doc(Db, Doc1, Opts),
+ check_doc_counts(Db, DocCount + 1, DelDocCount, DDocCount, LDocCount),
+
+ % Here a deleted document is replicated into the db. Doc count should not
+ % change, only deleted doc count.
+ Doc2 = #doc{id = <<"rd2">>, revs = {1, [R2]}, deleted = true},
+ {ok, {1, R2}} = fabric2_db:update_doc(Db, Doc2, Opts),
+ check_doc_counts(Db, DocCount + 1, DelDocCount + 1, DDocCount, LDocCount),
+
+    % Here we extend the deleted document's rev path but keep it deleted.
+    % The deleted doc count isn't bumped since the document was already
+    % counted as deleted
+ Doc3 = #doc{id = <<"rd2">>, revs = {2, [R3, R2]}, deleted = true},
+ {ok, {2, R3}} = fabric2_db:update_doc(Db, Doc3, Opts),
+ check_doc_counts(Db, DocCount + 1, DelDocCount + 1 , DDocCount, LDocCount).
+
+
+design_docs({Db, _}) ->
+ {DocCount, DelDocCount, DDocCount, LDocCount} = get_doc_counts(Db),
+
+ Docs1 = lists:map(fun(Id) ->
+ BinId = integer_to_binary(Id),
+ DDocId = <<?DESIGN_DOC_PREFIX, BinId/binary>>,
+ Doc = #doc{
+ id = DDocId,
+ body = {[{<<"value">>, Id}]}
+ },
+ {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, Doc, []),
+ Doc#doc{revs = {RevPos, [Rev]}}
+ end, lists:seq(1, ?DOC_COUNT)),
+
+ check_doc_counts(
+ Db,
+ DocCount + ?DOC_COUNT,
+ DelDocCount,
+ DDocCount + ?DOC_COUNT,
+ LDocCount
+ ),
+
+ Docs2 = lists:map(fun(Doc) ->
+ {[{<<"value">>, V}]} = Doc#doc.body,
+ NewDoc = case V rem 2 of
+ 0 -> Doc#doc{deleted = true};
+ 1 -> Doc
+ end,
+ {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, NewDoc, []),
+ NewDoc#doc{revs = {RevPos, [Rev]}}
+ end, Docs1),
+
+ check_doc_counts(
+ Db,
+ DocCount + ?DOC_COUNT div 2,
+ DelDocCount + ?DOC_COUNT div 2,
+ DDocCount + ?DOC_COUNT div 2,
+ LDocCount
+ ),
+
+ lists:map(fun(Doc) ->
+ case Doc#doc.deleted of
+ true ->
+ Undeleted = Doc#doc{
+ revs = {0, []},
+ deleted = false
+ },
+ {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, Undeleted, []),
+ Undeleted#doc{revs = {RevPos, [Rev]}};
+ false ->
+ Doc
+ end
+ end, Docs2),
+
+ check_doc_counts(
+ Db,
+ DocCount + ?DOC_COUNT,
+ DelDocCount,
+ DDocCount + ?DOC_COUNT,
+ LDocCount
+ ).
+
+
+local_docs({Db, _}) ->
+ {DocCount, DelDocCount, DDocCount, LDocCount} = get_doc_counts(Db),
+
+ Docs1 = lists:map(fun(Id) ->
+ BinId = integer_to_binary(Id),
+ LDocId = <<?LOCAL_DOC_PREFIX, BinId/binary>>,
+ Doc = #doc{
+ id = LDocId,
+ body = {[{<<"value">>, Id}]}
+ },
+ {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, Doc, []),
+ Doc#doc{revs = {RevPos, [Rev]}}
+ end, lists:seq(1, ?DOC_COUNT)),
+
+ check_doc_counts(
+ Db,
+ DocCount,
+ DelDocCount,
+ DDocCount,
+ LDocCount + ?DOC_COUNT
+ ),
+
+ Docs2 = lists:map(fun(Doc) ->
+ {[{<<"value">>, V}]} = Doc#doc.body,
+ NewDoc = case V rem 2 of
+ 0 -> Doc#doc{deleted = true};
+ 1 -> Doc
+ end,
+ {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, NewDoc, []),
+ NewDoc#doc{revs = {RevPos, [Rev]}}
+ end, Docs1),
+
+ check_doc_counts(
+ Db,
+ DocCount,
+ DelDocCount,
+ DDocCount,
+ LDocCount + ?DOC_COUNT div 2
+ ),
+
+ lists:map(fun(Doc) ->
+ case Doc#doc.deleted of
+ true ->
+ Undeleted = Doc#doc{
+ revs = {0, []},
+ deleted = false
+ },
+ {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, Undeleted, []),
+ Undeleted#doc{revs = {RevPos, [Rev]}};
+ false ->
+ Doc
+ end
+ end, Docs2),
+
+ check_doc_counts(
+ Db,
+ DocCount,
+ DelDocCount,
+ DDocCount,
+ LDocCount + ?DOC_COUNT
+ ).
+
+
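+% Snapshot of the four counters under test: total docs, deleted docs, design
+% docs and local docs. check_doc_counts/5 asserts the same values (and that
+% the <<"_all_docs">> count matches the plain doc count) after each step.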
+get_doc_counts(Db) ->
+ DocCount = fabric2_db:get_doc_count(Db),
+ DelDocCount = fabric2_db:get_del_doc_count(Db),
+ DDocCount = fabric2_db:get_doc_count(Db, <<"_design">>),
+ LDocCount = fabric2_db:get_doc_count(Db, <<"_local">>),
+ {DocCount, DelDocCount, DDocCount, LDocCount}.
+
+
+check_doc_counts(Db, DocCount, DelDocCount, DDocCount, LDocCount) ->
+ ?assertEqual(DocCount, fabric2_db:get_doc_count(Db)),
+ ?assertEqual(DelDocCount, fabric2_db:get_del_doc_count(Db)),
+ ?assertEqual(DocCount, fabric2_db:get_doc_count(Db, <<"_all_docs">>)),
+ ?assertEqual(DDocCount, fabric2_db:get_doc_count(Db, <<"_design">>)),
+ ?assertEqual(LDocCount, fabric2_db:get_doc_count(Db, <<"_local">>)).
diff --git a/src/fabric/test/fabric2_doc_crud_tests.erl b/src/fabric/test/fabric2_doc_crud_tests.erl
new file mode 100644
index 000000000..7a24b7d52
--- /dev/null
+++ b/src/fabric/test/fabric2_doc_crud_tests.erl
@@ -0,0 +1,1018 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_doc_crud_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("fabric2.hrl").
+-include("fabric2_test.hrl").
+
+
+doc_crud_test_() ->
+ {
+ "Test document CRUD operations",
+ {
+ setup,
+ fun setup/0,
+ fun cleanup/1,
+ with([
+ ?TDEF(open_missing_doc),
+ ?TDEF(create_new_doc),
+ ?TDEF(create_ddoc_basic),
+ ?TDEF(create_ddoc_requires_admin),
+ ?TDEF(create_ddoc_requires_validation),
+ ?TDEF(create_ddoc_requires_compilation),
+ ?TDEF(can_create_a_partitioned_ddoc),
+ ?TDEF(update_doc_basic),
+ ?TDEF(update_ddoc_basic),
+ ?TDEF(update_doc_replicated),
+ ?TDEF(update_doc_replicated_add_conflict),
+ ?TDEF(update_doc_replicated_changes_winner),
+ ?TDEF(update_doc_replicated_extension),
+ ?TDEF(update_doc_replicate_existing_rev),
+ ?TDEF(update_winning_conflict_branch),
+ ?TDEF(update_non_winning_conflict_branch),
+ ?TDEF(delete_doc_basic),
+ ?TDEF(delete_changes_winner),
+ ?TDEF(recreate_doc_basic),
+ ?TDEF(conflict_on_create_new_with_rev),
+ ?TDEF(conflict_on_update_with_no_rev),
+ ?TDEF(allow_create_new_as_deleted),
+ ?TDEF(conflict_on_recreate_as_deleted),
+ ?TDEF(conflict_on_extend_deleted),
+ ?TDEF(open_doc_revs_basic),
+ ?TDEF(open_doc_revs_all),
+ ?TDEF(open_doc_revs_latest),
+ ?TDEF(get_missing_revs_basic),
+ ?TDEF(get_missing_revs_on_missing_doc),
+ ?TDEF(open_missing_local_doc),
+ ?TDEF(create_local_doc_basic),
+ ?TDEF(update_local_doc_basic),
+ ?TDEF(delete_local_doc_basic),
+ ?TDEF(recreate_local_doc),
+ ?TDEF(create_local_doc_bad_rev),
+ ?TDEF(create_local_doc_random_rev),
+ ?TDEF(create_a_large_local_doc),
+ ?TDEF(create_2_large_local_docs),
+ ?TDEF(local_doc_with_previous_encoding),
+ ?TDEF(before_doc_update_skips_local_docs),
+ ?TDEF(open_doc_opts)
+ ])
+ }
+ }.
+
+
+setup() ->
+ Ctx = test_util:start_couch([fabric]),
+ {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+ {Db, Ctx}.
+
+
+cleanup({Db, Ctx}) ->
+ ok = fabric2_db:delete(fabric2_db:name(Db), []),
+ test_util:stop_couch(Ctx).
+
+
+open_missing_doc({Db, _}) ->
+ ?assertEqual({not_found, missing}, fabric2_db:open_doc(Db, <<"foo">>)).
+
+
+create_new_doc({Db, _}) ->
+ Doc = #doc{
+ id = fabric2_util:uuid(),
+ body = {[{<<"foo">>, <<"bar">>}]}
+ },
+ {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, Doc),
+ NewDoc = Doc#doc{revs = {RevPos, [Rev]}},
+ ?assertEqual({ok, NewDoc}, fabric2_db:open_doc(Db, Doc#doc.id)).
+
+
+create_ddoc_basic({Db, _}) ->
+ UUID = fabric2_util:uuid(),
+ DDocId = <<"_design/", UUID/binary>>,
+ Doc = #doc{
+ id = DDocId,
+ body = {[{<<"foo">>, <<"bar">>}]}
+ },
+ {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, Doc),
+ NewDoc = Doc#doc{revs = {RevPos, [Rev]}},
+ ?assertEqual({ok, NewDoc}, fabric2_db:open_doc(Db, Doc#doc.id)).
+
+
+can_create_a_partitioned_ddoc({Db, _}) ->
+ UUID = fabric2_util:uuid(),
+ DDocId = <<"_design/", UUID/binary>>,
+ Doc = #doc{
+ id = DDocId,
+ body = {[
+ {<<"options">>, {[{<<"partitioned">>, true}]}},
+ {<<"views">>, {[
+ {<<"foo">>, {[
+ {<<"map">>, <<"function(doc) {}">>}
+ ]}}
+ ]}}
+ ]}
+ },
+ ?assertMatch({ok, {_, _}}, fabric2_db:update_doc(Db, Doc)).
+
+
+create_ddoc_requires_admin({Db, _}) ->
+ Db2 = fabric2_db:set_user_ctx(Db, #user_ctx{}),
+ UUID = fabric2_util:uuid(),
+ DDocId = <<"_design/", UUID/binary>>,
+ Doc = #doc{
+ id = DDocId,
+ body = {[{<<"foo">>, <<"bar">>}]}
+ },
+ ?assertThrow({unauthorized, _}, fabric2_db:update_doc(Db2, Doc)).
+
+
+create_ddoc_requires_validation({Db, _}) ->
+ UUID = fabric2_util:uuid(),
+ DDocId = <<"_design/", UUID/binary>>,
+ Doc = #doc{
+ id = DDocId,
+ body = {[
+ {<<"views">>, {[
+ {<<"foo">>, {[
+ {<<"map">>, <<"function(doc) {}">>},
+ {<<"reduce">>, <<"_not_a_builtin_reduce">>}
+ ]}}
+ ]}}
+ ]}
+ },
+ ?assertThrow(
+ {bad_request, invalid_design_doc, _},
+ fabric2_db:update_doc(Db, Doc)
+ ).
+
+
+create_ddoc_requires_compilation({Db, _}) ->
+ UUID = fabric2_util:uuid(),
+ DDocId = <<"_design/", UUID/binary>>,
+ Doc = #doc{
+ id = DDocId,
+ body = {[
+ {<<"language">>, <<"javascript">>},
+ {<<"views">>, {[
+ {<<"foo">>, {[
+ {<<"map">>, <<"Hopefully this is invalid JavaScript">>}
+ ]}}
+ ]}}
+ ]}
+ },
+ ?assertThrow(
+ {bad_request, compilation_error, _},
+ fabric2_db:update_doc(Db, Doc)
+ ).
+
+
+update_doc_basic({Db, _}) ->
+ Doc1 = #doc{
+ id = fabric2_util:uuid(),
+ body = {[{<<"state">>, 1}]}
+ },
+ {ok, {Pos1, Rev1}} = fabric2_db:update_doc(Db, Doc1),
+ Doc2 = Doc1#doc{
+ revs = {Pos1, [Rev1]},
+ body = {[{<<"state">>, 2}]}
+ },
+ {ok, {Pos2, Rev2}} = fabric2_db:update_doc(Db, Doc2),
+ Doc3 = Doc2#doc{
+ revs = {Pos2, [Rev2, Rev1]}
+ },
+ ?assertEqual({ok, Doc3}, fabric2_db:open_doc(Db, Doc2#doc.id)).
+
+
+update_ddoc_basic({Db, _}) ->
+ UUID = fabric2_util:uuid(),
+ DDocId = <<"_design/", UUID/binary>>,
+ Doc1 = #doc{
+ id = DDocId,
+ body = {[{<<"state">>, 1}]}
+ },
+ {ok, {Pos1, Rev1}} = fabric2_db:update_doc(Db, Doc1),
+ Doc2 = Doc1#doc{
+ revs = {Pos1, [Rev1]},
+ body = {[{<<"state">>, 2}]}
+ },
+ {ok, {Pos2, Rev2}} = fabric2_db:update_doc(Db, Doc2),
+ Doc3 = Doc2#doc{
+ revs = {Pos2, [Rev2, Rev1]}
+ },
+ ?assertEqual({ok, Doc3}, fabric2_db:open_doc(Db, Doc2#doc.id)).
+
+
+update_doc_replicated({Db, _}) ->
+ Doc = #doc{
+ id = fabric2_util:uuid(),
+ revs = {2, [fabric2_util:uuid(), fabric2_util:uuid()]},
+ body = {[{<<"foo">>, <<"bar">>}]}
+ },
+ {ok, {2, _}} = fabric2_db:update_doc(Db, Doc, [replicated_changes]),
+ ?assertEqual({ok, Doc}, fabric2_db:open_doc(Db, Doc#doc.id)).
+
+
+update_doc_replicated_add_conflict({Db, _}) ->
+ [Rev1, Rev2, Rev3] = lists:sort([
+ fabric2_util:uuid(),
+ fabric2_util:uuid(),
+ fabric2_util:uuid()
+ ]),
+ Doc1 = #doc{
+ id = fabric2_util:uuid(),
+ revs = {2, [Rev3, Rev1]},
+ body = {[{<<"foo">>, <<"bar">>}]}
+ },
+ {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+ ?assertEqual({ok, Doc1}, fabric2_db:open_doc(Db, Doc1#doc.id)),
+ Doc2 = Doc1#doc{
+ revs = {2, [Rev2, Rev1]},
+ body = {[{<<"bar">>, <<"foo">>}]}
+ },
+ {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+ ?assertEqual({ok, Doc1}, fabric2_db:open_doc(Db, Doc2#doc.id)).
+
+
+update_doc_replicated_changes_winner({Db, _}) ->
+ [Rev1, Rev2, Rev3] = lists:sort([
+ fabric2_util:uuid(),
+ fabric2_util:uuid(),
+ fabric2_util:uuid()
+ ]),
+ Doc1 = #doc{
+ id = fabric2_util:uuid(),
+ revs = {2, [Rev2, Rev1]},
+ body = {[{<<"foo">>, <<"bar">>}]}
+ },
+ {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+ ?assertEqual({ok, Doc1}, fabric2_db:open_doc(Db, Doc1#doc.id)),
+ Doc2 = Doc1#doc{
+ revs = {2, [Rev3, Rev1]},
+ body = {[{<<"bar">>, <<"foo">>}]}
+ },
+ {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+ ?assertEqual({ok, Doc2}, fabric2_db:open_doc(Db, Doc2#doc.id)).
+
+
+update_doc_replicated_extension({Db, _}) ->
+    % No sort is necessary here; it is avoided on purpose to
+    % demonstrate that this test is not sort dependent
+ Rev1 = fabric2_util:uuid(),
+ Rev2 = fabric2_util:uuid(),
+ Rev3 = fabric2_util:uuid(),
+ Rev4 = fabric2_util:uuid(),
+ Doc1 = #doc{
+ id = fabric2_util:uuid(),
+ revs = {2, [Rev2, Rev1]},
+ body = {[{<<"foo">>, <<"bar">>}]}
+ },
+ {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+ Doc2 = Doc1#doc{
+ revs = {4, [Rev4, Rev3, Rev2]},
+ body = {[{<<"bar">>, <<"foo">>}]}
+ },
+ {ok, {4, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+ {ok, Doc3} = fabric2_db:open_doc(Db, Doc2#doc.id),
+ ?assertEqual({4, [Rev4, Rev3, Rev2, Rev1]}, Doc3#doc.revs),
+ ?assertEqual(Doc2#doc{revs = undefined}, Doc3#doc{revs = undefined}).
+
+
+update_doc_replicate_existing_rev({Db, _}) ->
+ Rev1 = fabric2_util:uuid(),
+ Rev2 = fabric2_util:uuid(),
+ Doc1 = #doc{
+ id = fabric2_util:uuid(),
+ revs = {2, [Rev2, Rev1]},
+ body = {[{<<"foo">>, <<"bar">>}]}
+ },
+ {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+ {ok, []} = fabric2_db:update_docs(Db, [Doc1], [replicated_changes]),
+ ?assertEqual({ok, Doc1}, fabric2_db:open_doc(Db, Doc1#doc.id)).
+
+
+update_winning_conflict_branch({Db, _}) ->
+ [Rev1, Rev2, Rev3] = lists:sort([
+ fabric2_util:uuid(),
+ fabric2_util:uuid(),
+ fabric2_util:uuid()
+ ]),
+ Doc1 = #doc{
+ id = fabric2_util:uuid(),
+ revs = {2, [Rev3, Rev1]},
+ body = {[{<<"foo">>, <<"bar">>}]}
+ },
+ {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+ Doc2 = Doc1#doc{
+ revs = {2, [Rev2, Rev1]},
+ body = {[{<<"bar">>, <<"foo">>}]}
+ },
+ {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+ % Update the winning branch
+ Doc3 = Doc1#doc{
+ revs = {2, [Rev3, Rev1]},
+ body = {[{<<"baz">>, 2}]}
+ },
+ {ok, {3, Rev4}} = fabric2_db:update_doc(Db, Doc3),
+ {ok, Doc4} = fabric2_db:open_doc(Db, Doc3#doc.id),
+ % Assert we've got the correct winner
+ ?assertEqual({3, [Rev4, Rev3, Rev1]}, Doc4#doc.revs),
+ ?assertEqual(Doc3#doc{revs = undefined}, Doc4#doc{revs = undefined}).
+
+
+update_non_winning_conflict_branch({Db, _}) ->
+ [Rev1, Rev2, Rev3] = lists:sort([
+ fabric2_util:uuid(),
+ fabric2_util:uuid(),
+ fabric2_util:uuid()
+ ]),
+ Doc1 = #doc{
+ id = fabric2_util:uuid(),
+ revs = {2, [Rev3, Rev1]},
+ body = {[{<<"foo">>, <<"bar">>}]}
+ },
+ {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+ Doc2 = Doc1#doc{
+ revs = {2, [Rev2, Rev1]},
+ body = {[{<<"bar">>, <<"foo">>}]}
+ },
+ {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+    % Update the non-winning branch
+ Doc3 = Doc1#doc{
+ revs = {2, [Rev2, Rev1]},
+ body = {[{<<"baz">>, 2}]}
+ },
+ {ok, {3, Rev4}} = fabric2_db:update_doc(Db, Doc3),
+ {ok, Doc4} = fabric2_db:open_doc(Db, Doc3#doc.id),
+ % Assert we've got the correct winner
+ ?assertEqual({3, [Rev4, Rev2, Rev1]}, Doc4#doc.revs),
+ ?assertEqual(Doc3#doc{revs = undefined}, Doc4#doc{revs = undefined}).
+
+
+delete_doc_basic({Db, _}) ->
+ Doc1 = #doc{
+ id = fabric2_util:uuid(),
+ body = {[{<<"state">>, 1}]}
+ },
+ {ok, {Pos1, Rev1}} = fabric2_db:update_doc(Db, Doc1),
+ Doc2 = Doc1#doc{
+ revs = {Pos1, [Rev1]},
+ deleted = true,
+ body = {[{<<"state">>, 2}]}
+ },
+ {ok, {Pos2, Rev2}} = fabric2_db:update_doc(Db, Doc2),
+ Doc3 = Doc2#doc{revs = {Pos2, [Rev2, Rev1]}},
+ ?assertEqual({ok, Doc3}, fabric2_db:open_doc(Db, Doc2#doc.id, [deleted])).
+
+
+delete_changes_winner({Db, _}) ->
+ [Rev1, Rev2, Rev3] = lists:sort([
+ fabric2_util:uuid(),
+ fabric2_util:uuid(),
+ fabric2_util:uuid()
+ ]),
+ Doc1 = #doc{
+ id = fabric2_util:uuid(),
+ revs = {2, [Rev3, Rev1]},
+ body = {[{<<"foo">>, <<"bar">>}]}
+ },
+ {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+ Doc2 = Doc1#doc{
+ revs = {2, [Rev2, Rev1]},
+ body = {[{<<"bar">>, <<"foo">>}]}
+ },
+ {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+ % Delete the winning branch
+ Doc3 = Doc1#doc{
+ revs = {2, [Rev3, Rev1]},
+ deleted = true,
+ body = {[]}
+ },
+ {ok, {3, _}} = fabric2_db:update_doc(Db, Doc3),
+ ?assertEqual({ok, Doc2}, fabric2_db:open_doc(Db, Doc3#doc.id)).
+
+
+recreate_doc_basic({Db, _}) ->
+ Doc1 = #doc{
+ id = fabric2_util:uuid(),
+ body = {[{<<"state">>, 1}]}
+ },
+ {ok, {1, Rev1}} = fabric2_db:update_doc(Db, Doc1),
+ Doc2 = Doc1#doc{
+ revs = {1, [Rev1]},
+ deleted = true,
+ body = {[{<<"state">>, 2}]}
+ },
+ {ok, {2, Rev2}} = fabric2_db:update_doc(Db, Doc2),
+ Doc3 = Doc1#doc{
+ revs = {0, []},
+ deleted = false,
+ body = {[{<<"state">>, 3}]}
+ },
+ {ok, {3, Rev3}} = fabric2_db:update_doc(Db, Doc3),
+ {ok, Doc4} = fabric2_db:open_doc(Db, Doc3#doc.id),
+ ?assertEqual({3, [Rev3, Rev2, Rev1]}, Doc4#doc.revs),
+ ?assertEqual(Doc3#doc{revs = undefined}, Doc4#doc{revs = undefined}).
+
+
+conflict_on_create_new_with_rev({Db, _}) ->
+ Doc = #doc{
+ id = fabric2_util:uuid(),
+ revs = {1, [fabric2_util:uuid()]},
+ body = {[{<<"foo">>, <<"bar">>}]}
+ },
+ ?assertThrow(conflict, fabric2_db:update_doc(Db, Doc)).
+
+
+conflict_on_update_with_no_rev({Db, _}) ->
+ Doc1 = #doc{
+ id = fabric2_util:uuid(),
+ body = {[{<<"state">>, 1}]}
+ },
+ {ok, _} = fabric2_db:update_doc(Db, Doc1),
+ Doc2 = Doc1#doc{
+ revs = {0, []},
+ body = {[{<<"state">>, 2}]}
+ },
+ ?assertThrow(conflict, fabric2_db:update_doc(Db, Doc2)).
+
+
+allow_create_new_as_deleted({Db, _}) ->
+ Doc = #doc{
+ id = fabric2_util:uuid(),
+ deleted = true,
+ body = {[{<<"foo">>, <<"bar">>}]}
+ },
+ {ok, {1, Rev}} = fabric2_db:update_doc(Db, Doc),
+ ?assertEqual({not_found, deleted}, fabric2_db:open_doc(Db, Doc#doc.id)),
+ Doc1 = Doc#doc{
+ revs = {1, [Rev]}
+ },
+ ?assertEqual({ok, Doc1}, fabric2_db:open_doc(Db, Doc#doc.id, [deleted])),
+    % This only works when the document has never existed, to match
+    % CouchDB 3.x behavior
+ ?assertThrow(conflict, fabric2_db:update_doc(Db, Doc)).
+
+
+conflict_on_recreate_as_deleted({Db, _}) ->
+ Doc1 = #doc{
+ id = fabric2_util:uuid(),
+ body = {[{<<"state">>, 1}]}
+ },
+ {ok, {Pos1, Rev1}} = fabric2_db:update_doc(Db, Doc1),
+ Doc2 = Doc1#doc{
+ revs = {Pos1, [Rev1]},
+ deleted = true,
+ body = {[{<<"state">>, 2}]}
+ },
+ {ok, _} = fabric2_db:update_doc(Db, Doc2),
+ Doc3 = Doc1#doc{
+ revs = {0, []},
+ deleted = true,
+ body = {[{<<"state">>, 3}]}
+ },
+ ?assertThrow(conflict, fabric2_db:update_doc(Db, Doc3)).
+
+
+conflict_on_extend_deleted({Db, _}) ->
+ Doc1 = #doc{
+ id = fabric2_util:uuid(),
+ body = {[{<<"state">>, 1}]}
+ },
+ {ok, {Pos1, Rev1}} = fabric2_db:update_doc(Db, Doc1),
+ Doc2 = Doc1#doc{
+ revs = {Pos1, [Rev1]},
+ deleted = true,
+ body = {[{<<"state">>, 2}]}
+ },
+ {ok, {Pos2, Rev2}} = fabric2_db:update_doc(Db, Doc2),
+ Doc3 = Doc1#doc{
+ revs = {Pos2, [Rev2]},
+ deleted = false,
+ body = {[{<<"state">>, 3}]}
+ },
+ ?assertThrow(conflict, fabric2_db:update_doc(Db, Doc3)).
+
+
+open_doc_revs_basic({Db, _}) ->
+ [Rev1, Rev2, Rev3] = lists:sort([
+ fabric2_util:uuid(),
+ fabric2_util:uuid(),
+ fabric2_util:uuid()
+ ]),
+ DocId = fabric2_util:uuid(),
+ Doc1 = #doc{
+ id = DocId,
+ revs = {2, [Rev3, Rev1]},
+ body = {[{<<"foo">>, <<"bar">>}]}
+ },
+ {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+ Doc2 = Doc1#doc{
+ revs = {2, [Rev2, Rev1]},
+ body = {[{<<"bar">>, <<"foo">>}]}
+ },
+ {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+
+ {ok, [{ok, Doc3}]} = fabric2_db:open_doc_revs(Db, DocId, [{2, Rev3}], []),
+ ?assertEqual(Doc1, Doc3),
+
+ {ok, [{ok, Doc4}]} = fabric2_db:open_doc_revs(Db, DocId, [{2, Rev2}], []),
+ ?assertEqual(Doc2, Doc4),
+
+ Revs = [{2, Rev3}, {2, Rev2}, {1, Rev1}],
+ {ok, Docs} = fabric2_db:open_doc_revs(Db, DocId, Revs, []),
+ ?assert(length(Docs) == 3),
+ ?assert(lists:member({ok, Doc1}, Docs)),
+ ?assert(lists:member({ok, Doc2}, Docs)),
+ ?assert(lists:member({{not_found, missing}, {1, Rev1}}, Docs)),
+
+    % Make sure arbitrary made-up revisions are accepted
+ MissingRevs = [{5, fabric2_util:uuid()}, {1, fabric2_util:uuid()}],
+ {ok, NFMissing} = fabric2_db:open_doc_revs(Db, DocId, MissingRevs, []),
+ ?assertEqual(2, length(NFMissing)),
+ lists:foreach(fun(MR) ->
+ ?assert(lists:member({{not_found, missing}, MR}, NFMissing))
+ end, MissingRevs).
+
+
+open_doc_revs_all({Db, _}) ->
+ [Rev1, Rev2, Rev3] = lists:sort([
+ fabric2_util:uuid(),
+ fabric2_util:uuid(),
+ fabric2_util:uuid()
+ ]),
+ DocId = fabric2_util:uuid(),
+ Doc1 = #doc{
+ id = DocId,
+ revs = {2, [Rev3, Rev1]},
+ body = {[{<<"foo">>, <<"bar">>}]}
+ },
+ {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+ Doc2 = Doc1#doc{
+ revs = {2, [Rev2, Rev1]},
+ body = {[{<<"bar">>, <<"foo">>}]}
+ },
+ {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+
+ {ok, Docs} = fabric2_db:open_doc_revs(Db, DocId, all, []),
+ ?assert(length(Docs) == 2),
+ ?assert(lists:member({ok, Doc1}, Docs)),
+ ?assert(lists:member({ok, Doc2}, Docs)).
+
+
+open_doc_revs_latest({Db, _}) ->
+ [Rev1, Rev2, Rev3] = lists:sort([
+ fabric2_util:uuid(),
+ fabric2_util:uuid(),
+ fabric2_util:uuid()
+ ]),
+ DocId = fabric2_util:uuid(),
+ Doc1 = #doc{
+ id = DocId,
+ revs = {2, [Rev3, Rev1]},
+ body = {[{<<"foo">>, <<"bar">>}]}
+ },
+ {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+ Doc2 = Doc1#doc{
+ revs = {2, [Rev2, Rev1]},
+ body = {[{<<"bar">>, <<"foo">>}]}
+ },
+ {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+
+ Opts = [latest],
+ {ok, [{ok, Doc3}]} = fabric2_db:open_doc_revs(Db, DocId, [{2, Rev3}], Opts),
+ ?assertEqual(Doc1, Doc3),
+
+ {ok, Docs} = fabric2_db:open_doc_revs(Db, DocId, [{1, Rev1}], Opts),
+ ?assert(length(Docs) == 2),
+ ?assert(lists:member({ok, Doc1}, Docs)),
+ ?assert(lists:member({ok, Doc2}, Docs)).
+
+
+get_missing_revs_basic({Db, _}) ->
+ [Rev1, Rev2, Rev3] = lists:sort([
+ fabric2_util:uuid(),
+ fabric2_util:uuid(),
+ fabric2_util:uuid()
+ ]),
+ DocId = fabric2_util:uuid(),
+ Doc1 = #doc{
+ id = DocId,
+ revs = {2, [Rev3, Rev1]},
+ body = {[{<<"foo">>, <<"bar">>}]}
+ },
+ {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+ Doc2 = Doc1#doc{
+ revs = {2, [Rev2, Rev1]},
+ body = {[{<<"bar">>, <<"foo">>}]}
+ },
+ {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+
+ % Check that we can find all revisions
+ AllRevs = [{1, Rev1}, {2, Rev2}, {2, Rev3}],
+ ?assertEqual(
+ {ok, []},
+ fabric2_db:get_missing_revs(Db, [{DocId, AllRevs}])
+ ),
+
+ % Check that a missing revision is found with no possible ancestors
+ MissingRev = {2, fabric2_util:uuid()},
+ ?assertEqual(
+ {ok, [{DocId, [MissingRev], []}]},
+ fabric2_db:get_missing_revs(Db, [{DocId, [MissingRev]}])
+ ),
+
+ % Check that only a missing rev is returned
+ ?assertEqual(
+ {ok, [{DocId, [MissingRev], []}]},
+ fabric2_db:get_missing_revs(Db, [{DocId, [MissingRev | AllRevs]}])
+ ),
+
+ % Check that we can find possible ancestors
+ MissingWithAncestors = {4, fabric2_util:uuid()},
+ PossibleAncestors = [{2, Rev2}, {2, Rev3}],
+ ?assertEqual(
+ {ok, [{DocId, [MissingWithAncestors], PossibleAncestors}]},
+ fabric2_db:get_missing_revs(Db, [{DocId, [MissingWithAncestors]}])
+ ).
+
+
+get_missing_revs_on_missing_doc({Db, _}) ->
+ Revs = lists:sort([
+ couch_doc:rev_to_str({1, fabric2_util:uuid()}),
+ couch_doc:rev_to_str({2, fabric2_util:uuid()}),
+ couch_doc:rev_to_str({800, fabric2_util:uuid()})
+ ]),
+ DocId = fabric2_util:uuid(),
+ {ok, Resp} = fabric2_db:get_missing_revs(Db, [{DocId, Revs}]),
+ ?assertMatch([{DocId, [_ | _], []}], Resp),
+ [{DocId, Missing, _}] = Resp,
+ MissingStrs = [couch_doc:rev_to_str(Rev) || Rev <- Missing],
+ ?assertEqual(Revs, lists:sort(MissingStrs)).
+
+
+open_missing_local_doc({Db, _}) ->
+ ?assertEqual(
+ {not_found, missing},
+ fabric2_db:open_doc(Db, <<"_local/foo">>, [])
+ ).
+
+
+create_local_doc_basic({Db, _}) ->
+ UUID = fabric2_util:uuid(),
+ LDocId = <<?LOCAL_DOC_PREFIX, UUID/binary>>,
+ Doc1 = #doc{
+ id = LDocId,
+ revs = {0, []},
+ deleted = false,
+ body = {[{<<"ohai">>, <<"there">>}]}
+ },
+ ?assertEqual({ok, {0, <<"1">>}}, fabric2_db:update_doc(Db, Doc1)),
+ {ok, Doc2} = fabric2_db:open_doc(Db, Doc1#doc.id, []),
+ ?assertEqual(Doc1#doc{revs = {0, [<<"1">>]}}, Doc2).
+
+
+update_local_doc_basic({Db, _}) ->
+ UUID = fabric2_util:uuid(),
+ LDocId = <<?LOCAL_DOC_PREFIX, UUID/binary>>,
+ Doc1 = #doc{
+ id = LDocId,
+ revs = {0, []},
+ deleted = false,
+ body = {[{<<"ohai">>, <<"there">>}]}
+ },
+ ?assertEqual({ok, {0, <<"1">>}}, fabric2_db:update_doc(Db, Doc1)),
+ Doc2 = Doc1#doc{
+ revs = {0, [<<"1">>]},
+ body = {[{<<"whiz">>, <<"bang">>}]}
+ },
+ ?assertEqual({ok, {0, <<"2">>}}, fabric2_db:update_doc(Db, Doc2)),
+ {ok, Doc3} = fabric2_db:open_doc(Db, Doc1#doc.id, []),
+ ?assertEqual(Doc2#doc{revs = {0, [<<"2">>]}}, Doc3).
+
+
+delete_local_doc_basic({Db, _}) ->
+ UUID = fabric2_util:uuid(),
+ LDocId = <<?LOCAL_DOC_PREFIX, UUID/binary>>,
+ Doc1 = #doc{
+ id = LDocId,
+ revs = {0, []},
+ deleted = false,
+ body = {[{<<"ohai">>, <<"there">>}]}
+ },
+ ?assertEqual({ok, {0, <<"1">>}}, fabric2_db:update_doc(Db, Doc1)),
+ Doc2 = Doc1#doc{
+ revs = {0, [<<"1">>]},
+ deleted = true,
+ body = {[]}
+ },
+ ?assertEqual({ok, {0, <<"0">>}}, fabric2_db:update_doc(Db, Doc2)),
+ ?assertEqual(
+ {not_found, missing},
+ fabric2_db:open_doc(Db, LDocId)
+ ).
+
+
+recreate_local_doc({Db, _}) ->
+ UUID = fabric2_util:uuid(),
+ LDocId = <<?LOCAL_DOC_PREFIX, UUID/binary>>,
+ Doc1 = #doc{
+ id = LDocId,
+ revs = {0, []},
+ deleted = false,
+ body = {[{<<"ohai">>, <<"there">>}]}
+ },
+ ?assertEqual({ok, {0, <<"1">>}}, fabric2_db:update_doc(Db, Doc1)),
+ Doc2 = Doc1#doc{
+ revs = {0, [<<"1">>]},
+ deleted = true,
+ body = {[]}
+ },
+ ?assertEqual({ok, {0, <<"0">>}}, fabric2_db:update_doc(Db, Doc2)),
+ ?assertEqual(
+ {not_found, missing},
+ fabric2_db:open_doc(Db, LDocId)
+ ),
+
+ ?assertEqual({ok, {0, <<"1">>}}, fabric2_db:update_doc(Db, Doc1)),
+ {ok, Doc3} = fabric2_db:open_doc(Db, LDocId),
+ ?assertEqual(Doc1#doc{revs = {0, [<<"1">>]}}, Doc3).
+
+
+create_local_doc_bad_rev({Db, _}) ->
+ UUID = fabric2_util:uuid(),
+ LDocId = <<?LOCAL_DOC_PREFIX, UUID/binary>>,
+ Doc1 = #doc{
+ id = LDocId,
+ revs = {0, [<<"not a number">>]}
+ },
+ ?assertThrow(<<"Invalid rev format">>, fabric2_db:update_doc(Db, Doc1)),
+
+ Doc2 = Doc1#doc{
+ revs = bad_bad_rev_roy_brown
+ },
+ ?assertThrow(<<"Invalid rev format">>, fabric2_db:update_doc(Db, Doc2)).
+
+
+create_local_doc_random_rev({Db, _}) ->
+    % Local docs don't care what rev is passed in,
+    % as long as it's a number.
+ UUID = fabric2_util:uuid(),
+ LDocId = <<?LOCAL_DOC_PREFIX, UUID/binary>>,
+ Doc1 = #doc{
+ id = LDocId,
+ revs = {0, [<<"42">>]},
+ body = {[{<<"state">>, 1}]}
+ },
+ ?assertEqual({ok, {0, <<"43">>}}, fabric2_db:update_doc(Db, Doc1)),
+ {ok, Doc2} = fabric2_db:open_doc(Db, LDocId, []),
+ ?assertEqual(Doc1#doc{revs = {0, [<<"43">>]}}, Doc2),
+
+ Doc3 = Doc1#doc{
+ revs = {0, [<<"1234567890">>]},
+ body = {[{<<"state">>, 2}]}
+ },
+ ?assertEqual({ok, {0, <<"1234567891">>}}, fabric2_db:update_doc(Db, Doc3)),
+ {ok, Doc4} = fabric2_db:open_doc(Db, LDocId, []),
+ ?assertEqual(Doc3#doc{revs = {0, [<<"1234567891">>]}}, Doc4),
+
+ Doc5 = Doc1#doc{
+ revs = {0, [<<"1">>]},
+ body = {[{<<"state">>, 3}]}
+ },
+ ?assertEqual({ok, {0, <<"2">>}}, fabric2_db:update_doc(Db, Doc5)),
+ {ok, Doc6} = fabric2_db:open_doc(Db, LDocId, []),
+ ?assertEqual(Doc5#doc{revs = {0, [<<"2">>]}}, Doc6).
+
+
+create_a_large_local_doc({Db, _}) ->
+ UUID = fabric2_util:uuid(),
+ LDocId = <<?LOCAL_DOC_PREFIX, UUID/binary>>,
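+    % A ~300 KB body is assumed to exceed FoundationDB's value size limit
+    % (roughly 100 KB), so the local doc has to be split across multiple
+    % key-value rows.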
+ Body = << <<"x">> || _ <- lists:seq(1, 300000) >>,
+ Doc1 = #doc{
+ id = LDocId,
+ revs = {0, []},
+ body = Body
+ },
+ ?assertEqual({ok, {0, <<"1">>}}, fabric2_db:update_doc(Db, Doc1)),
+ {ok, Doc2} = fabric2_db:open_doc(Db, Doc1#doc.id, []),
+ ?assertEqual(Doc1#doc{revs = {0, [<<"1">>]}}, Doc2),
+
+ % Read via fold_local_docs
+ {ok, Result} = fabric2_db:fold_local_docs(Db, fun(Data, Acc) ->
+ case Data of
+ {row, [{id, DocId} | _]} when LDocId =:= DocId ->
+ {ok, [Data | Acc]};
+ _ ->
+ {ok, Acc}
+ end
+ end, [], []),
+ ?assertEqual([{row, [
+ {id, LDocId},
+ {key, LDocId},
+ {value, {[{rev, <<"0-1">>}]}}
+ ]}], Result).
+
+
+create_2_large_local_docs({Db, _}) ->
+    % Create a large doc, then overwrite it with a smaller one. This ensures
+    % the previous doc correctly clears its key range before the new, smaller
+    % one is written in its place.
+ UUID = fabric2_util:uuid(),
+ LDocId = <<?LOCAL_DOC_PREFIX, UUID/binary>>,
+ Body1 = << <<"x">> || _ <- lists:seq(1, 400000) >>,
+ Body2 = << <<"y">> || _ <- lists:seq(1, 150000) >>,
+
+ Doc1 = #doc{
+ id = LDocId,
+ revs = {0, []},
+ body = Body1
+ },
+
+ ?assertEqual({ok, {0, <<"1">>}}, fabric2_db:update_doc(Db, Doc1)),
+
+ Doc2 = Doc1#doc{body = Body2},
+ ?assertEqual({ok, {0, <<"1">>}}, fabric2_db:update_doc(Db, Doc2)),
+
+ {ok, Doc3} = fabric2_db:open_doc(Db, LDocId, []),
+ ?assertEqual(Doc2#doc{revs = {0, [<<"1">>]}}, Doc3).
+
+
+local_doc_with_previous_encoding({Db, _}) ->
+ #{db_prefix := DbPrefix} = Db,
+
+ Id = <<"_local/old_doc">>,
+ Body = {[{<<"x">>, 5}]},
+ Rev = <<"1">>,
+ Key = erlfdb_tuple:pack({?DB_LOCAL_DOCS, Id}, DbPrefix),
+
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ #{tx := Tx} = TxDb,
+ Term = term_to_binary({Rev, Body}, [{minor_version, 1}]),
+ ok = erlfdb:set(Tx, Key, Term)
+ end),
+
+ % Read old doc
+ {ok, Doc1} = fabric2_db:open_doc(Db, Id, []),
+ ?assertEqual({0, [<<"1">>]}, Doc1#doc.revs),
+ ?assertEqual({[{<<"x">>, 5}]}, Doc1#doc.body),
+
+ % Read via fold_local_docs.
+ {ok, Result} = fabric2_db:fold_local_docs(Db, fun(Data, Acc) ->
+ case Data of
+ {row, [{id, DocId} | _]} when Id =:= DocId ->
+ {ok, [Data | Acc]};
+ _ ->
+ {ok, Acc}
+ end
+ end, [], []),
+ ?assertEqual([{row, [
+ {id, Id},
+ {key, Id},
+ {value, {[{rev, <<"0-1">>}]}}
+ ]}], Result),
+
+ % Update doc
+ NewBody = {[{<<"y">>, 6}]},
+ Doc2 = Doc1#doc{body = NewBody},
+ ?assertEqual({ok, {0, <<"2">>}}, fabric2_db:update_doc(Db, Doc2)),
+ {ok, Doc3} = fabric2_db:open_doc(Db, Doc2#doc.id, []),
+ ?assertEqual({0, [<<"2">>]}, Doc3#doc.revs),
+ ?assertEqual(NewBody, Doc3#doc.body),
+
+    % The old value has now been rewritten in the current encoding: a
+    % 255-prefixed erlfdb tuple carrying the current format version and rev
+ <<255, OldDocBin/binary>> = fabric2_fdb:transactional(Db, fun(TxDb) ->
+ #{tx := Tx} = TxDb,
+ erlfdb:wait(erlfdb:get(Tx, Key))
+ end),
+ Unpacked = erlfdb_tuple:unpack(OldDocBin),
+ ?assertMatch({?CURR_LDOC_FORMAT, <<"2">>, _}, Unpacked).
+
+
+before_doc_update_skips_local_docs({Db0, _}) ->
+
+ BduFun = fun(Doc, _, _) ->
+ Doc#doc{body = {[<<"bdu_was_here">>, true]}}
+ end,
+
+ Db = Db0#{before_doc_update := BduFun},
+
+ LDoc1 = #doc{id = <<"_local/ldoc1">>},
+ Doc1 = #doc{id = <<"doc1">>},
+
+ ?assertMatch({ok, {_, _}}, fabric2_db:update_doc(Db, LDoc1)),
+ ?assertMatch({ok, {_, _}}, fabric2_db:update_doc(Db, Doc1)),
+
+ {ok, LDoc2} = fabric2_db:open_doc(Db, LDoc1#doc.id),
+ {ok, Doc2} = fabric2_db:open_doc(Db, Doc1#doc.id),
+
+ ?assertEqual({[]}, LDoc2#doc.body),
+ ?assertEqual({[<<"bdu_was_here">>, true]}, Doc2#doc.body).
+
+
+open_doc_opts({Db, _}) ->
+ % Build out state so that we can exercise each doc
+ % open option. This requires a live revision with
+ % an attachment, a conflict, and a deleted conflict.
+ DocId = couch_uuids:random(),
+ Att1 = couch_att:new([
+ {name, <<"foo.txt">>},
+ {type, <<"application/octet-stream">>},
+ {att_len, 6},
+ {data, <<"foobar">>},
+ {encoding, identity},
+ {md5, <<>>}
+ ]),
+ Doc1A = #doc{
+ id = DocId,
+ atts = [Att1]
+ },
+ {ok, {Pos1, Rev1A}} = fabric2_db:update_doc(Db, Doc1A),
+ Att2 = couch_att:store([
+ {data, stub},
+ {revpos, 1}
+ ], Att1),
+ Doc1B = Doc1A#doc{
+ revs = {Pos1, [Rev1A]},
+ atts = [Att2]
+ },
+ {ok, {Pos2, Rev1B}} = fabric2_db:update_doc(Db, Doc1B),
+
+ Rev2 = crypto:strong_rand_bytes(16),
+ Rev3 = crypto:strong_rand_bytes(16),
+ Rev4 = crypto:strong_rand_bytes(16),
+
+ % Create a live conflict
+ Doc2 = #doc{
+ id = DocId,
+ revs = {1, [Rev2]}
+ },
+ {ok, _} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+
+ % Create a deleted conflict
+ Doc3 = #doc{
+ id = DocId,
+ revs = {1, [Rev3]}
+ },
+ {ok, _} = fabric2_db:update_doc(Db, Doc3, [replicated_changes]),
+ Doc4 = #doc{
+ id = DocId,
+ revs = {2, [Rev4, Rev3]},
+ deleted = true
+ },
+ {ok, _} = fabric2_db:update_doc(Db, Doc4, [replicated_changes]),
+
+ OpenOpts1 = [
+ revs_info,
+ conflicts,
+ deleted_conflicts,
+ local_seq,
+ {atts_since, [{Pos1, Rev1A}]}
+ ],
+ {ok, OpenedDoc1} = fabric2_db:open_doc(Db, DocId, OpenOpts1),
+
+ #doc{
+ id = DocId,
+ revs = {2, [Rev1B, Rev1A]},
+ atts = [Att3],
+ meta = Meta
+ } = OpenedDoc1,
+ ?assertEqual(stub, couch_att:fetch(data, Att3)),
+ ?assertEqual(
+ {revs_info, Pos2, [{Rev1B, available}, {Rev1A, missing}]},
+ lists:keyfind(revs_info, 1, Meta)
+ ),
+ ?assertEqual(
+ {conflicts, [{1, Rev2}]},
+ lists:keyfind(conflicts, 1, Meta)
+ ),
+ ?assertEqual(
+ {deleted_conflicts, [{2, Rev4}]},
+ lists:keyfind(deleted_conflicts, 1, Meta)
+ ),
+ ?assertMatch({_, <<_/binary>>}, lists:keyfind(local_seq, 1, Meta)),
+
+ % Empty atts_since list
+ {ok, OpenedDoc2} = fabric2_db:open_doc(Db, DocId, [{atts_since, []}]),
+ #doc{atts = [Att4]} = OpenedDoc2,
+ ?assertNotEqual(stub, couch_att:fetch(data, Att4)),
+
+ % Missing ancestor
+ Rev5 = crypto:strong_rand_bytes(16),
+ OpenOpts2 = [{atts_since, [{5, Rev5}]}],
+ {ok, OpenedDoc3} = fabric2_db:open_doc(Db, DocId, OpenOpts2),
+ #doc{atts = [Att5]} = OpenedDoc3,
+ ?assertNotEqual(stub, couch_att:fetch(data, Att5)).
+
diff --git a/src/fabric/test/fabric2_doc_fold_tests.erl b/src/fabric/test/fabric2_doc_fold_tests.erl
new file mode 100644
index 000000000..0695b450b
--- /dev/null
+++ b/src/fabric/test/fabric2_doc_fold_tests.erl
@@ -0,0 +1,378 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_doc_fold_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("fabric2_test.hrl").
+
+
+-define(DOC_COUNT, 50).
+
+
+doc_fold_test_() ->
+ {
+ "Test document fold operations",
+ {
+ setup,
+ fun setup_all/0,
+ fun teardown_all/1,
+ {
+ foreach,
+ fun setup/0,
+ fun cleanup/1,
+ [
+ ?TDEF_FE(fold_docs_basic),
+ ?TDEF_FE(fold_docs_rev),
+ ?TDEF_FE(fold_docs_with_start_key),
+ ?TDEF_FE(fold_docs_with_end_key),
+ ?TDEF_FE(fold_docs_with_both_keys_the_same),
+ ?TDEF_FE(fold_docs_with_different_keys, 10000),
+ ?TDEF_FE(fold_docs_with_limit),
+ ?TDEF_FE(fold_docs_with_skip),
+ ?TDEF_FE(fold_docs_with_skip_and_limit),
+ ?TDEF_FE(fold_docs_tx_too_old),
+ ?TDEF_FE(fold_docs_db_recreated)
+ ]
+ }
+ }
+ }.
+
+
+setup_all() ->
+ test_util:start_couch([fabric]).
+
+
+teardown_all(Ctx) ->
+ test_util:stop_couch(Ctx).
+
+
+setup() ->
+ {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+ DocIdRevs = lists:map(fun(Val) ->
+ DocId = fabric2_util:uuid(),
+ Doc = #doc{
+ id = DocId,
+ body = {[{<<"value">>, Val}]}
+ },
+ {ok, Rev} = fabric2_db:update_doc(Db, Doc, []),
+ {DocId, {[{rev, couch_doc:rev_to_str(Rev)}]}}
+ end, lists:seq(1, ?DOC_COUNT)),
+ meck:new(erlfdb, [passthrough]),
+ fabric2_test_util:tx_too_old_mock_erlfdb(),
+ {Db, lists:sort(DocIdRevs)}.
+
+
+cleanup({Db, _DocIdRevs}) ->
+ fabric2_test_util:tx_too_old_reset_errors(),
+ meck:unload(),
+ ok = fabric2_db:delete(fabric2_db:name(Db), []).
+
+
+fold_docs_basic({Db, DocIdRevs}) ->
+ {ok, {?DOC_COUNT, Rows}} = fabric2_db:fold_docs(Db, fun fold_fun/2, []),
+ ?assertEqual(DocIdRevs, lists:reverse(Rows)).
+
+
+fold_docs_rev({Db, DocIdRevs}) ->
+ Opts = [{dir, rev}],
+ {ok, {?DOC_COUNT, Rows}} =
+ fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts),
+ ?assertEqual(DocIdRevs, Rows).
+
+
+fold_docs_with_start_key({Db, DocIdRevs}) ->
+ {StartKey, _} = hd(DocIdRevs),
+ Opts = [{start_key, StartKey}],
+ {ok, {?DOC_COUNT, Rows}}
+ = fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts),
+ ?assertEqual(DocIdRevs, lists:reverse(Rows)),
+ if length(DocIdRevs) == 1 -> ok; true ->
+ fold_docs_with_start_key({Db, tl(DocIdRevs)})
+ end.
+
+
+fold_docs_with_end_key({Db, DocIdRevs}) ->
+ RevDocIdRevs = lists:reverse(DocIdRevs),
+ {EndKey, _} = hd(RevDocIdRevs),
+ Opts = [{end_key, EndKey}],
+ {ok, {?DOC_COUNT, Rows}} =
+ fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts),
+ ?assertEqual(RevDocIdRevs, Rows),
+ if length(DocIdRevs) == 1 -> ok; true ->
+ fold_docs_with_end_key({Db, lists:reverse(tl(RevDocIdRevs))})
+ end.
+
+
+fold_docs_with_both_keys_the_same({Db, DocIdRevs}) ->
+ lists:foreach(fun({DocId, _} = Row) ->
+ check_all_combos(Db, DocId, DocId, [Row])
+ end, DocIdRevs).
+
+
+fold_docs_with_different_keys({Db, DocIdRevs}) ->
+ lists:foreach(fun(_) ->
+ {StartKey, EndKey, Rows} = pick_range(DocIdRevs),
+ check_all_combos(Db, StartKey, EndKey, Rows)
+ end, lists:seq(1, 500)).
+
+
+fold_docs_with_limit({Db, DocIdRevs}) ->
+ lists:foreach(fun(Limit) ->
+ Opts1 = [{limit, Limit}],
+ {ok, {?DOC_COUNT, Rows1}} =
+ fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts1),
+ ?assertEqual(lists:sublist(DocIdRevs, Limit), lists:reverse(Rows1)),
+
+ Opts2 = [{dir, rev} | Opts1],
+ {ok, {?DOC_COUNT, Rows2}} =
+ fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts2),
+ ?assertEqual(
+ lists:sublist(lists:reverse(DocIdRevs), Limit),
+ lists:reverse(Rows2)
+ )
+ end, lists:seq(0, 51)).
+
+
+fold_docs_with_skip({Db, DocIdRevs}) ->
+ lists:foreach(fun(Skip) ->
+ Opts1 = [{skip, Skip}],
+ {ok, {?DOC_COUNT, Rows1}} =
+ fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts1),
+ Expect1 = case Skip > length(DocIdRevs) of
+ true -> [];
+ false -> lists:nthtail(Skip, DocIdRevs)
+ end,
+ ?assertEqual(Expect1, lists:reverse(Rows1)),
+
+ Opts2 = [{dir, rev} | Opts1],
+ {ok, {?DOC_COUNT, Rows2}} =
+ fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts2),
+ Expect2 = case Skip > length(DocIdRevs) of
+ true -> [];
+ false -> lists:nthtail(Skip, lists:reverse(DocIdRevs))
+ end,
+ ?assertEqual(Expect2, lists:reverse(Rows2))
+ end, lists:seq(0, 51)).
+
+
+fold_docs_with_skip_and_limit({Db, DocIdRevs}) ->
+ lists:foreach(fun(_) ->
+ check_skip_and_limit(Db, [], DocIdRevs),
+ check_skip_and_limit(Db, [{dir, rev}], lists:reverse(DocIdRevs))
+ end, lists:seq(1, 100)).
+
+
+fold_docs_tx_too_old({Db, _DocIdRevs}) ->
+ {ok, Expected} = fabric2_db:fold_docs(Db, fun fold_fun/2, []),
+
+ FoldDocsFun = fun() ->
+ fabric2_db:fold_docs(Db, fun fold_fun/2, [], [{restart_tx, true}])
+ end,
+
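+    % tx_too_old_setup_errors(UserFunErrors, FoldRangeErrors) is assumed to
+    % configure when the mocked transaction raises transaction_too_old: a
+    % plain integer N raises N errors immediately, while {Skip, N} lets Skip
+    % calls succeed first and then raises N errors.
+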
+    % Blow up in fold_range on the first call
+ fabric2_test_util:tx_too_old_setup_errors(0, 1),
+ ?assertEqual({ok, Expected}, FoldDocsFun()),
+
+ % Blow up in fold_range after emitting one row
+ fabric2_test_util:tx_too_old_setup_errors(0, {1, 1}),
+ ?assertEqual({ok, Expected}, FoldDocsFun()),
+
+ % Blow up in fold_range after emitting 48 rows
+ fabric2_test_util:tx_too_old_setup_errors(0, {?DOC_COUNT - 2, 1}),
+ ?assertEqual({ok, Expected}, FoldDocsFun()),
+
+ % Blow up in fold_range after emitting 49 rows
+ fabric2_test_util:tx_too_old_setup_errors(0, {?DOC_COUNT - 1, 1}),
+ ?assertEqual({ok, Expected}, FoldDocsFun()),
+
+ % Blow up in user fun
+ fabric2_test_util:tx_too_old_setup_errors(1, 0),
+ ?assertEqual({ok, Expected}, FoldDocsFun()),
+
+ % Blow up in user fun after emitting one row
+ fabric2_test_util:tx_too_old_setup_errors({1, 1}, 0),
+ ?assertEqual({ok, Expected}, FoldDocsFun()),
+
+ % Blow up in user fun after emitting 48 rows
+ fabric2_test_util:tx_too_old_setup_errors({?DOC_COUNT - 2, 1}, 0),
+ ?assertEqual({ok, Expected}, FoldDocsFun()),
+
+ % Blow up in user fun after emitting 49 rows
+ fabric2_test_util:tx_too_old_setup_errors({?DOC_COUNT - 1, 1}, 0),
+ ?assertEqual({ok, Expected}, FoldDocsFun()),
+
+    % Blow up in both the user fun and fold_range
+ fabric2_test_util:tx_too_old_setup_errors(1, {1, 1}),
+ ?assertEqual({ok, Expected}, FoldDocsFun()).
+
+
+fold_docs_db_recreated({Db, _DocIdRevs}) ->
+ DbName = fabric2_db:name(Db),
+
+ RecreateDb = fun() ->
+ ok = fabric2_db:delete(DbName, []),
+ {ok, _} = fabric2_db:create(DbName, [])
+ end,
+
+ FoldFun = fun
+ ({meta, _}, Acc) ->
+ {ok, Acc};
+ ({row, Row}, Acc) ->
+ fabric2_test_util:tx_too_old_raise_in_user_fun(),
+ % After meta and one row emitted, recreate the db
+ case length(Acc) =:= 1 of
+ true -> RecreateDb();
+ false -> ok
+ end,
+ {ok, [Row | Acc]};
+ (complete, Acc) ->
+ {ok, Acc}
+ end,
+ % Blow up in user fun after emitting two rows
+ fabric2_test_util:tx_too_old_setup_errors({2, 1}, 0),
+ ?assertError(database_does_not_exist, fabric2_db:fold_docs(Db, FoldFun,
+ [], [{restart_tx, true}])).
+
+
+check_all_combos(Db, StartKey, EndKey, Rows) ->
+ Opts1 = make_opts(fwd, StartKey, EndKey, true),
+ {ok, {?DOC_COUNT, Rows1}} =
+ fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts1),
+ ?assertEqual(lists:reverse(Rows), Rows1),
+ check_skip_and_limit(Db, Opts1, Rows),
+
+ Opts2 = make_opts(fwd, StartKey, EndKey, false),
+ {ok, {?DOC_COUNT, Rows2}} =
+ fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts2),
+ Expect2 = if EndKey == undefined -> lists:reverse(Rows); true ->
+ lists:reverse(all_but_last(Rows))
+ end,
+ ?assertEqual(Expect2, Rows2),
+ check_skip_and_limit(Db, Opts2, lists:reverse(Expect2)),
+
+ Opts3 = make_opts(rev, StartKey, EndKey, true),
+ {ok, {?DOC_COUNT, Rows3}} =
+ fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts3),
+ ?assertEqual(Rows, Rows3),
+ check_skip_and_limit(Db, Opts3, lists:reverse(Rows)),
+
+ Opts4 = make_opts(rev, StartKey, EndKey, false),
+ {ok, {?DOC_COUNT, Rows4}} =
+ fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts4),
+ Expect4 = if StartKey == undefined -> Rows; true ->
+ tl(Rows)
+ end,
+ ?assertEqual(Expect4, Rows4),
+ check_skip_and_limit(Db, Opts4, lists:reverse(Expect4)).
+
+
+check_skip_and_limit(Db, Opts, []) ->
+ Skip = rand:uniform(?DOC_COUNT + 1) - 1,
+ Limit = rand:uniform(?DOC_COUNT + 1) - 1,
+ NewOpts = [{skip, Skip}, {limit, Limit} | Opts],
+ {ok, {?DOC_COUNT, OutRows}} =
+ fabric2_db:fold_docs(Db, fun fold_fun/2, [], NewOpts),
+ ?assertEqual([], OutRows);
+
+check_skip_and_limit(Db, Opts, Rows) ->
+ Skip = rand:uniform(length(Rows) + 1) - 1,
+ Limit = rand:uniform(?DOC_COUNT + 1 - Skip) - 1,
+
+ ExpectRows = case Skip >= length(Rows) of
+ true ->
+ [];
+ false ->
+ lists:sublist(lists:nthtail(Skip, Rows), Limit)
+ end,
+
+ SkipLimitOpts = [{skip, Skip}, {limit, Limit} | Opts],
+ {ok, {?DOC_COUNT, RevRows}} =
+ fabric2_db:fold_docs(Db, fun fold_fun/2, [], SkipLimitOpts),
+ OutRows = lists:reverse(RevRows),
+ ?assertEqual(ExpectRows, OutRows).
+
+
+make_opts(fwd, StartKey, EndKey, InclusiveEnd) ->
+ DirOpts = case rand:uniform() =< 0.50 of
+ true -> [{dir, fwd}];
+ false -> []
+ end,
+ StartOpts = case StartKey of
+ undefined -> [];
+ <<_/binary>> -> [{start_key, StartKey}]
+ end,
+ EndOpts = case EndKey of
+ undefined -> [];
+ <<_/binary>> when InclusiveEnd -> [{end_key, EndKey}];
+ <<_/binary>> -> [{end_key_gt, EndKey}]
+ end,
+ DirOpts ++ StartOpts ++ EndOpts;
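+% For rev folds, build the fwd options with the start and end keys swapped,
+% then replace any {dir, fwd} with {dir, rev} (++ and -- are right-associative,
+% so the subtraction below applies to BaseOpts only).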
+make_opts(rev, StartKey, EndKey, InclusiveEnd) ->
+ BaseOpts = make_opts(fwd, EndKey, StartKey, InclusiveEnd),
+ [{dir, rev}] ++ BaseOpts -- [{dir, fwd}].
+
+
+all_but_last([]) ->
+ [];
+all_but_last([_]) ->
+ [];
+all_but_last(Rows) ->
+ lists:sublist(Rows, length(Rows) - 1).
+
+
+pick_range(DocIdRevs) ->
+ {StartKey, StartRow, RestRows} = pick_start_key(DocIdRevs),
+ {EndKey, EndRow, RowsBetween} = pick_end_key(RestRows),
+ {StartKey, EndKey, StartRow ++ RowsBetween ++ EndRow}.
+
+
+pick_start_key(Rows) ->
+ case rand:uniform() =< 0.1 of
+ true ->
+ {undefined, [], Rows};
+ false ->
+ Idx = rand:uniform(length(Rows)),
+ {DocId, _} = Row = lists:nth(Idx, Rows),
+ {DocId, [Row], lists:nthtail(Idx, Rows)}
+ end.
+
+
+pick_end_key([]) ->
+ {undefined, [], []};
+
+pick_end_key(Rows) ->
+ case rand:uniform() =< 0.1 of
+ true ->
+ {undefined, [], Rows};
+ false ->
+ Idx = rand:uniform(length(Rows)),
+ {DocId, _} = Row = lists:nth(Idx, Rows),
+ Tail = lists:nthtail(Idx, Rows),
+ {DocId, [Row], Rows -- [Row | Tail]}
+ end.
+
+
+fold_fun({meta, Meta}, _Acc) ->
+ Total = fabric2_util:get_value(total, Meta),
+ {ok, {Total, []}};
+fold_fun({row, Row}, {Total, Rows}) ->
+ fabric2_test_util:tx_too_old_raise_in_user_fun(),
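+    % For doc folds the row id and key are expected to be identical, so
+    % binding RowId twice doubles as an assertion.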
+ RowId = fabric2_util:get_value(id, Row),
+ RowId = fabric2_util:get_value(key, Row),
+ RowRev = fabric2_util:get_value(value, Row),
+ {ok, {Total, [{RowId, RowRev} | Rows]}};
+fold_fun(complete, Acc) ->
+ {ok, Acc}.
diff --git a/src/fabric/test/fabric2_doc_size_tests.erl b/src/fabric/test/fabric2_doc_size_tests.erl
new file mode 100644
index 000000000..1e3dca4f6
--- /dev/null
+++ b/src/fabric/test/fabric2_doc_size_tests.erl
@@ -0,0 +1,320 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_doc_size_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+% Doc body size calculations
+% ID: size(Doc#doc.id)
+% Rev: size(erlfdb_tuple:encode(Start)) + size(Rev) % where Rev is usually 16
+% Deleted: 1 % (binary value is one byte)
+% Body: couch_ejson_size:external_size(Body) % where the empty body {[]} is 2
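+%
+% For example, per the formula above, the default #doc{} record (id <<>>,
+% revs {0, []}, deleted false, body {[]}) should cost 0 (id) + 1 (rev start 0)
+% + 1 (deleted) + 2 (empty body) = 4 bytes, which is what empty_doc_test/0
+% below asserts.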
+
+
+-define(NUM_RANDOM_TESTS, 1000).
+
+
+-define(DOC_IDS, [
+ {0, <<>>},
+ {1, <<"a">>},
+ {3, <<"foo">>},
+ {6, <<"foobar">>},
+ {32, <<"af196ae095631b020eedf8f69303e336">>}
+]).
+
+-define(REV_STARTS, [
+ {1, 0},
+ {2, 1},
+ {2, 255},
+ {3, 256},
+ {3, 65535},
+ {4, 65536},
+ {4, 16777215},
+ {5, 16777216},
+ {5, 4294967295},
+ {6, 4294967296},
+ {6, 1099511627775},
+ {7, 1099511627776},
+ {7, 281474976710655},
+ {8, 281474976710656},
+ {8, 72057594037927935},
+ {9, 72057594037927936},
+ {9, 18446744073709551615},
+
+    % The jump from 9 to 11 bytes happens because, once we spill over into
+    % the bigint range of 9-255 bytes, an extra byte is needed to encode the
+    % length of the bigint (see the illustration after this list).
+ {11, 18446744073709551616}
+]).
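+
+% To illustrate that jump (assuming erlfdb_tuple's integer encoding): 2^64 - 1
+% still fits in eight bytes and packs to nine bytes (one type byte plus eight
+% value bytes), while 2^64 spills into the bigint encoding and packs to eleven
+% bytes (a type byte, a length byte, and nine value bytes).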
+
+-define(REVS, [
+ {0, <<>>},
+ {8, <<"foobarba">>},
+ {16, <<"foobarbazbambang">>}
+]).
+
+-define(DELETED, [
+ {1, true},
+ {1, false}
+]).
+
+-define(BODIES, [
+ {2, {[]}},
+ {13, {[{<<"foo">>, <<"bar">>}]}},
+ {28, {[{<<"b">>, <<"a">>}, {<<"c">>, [true, null, []]}]}}
+]).
+
+-define(ATT_NAMES, [
+ {5, <<"a.txt">>},
+ {7, <<"foo.csv">>},
+ {29, <<"a-longer-name-for-example.bat">>}
+]).
+
+-define(ATT_TYPES, [
+ {24, <<"application/octet-stream">>},
+ {10, <<"text/plain">>},
+ {9, <<"image/png">>}
+]).
+
+-define(ATT_BODIES, [
+ {0, <<>>},
+ {1, <<"g">>},
+ {6, <<"foobar">>},
+ {384, <<
+ "xlasdjfsapoiewrposdlfadfuaducvwerwlkdsfljdfusfsd"
+ "xlasdjfsapoiewrposdlfadfuaducvwerwlkdsfljdfusfsd"
+ "xlasdjfsapoiewrposdlfadfuaducvwerwlkdsfljdfusfsd"
+ "xlasdjfsapoiewrposdlfadfuaducvwerwlkdsfljdfusfsd"
+ "xlasdjfsapoiewrposdlfadfuaducvwerwlkdsfljdfusfsd"
+ "xlasdjfsapoiewrposdlfadfuaducvwerwlkdsfljdfusfsd"
+ "xlasdjfsapoiewrposdlfadfuaducvwerwlkdsfljdfusfsd"
+ "xlasdjfsapoiewrposdlfadfuaducvwerwlkdsfljdfusfsd"
+ >>}
+]).
+
+-define(LDOC_IDS, [
+ {8, <<"_local/a">>},
+ {10, <<"_local/foo">>},
+ {13, <<"_local/foobar">>},
+ {39, <<"_local/af196ae095631b020eedf8f69303e336">>}
+]).
+
+-define(LDOC_REVS, [
+ {1, <<"0">>},
+ {2, <<"10">>},
+ {3, <<"100">>},
+ {4, <<"1000">>},
+ {5, <<"10000">>},
+ {6, <<"100000">>},
+ {7, <<"1000000">>}
+]).
+
+
+empty_doc_test() ->
+ ?assertEqual(4, fabric2_util:rev_size(#doc{})).
+
+
+docid_size_test() ->
+ lists:foreach(fun({Size, DocId}) ->
+ ?assertEqual(4 + Size, fabric2_util:rev_size(#doc{id = DocId}))
+ end, ?DOC_IDS).
+
+
+rev_size_test() ->
+ lists:foreach(fun({StartSize, Start}) ->
+ lists:foreach(fun({RevSize, Rev}) ->
+ Doc = #doc{
+ revs = {Start, [Rev]}
+ },
+ ?assertEqual(3 + StartSize + RevSize, fabric2_util:rev_size(Doc))
+ end, ?REVS)
+ end, ?REV_STARTS).
+
+
+deleted_size_test() ->
+ lists:foreach(fun({Size, Deleted}) ->
+ ?assertEqual(3 + Size, fabric2_util:rev_size(#doc{deleted = Deleted}))
+ end, ?DELETED).
+
+
+body_size_test() ->
+ lists:foreach(fun({Size, Body}) ->
+ ?assertEqual(2 + Size, fabric2_util:rev_size(#doc{body = Body}))
+ end, ?BODIES).
+
+
+att_names_test() ->
+ lists:foreach(fun({Size, AttName}) ->
+ Att = mk_att(AttName, <<>>, <<>>, false),
+ Doc = #doc{atts = [Att]},
+ ?assertEqual(4 + Size, fabric2_util:rev_size(Doc))
+ end, ?ATT_NAMES).
+
+
+att_types_test() ->
+ lists:foreach(fun({Size, AttType}) ->
+ Att = mk_att(<<"foo">>, AttType, <<>>, false),
+ Doc = #doc{atts = [Att]},
+ ?assertEqual(7 + Size, fabric2_util:rev_size(Doc))
+ end, ?ATT_TYPES).
+
+
+att_bodies_test() ->
+ lists:foreach(fun({Size, AttBody}) ->
+ Att1 = mk_att(<<"foo">>, <<>>, AttBody, false),
+ Doc1 = #doc{atts = [Att1]},
+ ?assertEqual(7 + Size, fabric2_util:rev_size(Doc1)),
+
+ Att2 = mk_att(<<"foo">>, <<>>, AttBody, true),
+ Doc2 = #doc{atts = [Att2]},
+ ?assertEqual(7 + 16 + Size, fabric2_util:rev_size(Doc2))
+ end, ?ATT_BODIES).
+
+
+local_doc_ids_test() ->
+ lists:foreach(fun({Size, LDocId}) ->
+ ?assertEqual(3 + Size, fabric2_util:ldoc_size(mk_ldoc(LDocId, 0)))
+ end, ?LDOC_IDS).
+
+
+local_doc_revs_test() ->
+ lists:foreach(fun({Size, Rev}) ->
+ Doc = mk_ldoc(<<"_local/foo">>, Rev),
+ ?assertEqual(12 + Size, fabric2_util:ldoc_size(Doc))
+ end, ?LDOC_REVS).
+
+
+local_doc_bodies_test() ->
+ lists:foreach(fun({Size, Body}) ->
+ Doc = mk_ldoc(<<"_local/foo">>, 0, Body),
+ ?assertEqual(11 + Size, fabric2_util:ldoc_size(Doc))
+ end, ?BODIES).
+
+
+doc_combinatorics_test() ->
+ Elements = [
+ {?DOC_IDS, fun(Doc, DocId) -> Doc#doc{id = DocId} end},
+ {?REV_STARTS, fun(Doc, RevStart) ->
+ #doc{revs = {_, RevIds}} = Doc,
+ Doc#doc{revs = {RevStart, RevIds}}
+ end},
+ {?REVS, fun(Doc, Rev) ->
+ #doc{revs = {Start, _}} = Doc,
+ Doc#doc{revs = {Start, [Rev]}}
+ end},
+ {?DELETED, fun(Doc, Deleted) -> Doc#doc{deleted = Deleted} end},
+ {?BODIES, fun(Doc, Body) -> Doc#doc{body = Body} end}
+ ],
+ doc_combine(Elements, 0, #doc{}).
+
+
+doc_combine([], TotalSize, Doc) ->
+ ?assertEqual(TotalSize, fabric2_util:rev_size(Doc));
+
+doc_combine([{Elems, UpdateFun} | Rest], TotalSize, Doc) ->
+ lists:foreach(fun({Size, Elem}) ->
+ doc_combine(Rest, TotalSize + Size, UpdateFun(Doc, Elem))
+ end, Elems).
+
+
+local_doc_combinatorics_test() ->
+ Elements = [
+ {?LDOC_IDS, fun(Doc, DocId) -> Doc#doc{id = DocId} end},
+ {?LDOC_REVS, fun(Doc, Rev) -> Doc#doc{revs = {0, [Rev]}} end},
+ {?BODIES, fun(Doc, Body) -> Doc#doc{body = Body} end}
+ ],
+ local_doc_combine(Elements, 0, #doc{}).
+
+
+local_doc_combine([], TotalSize, Doc) ->
+ ?assertEqual(TotalSize, fabric2_util:ldoc_size(Doc));
+
+local_doc_combine([{Elems, UpdateFun} | Rest], TotalSize, Doc) ->
+ lists:foreach(fun({Size, Elem}) ->
+ local_doc_combine(Rest, TotalSize + Size, UpdateFun(Doc, Elem))
+ end, Elems).
+
+
+random_docs_test() ->
+ lists:foreach(fun(_) ->
+ {DocIdSize, DocId} = choose(?DOC_IDS),
+ {RevStartSize, RevStart} = choose(?REV_STARTS),
+ {RevSize, Rev} = choose(?REVS),
+ {DeletedSize, Deleted} = choose(?DELETED),
+ {BodySize, Body} = choose(?BODIES),
+ NumAtts = choose([0, 1, 2, 5]),
+ {Atts, AttSize} = lists:mapfoldl(fun(_, Acc) ->
+ {S, A} = random_att(),
+ {A, Acc + S}
+ end, 0, lists:seq(1, NumAtts)),
+ Doc = #doc{
+ id = DocId,
+ revs = {RevStart, [Rev]},
+ deleted = Deleted,
+ body = Body,
+ atts = Atts
+ },
+ Expect = lists:sum([
+ DocIdSize,
+ RevStartSize,
+ RevSize,
+ DeletedSize,
+ BodySize,
+ AttSize
+ ]),
+ ?assertEqual(Expect, fabric2_util:rev_size(Doc))
+ end, lists:seq(1, ?NUM_RANDOM_TESTS)).
+
+
+random_att() ->
+ {NameSize, Name} = choose(?ATT_NAMES),
+ {TypeSize, Type} = choose(?ATT_TYPES),
+ {BodySize, Body} = choose(?ATT_BODIES),
+ {Md5Size, AddMd5} = choose([{0, false}, {16, true}]),
+ AttSize = lists:sum([NameSize, TypeSize, BodySize, Md5Size]),
+ {AttSize, mk_att(Name, Type, Body, AddMd5)}.
+
+
+mk_att(Name, Type, Data, AddMd5) ->
+ Md5 = if not AddMd5 -> <<>>; true ->
+ erlang:md5(Data)
+ end,
+ couch_att:new([
+ {name, Name},
+ {type, Type},
+ {att_len, size(Data)},
+ {data, Data},
+ {encoding, identity},
+ {md5, Md5}
+ ]).
+
+
+mk_ldoc(DocId, Rev) ->
+ mk_ldoc(DocId, Rev, {[]}).
+
+
+mk_ldoc(DocId, Rev, Body) ->
+ #doc{
+ id = DocId,
+ revs = {0, [Rev]},
+ body = Body
+ }.
+
+
+choose(Options) ->
+ Pos = rand:uniform(length(Options)),
+ lists:nth(Pos, Options).
diff --git a/src/fabric/test/fabric2_fdb_tx_retry_tests.erl b/src/fabric/test/fabric2_fdb_tx_retry_tests.erl
new file mode 100644
index 000000000..7fb0f21d0
--- /dev/null
+++ b/src/fabric/test/fabric2_fdb_tx_retry_tests.erl
@@ -0,0 +1,176 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_fdb_tx_retry_tests).
+
+
+-include_lib("eunit/include/eunit.hrl").
+-include("fabric2_test.hrl").
+
+
+retry_test_() ->
+ {
+ setup,
+ fun setup/0,
+ fun cleanup/1,
+ with([
+ ?TDEF(read_only_no_retry),
+ ?TDEF(read_only_commit_unknown_result),
+ ?TDEF(run_on_first_try),
+ ?TDEF(retry_when_commit_conflict),
+ ?TDEF(retry_when_txid_not_found),
+ ?TDEF(no_retry_when_txid_found)
+ ])
+ }.
+
+
+setup() ->
+ meck:new(erlfdb),
+ meck:new(fabric2_txids),
+ EnvSt = case application:get_env(fabric, db) of
+ {ok, Db} -> {ok, Db};
+ undefined -> undefined
+ end,
+ application:set_env(fabric, db, not_a_real_db),
+ EnvSt.
+
+
+cleanup(EnvSt) ->
+ case EnvSt of
+ {ok, Db} -> application:set_env(fabric, db, Db);
+ undefined -> application:unset_env(fabric, db)
+ end,
+ meck:unload().
+
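+% The erlfdb:get_last_error/0 values mocked below are assumed to correspond to
+% FoundationDB's error codes 1020 (not_committed, i.e. a commit conflict) and
+% 1021 (commit_unknown_result), which are what drive the retry decisions in
+% fabric2_fdb:transactional/1.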
+
+read_only_no_retry(_) ->
+ meck:expect(erlfdb, transactional, fun(_Db, UserFun) ->
+ UserFun(not_a_real_transaction)
+ end),
+ meck:expect(erlfdb, get_last_error, fun() -> 0 end),
+ meck:expect(erlfdb, get, fun(_, _) -> foo end),
+ meck:expect(erlfdb, is_read_only, fun(_) -> true end),
+ meck:expect(fabric2_txids, remove, fun(undefined) -> ok end),
+
+ Result = fabric2_fdb:transactional(fun(Tx) ->
+ ?assertEqual(foo, erlfdb:get(Tx, bar)),
+ did_run
+ end),
+
+ ?assertEqual(did_run, Result),
+ ?assert(meck:validate([erlfdb, fabric2_txids])).
+
+
+read_only_commit_unknown_result(_) ->
+ % Not 100% certain that this would ever actually
+ % happen in the wild but might as well test that
+ % we don't blow up if it does.
+ meck:expect(erlfdb, transactional, fun(_Db, UserFun) ->
+ UserFun(not_a_real_transaction)
+ end),
+ meck:expect(erlfdb, get_last_error, fun() -> 1021 end),
+ meck:expect(erlfdb, get, fun(_, _) -> foo end),
+ meck:expect(erlfdb, is_read_only, fun(_) -> true end),
+ meck:expect(fabric2_txids, remove, fun(undefined) -> ok end),
+
+ Result = fabric2_fdb:transactional(fun(Tx) ->
+ ?assertEqual(foo, erlfdb:get(Tx, bar)),
+ did_run
+ end),
+
+ ?assertEqual(did_run, Result),
+ ?assert(meck:validate([erlfdb, fabric2_txids])).
+
+
+run_on_first_try(_) ->
+ meck:expect(erlfdb, transactional, fun(_Db, UserFun) ->
+ UserFun(not_a_real_transaction)
+ end),
+ meck:expect(erlfdb, get_last_error, fun() -> undefined end),
+ meck:expect(erlfdb, clear, fun(_, _) -> ok end),
+ meck:expect(erlfdb, is_read_only, fun(_) -> false end),
+ meck:expect(fabric2_txids, create, fun(_, _) -> <<"a txid">> end),
+ meck:expect(erlfdb, set, fun(_, <<"a txid">>, <<>>) -> ok end),
+ meck:expect(fabric2_txids, remove, fun(<<"a txid">>) -> ok end),
+
+ Result = fabric2_fdb:transactional(fun(Tx) ->
+ ?assertEqual(ok, erlfdb:clear(Tx, bang)),
+ did_run
+ end),
+
+ ?assertEqual(did_run, Result),
+ ?assert(meck:validate([erlfdb, fabric2_txids])).
+
+
+retry_when_commit_conflict(_) ->
+ meck:expect(erlfdb, transactional, fun(_Db, UserFun) ->
+ UserFun(not_a_real_transaction)
+ end),
+ meck:expect(erlfdb, get_last_error, fun() -> 1020 end),
+ meck:expect(erlfdb, clear, fun(_, _) -> ok end),
+ meck:expect(erlfdb, is_read_only, fun(_) -> false end),
+ meck:expect(fabric2_txids, create, fun(_, _) -> <<"a txid">> end),
+ meck:expect(erlfdb, set, fun(_, <<"a txid">>, <<>>) -> ok end),
+ meck:expect(fabric2_txids, remove, fun(<<"a txid">>) -> ok end),
+
+ Result = fabric2_fdb:transactional(fun(Tx) ->
+ ?assertEqual(ok, erlfdb:clear(Tx, <<"foo">>)),
+ did_run
+ end),
+
+ ?assertEqual(did_run, Result),
+ ?assert(meck:validate([erlfdb, fabric2_txids])).
+
+
+retry_when_txid_not_found(_) ->
+ meck:expect(erlfdb, transactional, fun(_Db, UserFun) ->
+ UserFun(not_a_real_transaction)
+ end),
+ meck:expect(erlfdb, get_last_error, fun() -> 1021 end),
+ meck:expect(erlfdb, get, fun(_, <<"a txid">>) -> future end),
+ meck:expect(erlfdb, wait, fun(future) -> not_found end),
+ meck:expect(erlfdb, clear, fun(_, _) -> ok end),
+ meck:expect(erlfdb, is_read_only, fun(_) -> false end),
+ meck:expect(erlfdb, set, fun(_, <<"a txid">>, <<>>) -> ok end),
+ meck:expect(fabric2_txids, remove, fun(<<"a txid">>) -> ok end),
+
+ put('$fabric_tx_id', <<"a txid">>),
+ put('$fabric_tx_result', not_the_correct_result),
+
+ Result = fabric2_fdb:transactional(fun(Tx) ->
+ ?assertEqual(ok, erlfdb:clear(Tx, <<"foo">>)),
+ yay_not_skipped
+ end),
+
+ ?assertEqual(yay_not_skipped, Result),
+ ?assert(meck:validate([erlfdb, fabric2_txids])).
+
+
+no_retry_when_txid_found(_) ->
+ meck:expect(erlfdb, transactional, fun(_Db, UserFun) ->
+ UserFun(not_a_real_transaction)
+ end),
+ meck:expect(erlfdb, get_last_error, fun() -> 1021 end),
+ meck:expect(erlfdb, get, fun(_, <<"a txid">>) -> future end),
+ meck:expect(erlfdb, wait, fun(future) -> <<>> end),
+ meck:expect(fabric2_txids, remove, fun(<<"a txid">>) -> ok end),
+
+ put('$fabric_tx_id', <<"a txid">>),
+ put('$fabric_tx_result', did_not_run),
+
+ Result = fabric2_fdb:transactional(fun(_Tx) ->
+ ?assert(false),
+ did_run
+ end),
+
+ ?assertEqual(did_not_run, Result),
+ ?assert(meck:validate([erlfdb, fabric2_txids])). \ No newline at end of file
diff --git a/src/fabric/test/fabric2_get_design_docs_tests.erl b/src/fabric/test/fabric2_get_design_docs_tests.erl
new file mode 100644
index 000000000..eb227835c
--- /dev/null
+++ b/src/fabric/test/fabric2_get_design_docs_tests.erl
@@ -0,0 +1,138 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_get_design_docs_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("fabric2_test.hrl").
+
+
+get_design_docs_test_() ->
+ {
+ "Test get_design_docs",
+ {
+ setup,
+ fun setup_all/0,
+ fun cleanup_all/1,
+ {
+ foreach,
+ fun setup/0,
+ fun cleanup/1,
+ [
+ ?TDEF_FE(empty_db),
+ ?TDEF_FE(get_one),
+ ?TDEF_FE(get_two),
+ ?TDEF_FE(get_many),
+ ?TDEF_FE(get_many_with_regular_docs),
+ ?TDEF_FE(dont_return_deleted_ddocs)
+ ]
+ }
+ }
+ }.
+
+
+setup_all() ->
+ test_util:start_couch([fabric]).
+
+
+cleanup_all(Ctx) ->
+ test_util:stop_couch(Ctx).
+
+
+setup() ->
+ {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+ Db.
+
+
+cleanup(Db) ->
+ ok = fabric2_db:delete(fabric2_db:name(Db), []).
+
+
+empty_db(Db) ->
+ DDocs = fabric2_db:get_design_docs(Db),
+ ?assertEqual([], DDocs).
+
+
+get_one(Db) ->
+ DDoc = create_ddoc(Db, <<"foo">>),
+ DDocs = fabric2_db:get_design_docs(Db),
+ ?assertEqual([DDoc], DDocs).
+
+
+get_two(Db) ->
+ DDoc1 = create_ddoc(Db, <<"foo">>),
+ DDoc2 = create_ddoc(Db, <<"bar">>),
+ DDocs = fabric2_db:get_design_docs(Db),
+ % DDocs come back sorted
+ ?assertEqual([DDoc2, DDoc1], DDocs).
+
+
+get_many(Db) ->
+ DDocsIn = lists:map(fun(Seq) ->
+ Id = io_lib:format("~2..0b", [Seq]),
+ create_ddoc(Db, iolist_to_binary(Id))
+ end, lists:seq(1, 10)),
+ DDocsOut = fabric2_db:get_design_docs(Db),
+ ?assertEqual(DDocsIn, DDocsOut).
+
+
+get_many_with_regular_docs(Db) ->
+ RegularIds = [
+ <<"0">>,
+ <<"012aCb">>,
+ <<"Another_doc">>,
+ <<"Znother_doc">>,
+ <<"a_doc_as_well">>,
+ <<"zebra_doc">>
+ ],
+ lists:foreach(fun(DocId) ->
+ create_doc(Db, DocId)
+ end, RegularIds),
+ DDocsIn = lists:map(fun(Seq) ->
+ Id = io_lib:format("~2..0b", [Seq]),
+ create_ddoc(Db, iolist_to_binary(Id))
+ end, lists:seq(1, 10)),
+ DDocsOut = fabric2_db:get_design_docs(Db),
+ ?assertEqual(DDocsIn, DDocsOut).
+
+
+dont_return_deleted_ddocs(Db) ->
+ DDocsIn = lists:flatmap(fun(Seq) ->
+ Id = io_lib:format("~2..0b", [Seq]),
+ DDoc = create_ddoc(Db, iolist_to_binary(Id)),
+ case Seq rem 2 == 0 of
+ true ->
+ delete_ddoc(Db, DDoc),
+ [];
+ false ->
+ [DDoc]
+ end
+ end, lists:seq(1, 10)),
+ DDocsOut = fabric2_db:get_design_docs(Db),
+ ?assertEqual(DDocsIn, DDocsOut).
+
+
+create_ddoc(Db, Id) ->
+ create_doc(Db, <<"_design/", Id/binary>>).
+
+
+delete_ddoc(Db, DDoc) ->
+ {ok, _} = fabric2_db:update_doc(Db, DDoc#doc{deleted = true}).
+
+
+create_doc(Db, Id) ->
+ Doc = #doc{id = Id},
+ {ok, {Pos, Rev}} = fabric2_db:update_doc(Db, Doc),
+ Doc#doc{revs = {Pos, [Rev]}}.
diff --git a/src/fabric/test/fabric2_index_tests.erl b/src/fabric/test/fabric2_index_tests.erl
new file mode 100644
index 000000000..8a4acb77d
--- /dev/null
+++ b/src/fabric/test/fabric2_index_tests.erl
@@ -0,0 +1,304 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_index_tests).
+
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include("fabric2_test.hrl").
+
+
+% Should match the corresponding define in fabric2_index
+-define(SHARDS, 32).
+
+
+index_test_() ->
+ {
+ "Test fabric indexing module",
+ {
+ setup,
+ fun setup/0,
+ fun cleanup/1,
+ with([
+ ?TDEF(register_index_works),
+ ?TDEF(single_update),
+ ?TDEF(multiple_updates),
+ ?TDEF(skip_db_if_no_ddocs),
+ ?TDEF(ignore_deleted_dbs, 10),
+ ?TDEF(check_gen_server_messages)
+ ])
+ }
+ }.
+
+
+index_process_cleanup_test_() ->
+ {
+ "Test fabric process cleanup in indexing module",
+ {
+ foreach,
+ fun setup/0,
+ fun cleanup/1,
+ [
+ ?TDEF_FE(updater_processes_start, 15),
+ ?TDEF_FE(updater_processes_stop, 15),
+ ?TDEF_FE(indexing_can_be_disabled),
+ ?TDEF_FE(handle_indexer_blowing_up)
+ ]
+ }
+ }.
+
+
+setup() ->
+ meck:new(config, [passthrough]),
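+    % Shorten the index updater's delay and resolution so build_indices/2
+    % calls happen well within the meck:wait timeouts used in these tests.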
+ meck:expect(config, get_integer, fun
+ ("fabric", "index_updater_delay_msec", _) -> 200;
+ ("fabric", "index_updater_resolution_msec", _) -> 100;
+
+ (_, _, Default) -> Default
+ end),
+ meck:expect(config, get_boolean, fun
+ ("fabric", "index_updater_enabled", _) -> true;
+ (_, _, Default) -> Default
+ end),
+
+ Indices = application:get_env(fabric, indices, []),
+
+ Ctx = test_util:start_couch([fabric]),
+
+ % Db1 has a valid design doc, a deleted one and one with "autoupdate":false
+ {ok, Db1} = fabric2_db:create(?tempdb(), [?ADMIN_CTX]),
+ {_, _} = create_doc(Db1, <<"_design/doc1">>),
+
+ DDocId2 = <<"_design/doc2">>,
+ {DDocId2, {Pos, Rev}} = create_doc(Db1, DDocId2),
+ Delete2 = #doc{id = DDocId2, revs = {Pos, [Rev]}, deleted = true},
+ {ok, _} = fabric2_db:update_doc(Db1, Delete2),
+
+ NoAutoUpdate = {[{<<"autoupdate">>, false}]},
+ {_, _} = create_doc(Db1, <<"_design/doc3">>, NoAutoUpdate),
+
+    % Db2 doesn't have any design documents
+ {ok, Db2} = fabric2_db:create(?tempdb(), [?ADMIN_CTX]),
+
+ #{db1 => Db1, db2 => Db2, ctx => Ctx, indices => Indices}.
+
+
+cleanup(#{db1 := Db1, db2 := Db2, ctx := Ctx, indices := Indices}) ->
+ catch fabric2_db:delete(fabric2_db:name(Db1), []),
+ catch fabric2_db:delete(fabric2_db:name(Db2), []),
+
+ test_util:stop_couch(Ctx),
+ application:set_env(fabric, indices, Indices),
+
+ meck:unload().
+
+
+register_index_works(_) ->
+ reset_callbacks(),
+
+ Mod1 = fabric2_test_callback1,
+ fabric2_index:register_index(Mod1),
+ Indices1 = application:get_env(fabric, indices, []),
+ ?assertEqual([Mod1], Indices1),
+
+ Mod2 = fabric2_test_callback2,
+ fabric2_index:register_index(Mod2),
+ Indices2 = application:get_env(fabric, indices, []),
+ ?assertEqual(lists:sort([Mod1, Mod2]), lists:sort(Indices2)).
+
+
+single_update(#{db1 := Db}) ->
+ reset_callbacks(),
+
+ Mod = fabric2_test_callback3,
+ setup_callback(Mod),
+ create_doc(Db),
+
+ meck:wait(Mod, build_indices, 2, 2000),
+ ?assertEqual(1, meck:num_calls(Mod, build_indices, 2)).
+
+
+multiple_updates(#{db1 := Db}) ->
+ reset_callbacks(),
+
+ Mod = fabric2_test_callback4,
+ setup_callback(Mod),
+ create_docs(Db, 10),
+
+    % Should be called at least once
+ meck:wait(Mod, build_indices, 2, 2000),
+
+ % Maybe called another time or two at most
+ timer:sleep(500),
+ ?assert(meck:num_calls(Mod, build_indices, 2) =< 3).
+
+
+skip_db_if_no_ddocs(#{db2 := Db}) ->
+ reset_callbacks(),
+
+ Mod = fabric2_test_callback5,
+ setup_callback(Mod),
+ create_doc(Db),
+
+ timer:sleep(500),
+ ?assertEqual(0, meck:num_calls(Mod, build_indices, 2)).
+
+
+ignore_deleted_dbs(#{}) ->
+ reset_callbacks(),
+
+ Mod = fabric2_test_callback6,
+ setup_callback(Mod),
+ lists:foreach(fun(_) ->
+ RandomDbName = fabric2_util:uuid(),
+ fabric2_index:db_updated(RandomDbName)
+ end, lists:seq(1, 1000)),
+
+ test_util:wait(fun() ->
+ case table_sizes() =:= 0 of
+ true -> ok;
+ false -> wait
+ end
+ end, 5000).
+
+
+check_gen_server_messages(#{}) ->
+ CallExpect = {stop, {bad_call, foo}, {bad_call, foo}, baz},
+ CastExpect = {stop, {bad_cast, foo}, bar},
+ InfoExpect = {stop, {bad_info, foo}, bar},
+ ?assertEqual(CallExpect, fabric2_index:handle_call(foo, bar, baz)),
+ ?assertEqual(CastExpect, fabric2_index:handle_cast(foo, bar)),
+ ?assertEqual(InfoExpect, fabric2_index:handle_info(foo, bar)),
+ ?assertEqual(ok, fabric2_index:terminate(shutdown, nil)),
+ ?assertEqual({ok, nil}, fabric2_index:code_change(v0, nil, extra)).
+
+
+updater_processes_start(#{}) ->
+ Pid = whereis(fabric2_index),
+ ?assert(is_process_alive(Pid)),
+ lists:map(fun(N) ->
+ ?assertEqual(tid(N), ets:info(tid(N), name))
+ end, lists:seq(0, ?SHARDS - 1)).
+
+
+updater_processes_stop(#{}) ->
+ Refs = lists:map(fun(N) ->
+ Pid = ets:info(tid(N), owner),
+ ?assert(is_process_alive(Pid)),
+ monitor(process, Pid)
+ end, lists:seq(0, ?SHARDS - 1)),
+
+    % We stop fabric but don't restart it here, since we're running in a
+    % foreach test list and the app is restarted after each test.
+ application:stop(fabric),
+
+ lists:foreach(fun(Ref) ->
+ receive
+ {'DOWN', Ref, _, _, _} -> ok
+ after 5000 ->
+ ?assert(false)
+ end
+ end, Refs).
+
+
+indexing_can_be_disabled(#{db1 := Db}) ->
+ meck:expect(config, get_boolean, fun
+ ("fabric", "index_updater_enabled", _) -> false;
+ (_, _, Default) -> Default
+ end),
+
+ Mod = fabric2_test_callback7,
+ setup_callback(Mod),
+
+ create_doc(Db),
+ timer:sleep(500),
+ ?assertEqual(0, meck:num_calls(Mod, build_indices, 2)),
+
+ meck:expect(config, get_boolean, fun
+ ("fabric", "index_updater_enabled", _) -> true;
+ (_, _, Default) -> Default
+ end),
+
+ create_doc(Db),
+ meck:wait(Mod, build_indices, 2, 2000).
+
+
+handle_indexer_blowing_up(#{db1 := Db}) ->
+ Mod = fabric2_test_callback8,
+ setup_callback(Mod),
+ meck:expect(Mod, build_indices, fun(_, _) -> error(bad_index) end),
+
+ MainPid = whereis(fabric2_index),
+ WPids1 = [ets:info(tid(N), owner) || N <- lists:seq(0, ?SHARDS - 1)],
+
+ create_doc(Db),
+ meck:wait(Mod, build_indices, 2, 2000),
+
+ ?assert(is_process_alive(MainPid)),
+
+ WPids2 = [ets:info(tid(N), owner) || N <- lists:seq(0, ?SHARDS - 1)],
+ ?assertEqual(lists:sort(WPids1), lists:sort(WPids2)),
+ ?assert(lists:all(fun(Pid) -> is_process_alive(Pid) end, WPids2)).
+
+
+% Utility functions
+
+setup_callback(Mod) ->
+ catch meck:unload(Mod),
+ meck:new(Mod, [non_strict]),
+ meck:expect(Mod, build_indices, 2, []),
+ fabric2_index:register_index(Mod).
+
+
+reset_callbacks() ->
+ Mods = application:get_env(fabric, indices, []),
+ application:set_env(fabric, indices, []),
+ lists:foreach(fun(M) ->
+ catch meck:reset(M),
+ catch meck:unload(M)
+ end, Mods).
+
+
+tid(Id) when is_integer(Id) ->
+ TableName = "fabric2_index_" ++ integer_to_list(Id),
+ list_to_existing_atom(TableName).
+
+
+table_sizes() ->
+ Sizes = [ets:info(tid(N), size) || N <- lists:seq(0, ?SHARDS - 1)],
+ lists:sum(Sizes).
+
+
+create_docs(Db, Count) ->
+ lists:map(fun(_) ->
+ {DocId, _RevStr} = create_doc(Db),
+ DocId
+ end, lists:seq(1, Count)).
+
+
+create_doc(Db) ->
+ create_doc(Db, fabric2_util:uuid()).
+
+
+create_doc(Db, DocId) ->
+ create_doc(Db, DocId, {[]}).
+
+
+create_doc(Db, DocId, Body) ->
+ Doc = #doc{
+ id = DocId,
+ body = Body
+ },
+ {ok, {Pos, Rev}} = fabric2_db:update_doc(Db, Doc, []),
+ {DocId, {Pos, Rev}}.
diff --git a/src/fabric/test/fabric2_local_doc_fold_tests.erl b/src/fabric/test/fabric2_local_doc_fold_tests.erl
new file mode 100644
index 000000000..e3ff0eb21
--- /dev/null
+++ b/src/fabric/test/fabric2_local_doc_fold_tests.erl
@@ -0,0 +1,295 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_local_doc_fold_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("fabric2_test.hrl").
+
+
+-define(DOC_COUNT, 50).
+
+
+doc_fold_test_() ->
+ {
+ "Test local document fold operations",
+ {
+ setup,
+ fun setup/0,
+ fun cleanup/1,
+ with([
+ ?TDEF(fold_local_docs_basic),
+ ?TDEF(fold_local_docs_rev),
+ ?TDEF(fold_local_docs_with_start_key),
+ ?TDEF(fold_local_docs_with_end_key),
+ ?TDEF(fold_local_docs_with_both_keys_the_same),
+ ?TDEF(fold_local_docs_with_different_keys, 15000),
+ ?TDEF(fold_local_docs_with_limit),
+ ?TDEF(fold_local_docs_with_skip),
+ ?TDEF(fold_local_docs_with_skip_and_limit)
+ ])
+ }
+ }.
+
+
+setup() ->
+ Ctx = test_util:start_couch([fabric]),
+ {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+ DocIdRevs = lists:map(fun(Val) ->
+ UUID = fabric2_util:uuid(),
+ DocId = <<?LOCAL_DOC_PREFIX, UUID/binary>>,
+ % Every 10th doc is large to force the doc to be chunkified
+ BigChunk = << <<"x">> || _ <- lists:seq(1, 200000) >>,
+ Body = case Val rem 10 == 0 of
+ true -> {[{<<"value">>, BigChunk}]};
+ false -> {[{<<"value">>, Val}]}
+ end,
+ Doc = #doc{
+ id = DocId,
+ body = Body
+ },
+ {ok, Rev} = fabric2_db:update_doc(Db, Doc, []),
+ {DocId, {[{rev, couch_doc:rev_to_str(Rev)}]}}
+ end, lists:seq(1, ?DOC_COUNT)),
+ {Db, lists:sort(DocIdRevs), Ctx}.
+
+
+cleanup({Db, _DocIdRevs, Ctx}) ->
+ ok = fabric2_db:delete(fabric2_db:name(Db), []),
+ test_util:stop_couch(Ctx).
+
+
+fold_local_docs_basic({Db, DocIdRevs, _}) ->
+ {ok, {?DOC_COUNT, Rows}} = fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], []),
+ ?assertEqual(DocIdRevs, lists:reverse(Rows)).
+
+
+fold_local_docs_rev({Db, DocIdRevs, _}) ->
+ Opts = [{dir, rev}],
+ {ok, {?DOC_COUNT, Rows}} =
+ fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], Opts),
+ ?assertEqual(DocIdRevs, Rows).
+
+
+fold_local_docs_with_start_key({Db, DocIdRevs, _}) ->
+ {StartKey, _} = hd(DocIdRevs),
+ Opts = [{start_key, StartKey}],
+ {ok, {?DOC_COUNT, Rows}}
+ = fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], Opts),
+ ?assertEqual(DocIdRevs, lists:reverse(Rows)),
+ if length(DocIdRevs) == 1 -> ok; true ->
+ fold_local_docs_with_start_key({Db, tl(DocIdRevs), nil})
+ end.
+
+
+fold_local_docs_with_end_key({Db, DocIdRevs, _}) ->
+ RevDocIdRevs = lists:reverse(DocIdRevs),
+ {EndKey, _} = hd(RevDocIdRevs),
+ Opts = [{end_key, EndKey}],
+ {ok, {?DOC_COUNT, Rows}} =
+ fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], Opts),
+ ?assertEqual(RevDocIdRevs, Rows),
+ if length(DocIdRevs) == 1 -> ok; true ->
+ fold_local_docs_with_end_key({Db, lists:reverse(tl(RevDocIdRevs)), nil})
+ end.
+
+
+fold_local_docs_with_both_keys_the_same({Db, DocIdRevs, _}) ->
+ lists:foreach(fun({DocId, _} = Row) ->
+ check_all_combos(Db, DocId, DocId, [Row])
+ end, DocIdRevs).
+
+
+fold_local_docs_with_different_keys({Db, DocIdRevs, _}) ->
+ lists:foreach(fun(_) ->
+ {StartKey, EndKey, Rows} = pick_range(DocIdRevs),
+ check_all_combos(Db, StartKey, EndKey, Rows)
+ end, lists:seq(1, 100)).
+
+
+fold_local_docs_with_limit({Db, DocIdRevs, _}) ->
+ lists:foreach(fun(Limit) ->
+ Opts1 = [{limit, Limit}],
+ {ok, {?DOC_COUNT, Rows1}} =
+ fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], Opts1),
+ ?assertEqual(lists:sublist(DocIdRevs, Limit), lists:reverse(Rows1)),
+
+ Opts2 = [{dir, rev} | Opts1],
+ {ok, {?DOC_COUNT, Rows2}} =
+ fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], Opts2),
+ ?assertEqual(
+ lists:sublist(lists:reverse(DocIdRevs), Limit),
+ lists:reverse(Rows2)
+ )
+ end, lists:seq(0, 51)).
+
+
+fold_local_docs_with_skip({Db, DocIdRevs, _}) ->
+ lists:foreach(fun(Skip) ->
+ Opts1 = [{skip, Skip}],
+ {ok, {?DOC_COUNT, Rows1}} =
+ fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], Opts1),
+ Expect1 = case Skip > length(DocIdRevs) of
+ true -> [];
+ false -> lists:nthtail(Skip, DocIdRevs)
+ end,
+ ?assertEqual(Expect1, lists:reverse(Rows1)),
+
+ Opts2 = [{dir, rev} | Opts1],
+ {ok, {?DOC_COUNT, Rows2}} =
+ fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], Opts2),
+ Expect2 = case Skip > length(DocIdRevs) of
+ true -> [];
+ false -> lists:nthtail(Skip, lists:reverse(DocIdRevs))
+ end,
+ ?assertEqual(Expect2, lists:reverse(Rows2))
+ end, lists:seq(0, 51)).
+
+
+fold_local_docs_with_skip_and_limit({Db, DocIdRevs, _}) ->
+ lists:foreach(fun(_) ->
+ check_skip_and_limit(Db, [], DocIdRevs),
+ check_skip_and_limit(Db, [{dir, rev}], lists:reverse(DocIdRevs))
+ end, lists:seq(1, 100)).
+
+
+check_all_combos(Db, StartKey, EndKey, Rows) ->
+ Opts1 = make_opts(fwd, StartKey, EndKey, true),
+ {ok, {?DOC_COUNT, Rows1}} =
+ fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], Opts1),
+ ?assertEqual(lists:reverse(Rows), Rows1),
+ check_skip_and_limit(Db, Opts1, Rows),
+
+ Opts2 = make_opts(fwd, StartKey, EndKey, false),
+ {ok, {?DOC_COUNT, Rows2}} =
+ fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], Opts2),
+ Expect2 = if EndKey == undefined -> lists:reverse(Rows); true ->
+ lists:reverse(all_but_last(Rows))
+ end,
+ ?assertEqual(Expect2, Rows2),
+ check_skip_and_limit(Db, Opts2, lists:reverse(Expect2)),
+
+ Opts3 = make_opts(rev, StartKey, EndKey, true),
+ {ok, {?DOC_COUNT, Rows3}} =
+ fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], Opts3),
+ ?assertEqual(Rows, Rows3),
+ check_skip_and_limit(Db, Opts3, lists:reverse(Rows)),
+
+ Opts4 = make_opts(rev, StartKey, EndKey, false),
+ {ok, {?DOC_COUNT, Rows4}} =
+ fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], Opts4),
+ Expect4 = if StartKey == undefined -> Rows; true ->
+ tl(Rows)
+ end,
+ ?assertEqual(Expect4, Rows4),
+ check_skip_and_limit(Db, Opts4, lists:reverse(Expect4)).
+
+
+check_skip_and_limit(Db, Opts, []) ->
+ Skip = rand:uniform(?DOC_COUNT + 1) - 1,
+ Limit = rand:uniform(?DOC_COUNT + 1) - 1,
+ NewOpts = [{skip, Skip}, {limit, Limit} | Opts],
+ {ok, {?DOC_COUNT, OutRows}} =
+ fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], NewOpts),
+ ?assertEqual([], OutRows);
+
+check_skip_and_limit(Db, Opts, Rows) ->
+ Skip = rand:uniform(length(Rows) + 1) - 1,
+ Limit = rand:uniform(?DOC_COUNT + 1 - Skip) - 1,
+
+ ExpectRows = case Skip >= length(Rows) of
+ true ->
+ [];
+ false ->
+ lists:sublist(lists:nthtail(Skip, Rows), Limit)
+ end,
+
+ SkipLimitOpts = [{skip, Skip}, {limit, Limit} | Opts],
+ {ok, {?DOC_COUNT, RevRows}} =
+ fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], SkipLimitOpts),
+ OutRows = lists:reverse(RevRows),
+ ?assertEqual(ExpectRows, OutRows).
+
+
+make_opts(fwd, StartKey, EndKey, InclusiveEnd) ->
+ DirOpts = case rand:uniform() =< 0.50 of
+ true -> [{dir, fwd}];
+ false -> []
+ end,
+ StartOpts = case StartKey of
+ undefined -> [];
+ <<_/binary>> -> [{start_key, StartKey}]
+ end,
+ EndOpts = case EndKey of
+ undefined -> [];
+ <<_/binary>> when InclusiveEnd -> [{end_key, EndKey}];
+ <<_/binary>> -> [{end_key_gt, EndKey}]
+ end,
+ DirOpts ++ StartOpts ++ EndOpts;
+make_opts(rev, StartKey, EndKey, InclusiveEnd) ->
+ BaseOpts = make_opts(fwd, EndKey, StartKey, InclusiveEnd),
+ [{dir, rev}] ++ BaseOpts -- [{dir, fwd}].
+
+
+all_but_last([]) ->
+ [];
+all_but_last([_]) ->
+ [];
+all_but_last(Rows) ->
+ lists:sublist(Rows, length(Rows) - 1).
+
+
+pick_range(DocIdRevs) ->
+ {StartKey, StartRow, RestRows} = pick_start_key(DocIdRevs),
+ {EndKey, EndRow, RowsBetween} = pick_end_key(RestRows),
+ {StartKey, EndKey, StartRow ++ RowsBetween ++ EndRow}.
+
+
+pick_start_key(Rows) ->
+ case rand:uniform() =< 0.1 of
+ true ->
+ {undefined, [], Rows};
+ false ->
+ Idx = rand:uniform(length(Rows)),
+ {DocId, _} = Row = lists:nth(Idx, Rows),
+ {DocId, [Row], lists:nthtail(Idx, Rows)}
+ end.
+
+
+pick_end_key([]) ->
+ {undefined, [], []};
+
+pick_end_key(Rows) ->
+ case rand:uniform() =< 0.1 of
+ true ->
+ {undefined, [], Rows};
+ false ->
+ Idx = rand:uniform(length(Rows)),
+ {DocId, _} = Row = lists:nth(Idx, Rows),
+ Tail = lists:nthtail(Idx, Rows),
+ {DocId, [Row], Rows -- [Row | Tail]}
+ end.
+
+
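+% The fold callback threads {Total, Rows} through the fold: the {meta, _}
+% clause captures the reported total, each {row, _} clause prepends a
+% {DocId, Rev} pair, and complete returns the accumulator. Matching RowId
+% against both the id and the key fields doubles as an assertion that the
+% two are equal for local docs.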
+fold_fun({meta, Meta}, _Acc) ->
+ Total = fabric2_util:get_value(total, Meta),
+ {ok, {Total, []}};
+fold_fun({row, Row}, {Total, Rows}) ->
+ RowId = fabric2_util:get_value(id, Row),
+ RowId = fabric2_util:get_value(key, Row),
+ RowRev = fabric2_util:get_value(value, Row),
+ {ok, {Total, [{RowId, RowRev} | Rows]}};
+fold_fun(complete, Acc) ->
+ {ok, Acc}.
diff --git a/src/fabric/test/fabric2_node_types_tests.erl b/src/fabric/test/fabric2_node_types_tests.erl
new file mode 100644
index 000000000..074afe86b
--- /dev/null
+++ b/src/fabric/test/fabric2_node_types_tests.erl
@@ -0,0 +1,66 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_node_types_tests).
+
+
+-include_lib("eunit/include/eunit.hrl").
+-include("fabric2_test.hrl").
+
+
+node_types_test_() ->
+ {
+ "Test node types",
+ setup,
+ fun() ->
+ os:putenv("COUCHDB_NODE_TYPE_FOO", "false"),
+ os:putenv("COUCHDB_NODE_TYPE_BAZ", "true"),
+ os:putenv("COUCHDB_NODE_TYPE_ZIG", ""),
+ % erlfdb, rexi and mem3 are all apps that fabric depends on. Make sure
+ % they are started so that when fabric is started during the test its
+ % dependencies are already running
+ test_util:start_couch([erlfdb, rexi, mem3, ctrace, fabric])
+ end,
+ fun(Ctx) ->
+ test_util:stop_couch(Ctx),
+ application:unset_env(fabric, node_types),
+ os:unsetenv("COUCHDB_NODE_TYPE_FOO"),
+ os:unsetenv("COUCHDB_NODE_TYPE_BAZ"),
+ os:unsetenv("COUCHDB_NODE_TYPE_ZIG")
+ end,
+ with([
+ ?TDEF(basics),
+ ?TDEF(os_env_priority)
+ ])
+ }.
+
+
+basics(_) ->
+ % default is true for new types
+ ?assert(fabric2_node_types:is_type(some_new_node_type)),
+
+ % defined in os env
+ ?assert(fabric2_node_types:is_type(baz)),
+ ?assert(not fabric2_node_types:is_type(foo)),
+ ?assert(fabric2_node_types:is_type(zig)),
+
+ % defined in app env
+ application:set_env(fabric, node_types, [{zag, true}, {bam, false}]),
+ ?assert(fabric2_node_types:is_type(zag)),
+ ?assert(not fabric2_node_types:is_type(bam)).
+
+
+os_env_priority(_) ->
+ % os env takes precedence
+ application:set_env(fabric, node_types, [{foo, true}, {baz, false}]),
+ ?assert(not fabric2_node_types:is_type(foo)),
+ ?assert(fabric2_node_types:is_type(baz)).
diff --git a/src/fabric/test/fabric2_rev_stemming.erl b/src/fabric/test/fabric2_rev_stemming.erl
new file mode 100644
index 000000000..62ce6901a
--- /dev/null
+++ b/src/fabric/test/fabric2_rev_stemming.erl
@@ -0,0 +1,205 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_rev_stemming).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("fabric2_test.hrl").
+
+
+doc_crud_test_() ->
+ {
+ "Test document CRUD operations with stemming",
+ {
+ setup,
+ fun setup/0,
+ fun cleanup/1,
+ with([
+ ?TDEF(update_doc),
+ ?TDEF(update_doc_replicated_no_stemming),
+ ?TDEF(update_doc_replicated_with_stemming),
+ ?TDEF(update_doc_replicate_existing_rev),
+ ?TDEF(update_winning_conflict_branch),
+ ?TDEF(update_non_winning_conflict_branch),
+ ?TDEF(delete_doc_basic),
+ ?TDEF(recreate_doc_basic)
+ ])
+ }
+ }.
+
+
+setup() ->
+ Ctx = test_util:start_couch([fabric]),
+ {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+ {Db, Ctx}.
+
+
+cleanup({Db, Ctx}) ->
+ ok = fabric2_db:delete(fabric2_db:name(Db), []),
+ test_util:stop_couch(Ctx).
+
+
+update_doc({Db, _}) ->
+ ok = fabric2_db:set_revs_limit(Db, 2),
+ Doc1 = #doc{id = fabric2_util:uuid()},
+ {ok, {Pos1, Rev1}} = fabric2_db:update_doc(Db, Doc1),
+ Doc2 = Doc1#doc{revs = {Pos1, [Rev1]}},
+ {ok, {Pos2, Rev2}} = fabric2_db:update_doc(Db, Doc2),
+ Doc3 = Doc2#doc{revs = {Pos2, [Rev2, Rev1]}},
+ ?assertEqual({ok, Doc3}, fabric2_db:open_doc(Db, Doc2#doc.id)),
+
+ {ok, {_, Rev3}} = fabric2_db:update_doc(Db, Doc3),
+ {ok, Doc4} = fabric2_db:open_doc(Db, Doc3#doc.id),
+ ?assertEqual({3, [Rev3, Rev2]}, Doc4#doc.revs).
+
+
+update_doc_replicated_no_stemming({Db, _}) ->
+ ok = fabric2_db:set_revs_limit(Db, 2),
+ Rev1 = fabric2_util:uuid(),
+ Rev2 = fabric2_util:uuid(),
+ Doc = #doc{
+ id = fabric2_util:uuid(),
+ revs = {2, [Rev2, Rev1]}
+ },
+ {ok, _} = fabric2_db:update_doc(Db, Doc, [replicated_changes]),
+ {ok, #doc{revs = Revs}} = fabric2_db:open_doc(Db, Doc#doc.id),
+ ?assertEqual({2, [Rev2, Rev1]}, Revs).
+
+
+update_doc_replicated_with_stemming({Db, _}) ->
+ ok = fabric2_db:set_revs_limit(Db, 1),
+ Rev1 = fabric2_util:uuid(),
+ Rev2 = fabric2_util:uuid(),
+ Doc = #doc{
+ id = fabric2_util:uuid(),
+ revs = {2, [Rev2, Rev1]}
+ },
+ {ok, _} = fabric2_db:update_doc(Db, Doc, [replicated_changes]),
+ {ok, #doc{revs = Revs}} = fabric2_db:open_doc(Db, Doc#doc.id),
+ ?assertEqual({2, [Rev2]}, Revs).
+
+
+update_doc_replicate_existing_rev({Db, _}) ->
+ ok = fabric2_db:set_revs_limit(Db, 1),
+ Rev1 = fabric2_util:uuid(),
+ Rev2 = fabric2_util:uuid(),
+ Doc1 = #doc{
+ id = fabric2_util:uuid(),
+ revs = {2, [Rev2, Rev1]}
+ },
+ {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+ {ok, []} = fabric2_db:update_docs(Db, [Doc1], [replicated_changes]),
+ {ok, Doc} = fabric2_db:open_doc(Db, Doc1#doc.id),
+ ?assertEqual({2, [Rev2]}, Doc#doc.revs).
+
+
+update_winning_conflict_branch({Db, _}) ->
+ ok = fabric2_db:set_revs_limit(Db, 2),
+ [Rev1, Rev2, Rev3] = lists:sort([
+ fabric2_util:uuid(),
+ fabric2_util:uuid(),
+ fabric2_util:uuid()
+ ]),
+ Doc1 = #doc{
+ id = fabric2_util:uuid(),
+ revs = {2, [Rev3, Rev1]},
+ body = {[{<<"foo">>, <<"bar">>}]}
+ },
+ {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+ Doc2 = Doc1#doc{
+ revs = {2, [Rev2, Rev1]},
+ body = {[{<<"bar">>, <<"foo">>}]}
+ },
+ {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+ % Update the winning branch
+ Doc3 = Doc1#doc{
+ revs = {2, [Rev3, Rev1]},
+ body = {[{<<"baz">>, 2}]}
+ },
+ {ok, {3, Rev4}} = fabric2_db:update_doc(Db, Doc3),
+ {ok, Doc4} = fabric2_db:open_doc(Db, Doc3#doc.id),
+ % Assert we've got the correct winner
+ ?assertEqual({3, [Rev4, Rev3]}, Doc4#doc.revs),
+ ?assertEqual(Doc3#doc{revs = undefined}, Doc4#doc{revs = undefined}).
+
+
+update_non_winning_conflict_branch({Db, _}) ->
+ ok = fabric2_db:set_revs_limit(Db, 2),
+ [Rev1, Rev2, Rev3] = lists:sort([
+ fabric2_util:uuid(),
+ fabric2_util:uuid(),
+ fabric2_util:uuid()
+ ]),
+ Doc1 = #doc{
+ id = fabric2_util:uuid(),
+ revs = {2, [Rev3, Rev1]},
+ body = {[{<<"foo">>, <<"bar">>}]}
+ },
+ {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+ Doc2 = Doc1#doc{
+ revs = {2, [Rev2, Rev1]},
+ body = {[{<<"bar">>, <<"foo">>}]}
+ },
+ {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+ % Update the non winning branch
+ Doc3 = Doc1#doc{
+ revs = {2, [Rev2, Rev1]},
+ body = {[{<<"baz">>, 2}]}
+ },
+ {ok, {3, Rev4}} = fabric2_db:update_doc(Db, Doc3),
+ {ok, Doc4} = fabric2_db:open_doc(Db, Doc3#doc.id),
+ % Assert we've got the correct winner
+ ?assertEqual({3, [Rev4, Rev2]}, Doc4#doc.revs).
+
+
+delete_doc_basic({Db, _}) ->
+ ok = fabric2_db:set_revs_limit(Db, 1),
+ Doc1 = #doc{
+ id = fabric2_util:uuid(),
+ body = {[{<<"state">>, 1}]}
+ },
+ {ok, {Pos1, Rev1}} = fabric2_db:update_doc(Db, Doc1),
+ Doc2 = Doc1#doc{
+ revs = {Pos1, [Rev1]},
+ deleted = true,
+ body = {[{<<"state">>, 2}]}
+ },
+ {ok, {Pos2, Rev2}} = fabric2_db:update_doc(Db, Doc2),
+ Doc3 = Doc2#doc{revs = {Pos2, [Rev2]}},
+ ?assertEqual({ok, Doc3}, fabric2_db:open_doc(Db, Doc2#doc.id, [deleted])).
+
+
+recreate_doc_basic({Db, _}) ->
+ ok = fabric2_db:set_revs_limit(Db, 1),
+ Doc1 = #doc{
+ id = fabric2_util:uuid(),
+ body = {[{<<"state">>, 1}]}
+ },
+ {ok, {1, Rev1}} = fabric2_db:update_doc(Db, Doc1),
+ Doc2 = Doc1#doc{
+ revs = {1, [Rev1]},
+ deleted = true,
+ body = {[{<<"state">>, 2}]}
+ },
+ {ok, {2, _Rev2}} = fabric2_db:update_doc(Db, Doc2),
+ Doc3 = Doc1#doc{
+ revs = {0, []},
+ deleted = false,
+ body = {[{<<"state">>, 3}]}
+ },
+ {ok, {3, Rev3}} = fabric2_db:update_doc(Db, Doc3),
+ {ok, Doc4} = fabric2_db:open_doc(Db, Doc3#doc.id),
+ ?assertEqual({3, [Rev3]}, Doc4#doc.revs),
+ ?assertEqual(Doc3#doc{revs = undefined}, Doc4#doc{revs = undefined}).
diff --git a/src/fabric/test/fabric2_test.hrl b/src/fabric/test/fabric2_test.hrl
new file mode 100644
index 000000000..9239096fc
--- /dev/null
+++ b/src/fabric/test/fabric2_test.hrl
@@ -0,0 +1,33 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+% Some test modules do not use with/1, so squash the unused function compiler warning
+-compile([{nowarn_unused_function, [{with, 1}]}]).
+
+
+-define(TDEF(Name), {atom_to_list(Name), fun Name/1}).
+-define(TDEF(Name, Timeout), {atom_to_list(Name), Timeout, fun Name/1}).
+
+-define(TDEF_FE(Name), fun(Arg) -> {atom_to_list(Name), ?_test(Name(Arg))} end).
+-define(TDEF_FE(Name, Timeout), fun(Arg) -> {atom_to_list(Name), {timeout, Timeout, ?_test(Name(Arg))}} end).
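+
+% Example usage (illustrative only):
+%
+%     my_test_() ->
+%         {
+%             setup,
+%             fun setup/0,
+%             fun cleanup/1,
+%             with([
+%                 ?TDEF(check_something),
+%                 ?TDEF(check_something_slow, 30)
+%             ])
+%         }.
+%
+% Each check_* function receives the value returned by setup/0 as its single
+% argument. ?TDEF_FE builds the equivalent per-test instantiator for use in
+% foreach fixtures.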
+
+
+with(Tests) ->
+ fun(ArgsTuple) ->
+ lists:map(fun
+ ({Name, Fun}) ->
+ {Name, ?_test(Fun(ArgsTuple))};
+ ({Name, Timeout, Fun}) ->
+ {Name, {timeout, Timeout, ?_test(Fun(ArgsTuple))}}
+ end, Tests)
+ end.
diff --git a/src/fabric/test/fabric2_test_util.erl b/src/fabric/test/fabric2_test_util.erl
new file mode 100644
index 000000000..acbe252b1
--- /dev/null
+++ b/src/fabric/test/fabric2_test_util.erl
@@ -0,0 +1,76 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_test_util).
+
+
+-export([
+ tx_too_old_mock_erlfdb/0,
+ tx_too_old_setup_errors/2,
+ tx_too_old_reset_errors/0,
+ tx_too_old_raise_in_user_fun/0
+]).
+
+
+-define(PDICT_ERROR_IN_FOLD_RANGE, '$fabric2_error_in_fold_range').
+-define(PDICT_ERROR_IN_USER_FUN, '$fabric2_error_throw_in_user_fun').
+
+
+% Set of functions to test scenarios where FDB throws transaction_too_old
+% (1007) errors. The general pattern is to call tx_too_old_mock_erlfdb() in
+% setup. Then, before each test, call tx_too_old_setup_errors(UserErrs,
+% FoldErrs), which sets how and when the error will be thrown.
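+%
+% For example (illustrative only), a test whose user fun calls
+% tx_too_old_raise_in_user_fun() might do:
+%
+%     fabric2_test_util:tx_too_old_setup_errors({1, 2}, 0)
+%
+% which lets the first user fun invocation succeed, raises the 1007 error on
+% the next two invocations, and never raises it from the mocked fold_range
+% callback. tx_too_old_reset_errors() clears the counters between tests.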
+
+tx_too_old_mock_erlfdb() ->
+ meck:expect(erlfdb, fold_range, fun(Tx, Start, End, Callback, Acc, Opts) ->
+ MockFun = fun(Row, InnerAcc) ->
+ maybe_tx_too_old(?PDICT_ERROR_IN_FOLD_RANGE),
+ Callback(Row, InnerAcc)
+ end,
+ meck:passthrough([Tx, Start, End, MockFun, Acc, Opts])
+ end).
+
+
+tx_too_old_setup_errors(UserCnt, FoldErrs) when is_integer(UserCnt) ->
+ tx_too_old_setup_errors({0, UserCnt}, FoldErrs);
+
+tx_too_old_setup_errors(UserErrs, FoldCnt) when is_integer(FoldCnt) ->
+ tx_too_old_setup_errors(UserErrs, {0, FoldCnt});
+
+tx_too_old_setup_errors({UserSkip, UserCnt}, {FoldSkip, FoldCnt}) ->
+ put(?PDICT_ERROR_IN_USER_FUN, {UserSkip, UserCnt}),
+ put(?PDICT_ERROR_IN_FOLD_RANGE, {FoldSkip, FoldCnt}).
+
+
+tx_too_old_reset_errors() ->
+ erase(?PDICT_ERROR_IN_FOLD_RANGE),
+ erase(?PDICT_ERROR_IN_USER_FUN).
+
+
+tx_too_old_raise_in_user_fun() ->
+ maybe_tx_too_old(?PDICT_ERROR_IN_USER_FUN).
+
+
+% Private functions
+
+maybe_tx_too_old(Key) ->
+ case get(Key) of
+ {Skip, Count} when is_integer(Skip), Skip > 0 ->
+ put(Key, {Skip - 1, Count});
+ {0, Count} when is_integer(Count), Count > 0 ->
+ put(Key, {0, Count - 1}),
+ error({erlfdb_error, 1007});
+ {0, 0} ->
+ ok;
+ undefined ->
+ ok
+ end.
diff --git a/src/fabric/test/fabric2_trace_db_create_tests.erl b/src/fabric/test/fabric2_trace_db_create_tests.erl
new file mode 100644
index 000000000..926219f6a
--- /dev/null
+++ b/src/fabric/test/fabric2_trace_db_create_tests.erl
@@ -0,0 +1,47 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_trace_db_create_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("fabric2_test.hrl").
+
+
+trace_test_() ->
+ {
+ "Trace operation",
+ {
+ setup,
+ fun setup/0,
+ fun cleanup/1,
+ with([
+ ?TDEF(create_db)
+ ])
+ }
+ }.
+
+
+setup() ->
+ put(erlfdb_trace, "starting fabric"),
+ test_util:start_couch([fabric]).
+
+
+cleanup(Ctx) ->
+ test_util:stop_couch(Ctx).
+
+
+create_db(_) ->
+ put(erlfdb_trace, <<"create db">>),
+ {ok, _Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]).
diff --git a/src/fabric/test/fabric2_trace_db_delete_tests.erl b/src/fabric/test/fabric2_trace_db_delete_tests.erl
new file mode 100644
index 000000000..ac92c5335
--- /dev/null
+++ b/src/fabric/test/fabric2_trace_db_delete_tests.erl
@@ -0,0 +1,50 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_trace_db_delete_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("fabric2_test.hrl").
+
+
+trace_test_() ->
+ {
+ "Trace operation",
+ {
+ setup,
+ fun setup/0,
+ fun cleanup/1,
+ with([
+ ?TDEF(delete_db)
+ ])
+ }
+ }.
+
+
+setup() ->
+ put(erlfdb_trace, "starting fabric"),
+ Ctx = test_util:start_couch([fabric]),
+ {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+ {Db, Ctx}.
+
+
+cleanup({_Db, Ctx}) ->
+ test_util:stop_couch(Ctx).
+
+
+delete_db({Db, _}) ->
+ put(erlfdb_trace, <<"delete db">>),
+ fabric2_server:remove(fabric2_db:name(Db)),
+ ok = fabric2_db:delete(fabric2_db:name(Db), []).
diff --git a/src/fabric/test/fabric2_trace_db_open_tests.erl b/src/fabric/test/fabric2_trace_db_open_tests.erl
new file mode 100644
index 000000000..3602b50e1
--- /dev/null
+++ b/src/fabric/test/fabric2_trace_db_open_tests.erl
@@ -0,0 +1,51 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_trace_db_open_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("fabric2_test.hrl").
+
+
+trace_test_() ->
+ {
+ "Trace operation",
+ {
+ setup,
+ fun setup/0,
+ fun cleanup/1,
+ with([
+ ?TDEF(open_db)
+ ])
+ }
+ }.
+
+
+setup() ->
+ put(erlfdb_trace, "starting fabric"),
+ Ctx = test_util:start_couch([fabric]),
+ {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+ {Db, Ctx}.
+
+
+cleanup({Db, Ctx}) ->
+ ok = fabric2_db:delete(fabric2_db:name(Db), []),
+ test_util:stop_couch(Ctx).
+
+
+open_db({Db, _}) ->
+ put(erlfdb_trace, <<"open db">>),
+ fabric2_server:remove(fabric2_db:name(Db)),
+ {ok, _Db} = fabric2_db:open(fabric2_db:name(Db), [{user_ctx, ?ADMIN_USER}]).
diff --git a/src/fabric/test/fabric2_trace_doc_create_tests.erl b/src/fabric/test/fabric2_trace_doc_create_tests.erl
new file mode 100644
index 000000000..888039d05
--- /dev/null
+++ b/src/fabric/test/fabric2_trace_doc_create_tests.erl
@@ -0,0 +1,87 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_trace_doc_create_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("fabric2_test.hrl").
+
+
+trace_doc_create_test_() ->
+ {
+ "Test document CRUD operations",
+ {
+ setup,
+ fun setup/0,
+ fun cleanup/1,
+ with([
+ ?TDEF(create_new_doc),
+ ?TDEF(create_two_docs),
+ ?TDEF(create_50_docs)
+ ])
+ }
+ }.
+
+
+setup() ->
+ Ctx = test_util:start_couch([fabric]),
+ {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+ {Db, Ctx}.
+
+
+cleanup({Db, Ctx}) ->
+ ok = fabric2_db:delete(fabric2_db:name(Db), []),
+ test_util:stop_couch(Ctx).
+
+
+create_new_doc({Db, _}) ->
+ put(erlfdb_trace, <<"one doc">>),
+ Doc = #doc{
+ id = fabric2_util:uuid(),
+ body = {[{<<"foo">>, <<"bar">>}]}
+ },
+ {ok, _} = fabric2_db:update_doc(Db, Doc).
+
+
+create_two_docs({Db, _}) ->
+ put(erlfdb_trace, <<"two docs">>),
+ Doc1 = #doc{
+ id = fabric2_util:uuid(),
+ body = {[{<<"bam">>, <<"baz">>}]}
+ },
+ Doc2 = #doc{
+ id = fabric2_util:uuid(),
+ body = {[{<<"bang">>, <<"bargle">>}]}
+ },
+ {ok, _} = fabric2_db:update_docs(Db, [Doc1, Doc2]).
+
+
+create_50_docs({Db, _}) ->
+ lists:foreach(fun(_) ->
+ spawn_monitor(fun() ->
+ Name = io_lib:format("50 docs : ~w", [self()]),
+ put(erlfdb_trace, iolist_to_binary(Name)),
+ Docs = lists:map(fun(Val) ->
+ #doc{
+ id = fabric2_util:uuid(),
+ body = {[{<<"value">>, Val}]}
+ }
+ end, lists:seq(1, 50)),
+ {ok, _} = fabric2_db:update_docs(Db, Docs)
+ end)
+ end, lists:seq(1, 5)),
+ lists:foreach(fun(_) ->
+ receive {'DOWN', _, _, _, _} -> ok end
+ end, lists:seq(1, 5)).
diff --git a/src/fabric/test/fabric2_tx_options_tests.erl b/src/fabric/test/fabric2_tx_options_tests.erl
new file mode 100644
index 000000000..b93cc3d69
--- /dev/null
+++ b/src/fabric/test/fabric2_tx_options_tests.erl
@@ -0,0 +1,103 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_tx_options_tests).
+
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include("fabric2_test.hrl").
+-include("fabric2.hrl").
+
+
+fdb_tx_options_test_() ->
+ {
+ "Test setting default transaction options",
+ setup,
+ fun() ->
+ meck:new(erlfdb, [passthrough]),
+ % erlfdb, rexi and mem3 are all apps that fabric depends on. Make sure
+ % they are started so that when fabric is started during the test its
+ % dependencies are already running
+ test_util:start_couch([erlfdb, rexi, mem3, ctrace, fabric])
+ end,
+ fun(Ctx) ->
+ meck:unload(),
+
+ config:delete("fdb_tx_options", "size_limit", false),
+ config:delete("fdb_tx_options", "max_retry_delay", false),
+ config:delete("fdb_tx_options", "machine_id", false),
+ config:delete("fdb_tx_options", "datacenter_id", false),
+
+ test_util:stop_couch(Ctx)
+ end,
+ with([
+ ?TDEF(options_take_effect, 15),
+ ?TDEF(can_configure_options_at_runtime, 15)
+ ])
+ }.
+
+
+options_take_effect(_) ->
+ ok = application:stop(fabric),
+
+ % Try one of each type including some invalid values
+ config:set("fdb_tx_options", "size_limit", "150000", false),
+ config:set("fdb_tx_options", "max_retry_delay", "badness", false),
+ config:set("fdb_tx_options", "machine_id", "123abc", false),
+ TooLong = ["x" || _ <- lists:seq(1, 1000)],
+ config:set("fdb_tx_options", "datacenter_id", TooLong, false),
+ ok = application:start(fabric),
+
+ DbName = ?tempdb(),
+ {ok, Db} = fabric2_db:create(DbName, [?ADMIN_CTX]),
+ ?assertError({erlfdb_error, ?TRANSACTION_TOO_LARGE},
+ add_large_doc(Db, 200000)),
+ ok = fabric2_db:delete(DbName, [?ADMIN_CTX]).
+
+
+can_configure_options_at_runtime(_) ->
+ meck:expect(erlfdb, set_option, fun(Fdb, Option, Val) ->
+ meck:passthrough([Fdb, Option, Val])
+ end),
+
+ meck:reset(erlfdb),
+
+ config:set("fdb_tx_options", "size_limit", "150000", false),
+ meck:wait(erlfdb, set_option, ['_', size_limit, 150000], 4000),
+
+ DbName = ?tempdb(),
+
+ {ok, Db} = fabric2_db:create(DbName, [?ADMIN_CTX]),
+ ?assertError({erlfdb_error, ?TRANSACTION_TOO_LARGE},
+ add_large_doc(Db, 200000)),
+
+ meck:reset(erlfdb),
+
+ config:delete("fdb_tx_options", "size_limit", false),
+ % Assert that we get a new handle and are setting our default values
+ meck:wait(erlfdb, set_option, ['_', timeout, '_'], 4000),
+ erase(?PDICT_DB_KEY),
+
+ {ok, Db1} = fabric2_db:open(DbName, [?ADMIN_CTX]),
+ ?assertMatch({ok, _}, add_large_doc(Db1, 200000)),
+
+ ok = fabric2_db:delete(DbName, [?ADMIN_CTX]).
+
+
+add_large_doc(Db, Size) ->
+ Doc = #doc{
+ id = fabric2_util:uuid(),
+ body = {[{<<"x">>, crypto:strong_rand_bytes(Size)}]}
+ },
+ fabric2_db:update_doc(Db, Doc).
diff --git a/src/fabric/test/fabric2_update_docs_tests.erl b/src/fabric/test/fabric2_update_docs_tests.erl
new file mode 100644
index 000000000..469fa0d1b
--- /dev/null
+++ b/src/fabric/test/fabric2_update_docs_tests.erl
@@ -0,0 +1,208 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_update_docs_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("fabric2_test.hrl").
+
+
+update_docs_test_() ->
+ {
+ "Test update_docs",
+ {
+ setup,
+ fun setup_all/0,
+ fun teardown_all/1,
+ {
+ foreach,
+ fun setup/0,
+ fun cleanup/1,
+ [
+ ?TDEF_FE(update_docs),
+ ?TDEF_FE(update_docs_replicated),
+ ?TDEF_FE(update_docs_batches),
+ ?TDEF_FE(update_docs_replicated_batches),
+ ?TDEF_FE(update_docs_duplicate_ids_conflict),
+ ?TDEF_FE(update_docs_duplicate_ids_with_batches),
+ ?TDEF_FE(update_docs_replicate_batches_duplicate_id)
+ ]
+ }
+ }
+ }.
+
+
+setup_all() ->
+ test_util:start_couch([fabric]).
+
+
+teardown_all(Ctx) ->
+ test_util:stop_couch(Ctx).
+
+
+setup() ->
+ {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+ Db.
+
+
+cleanup(#{} = Db) ->
+ ok = fabric2_db:delete(fabric2_db:name(Db), []).
+
+
+update_docs(Db) ->
+ ?assertEqual({ok, []}, fabric2_db:update_docs(Db, [])),
+
+ Doc1 = doc(),
+ Res1 = fabric2_db:update_docs(Db, [Doc1]),
+ ?assertMatch({ok, [_]}, Res1),
+ {ok, [Doc1Res]} = Res1,
+ ?assertMatch({ok, {1, <<_/binary>>}}, Doc1Res),
+ {ok, {1, Rev1}} = Doc1Res,
+ {ok, Doc1Open} = fabric2_db:open_doc(Db, Doc1#doc.id),
+ ?assertEqual(Doc1#doc{revs = {1, [Rev1]}}, Doc1Open),
+
+ Doc2 = doc(),
+ Doc3 = doc(),
+ Res2 = fabric2_db:update_docs(Db, [Doc2, Doc3]),
+ ?assertMatch({ok, [_, _]}, Res2),
+ {ok, [Doc2Res, Doc3Res]} = Res2,
+ ?assertMatch({ok, {1, <<_/binary>>}}, Doc2Res),
+ ?assertMatch({ok, {1, <<_/binary>>}}, Doc3Res).
+
+
+update_docs_replicated(Db) ->
+ Opts = [replicated_changes],
+
+ ?assertEqual({ok, []}, fabric2_db:update_docs(Db, [], Opts)),
+
+ Doc1 = doc(10, {1, [rev()]}),
+ ?assertMatch({ok, []}, fabric2_db:update_docs(Db, [Doc1], Opts)),
+ {ok, Doc1Open} = fabric2_db:open_doc(Db, Doc1#doc.id),
+ ?assertEqual(Doc1, Doc1Open),
+
+ Doc2 = doc(10, {1, [rev()]}),
+ Doc3 = doc(10, {1, [rev()]}),
+ ?assertMatch({ok, []}, fabric2_db:update_docs(Db, [Doc2, Doc3], Opts)),
+ {ok, Doc2Open} = fabric2_db:open_doc(Db, Doc2#doc.id),
+ ?assertEqual(Doc2, Doc2Open),
+ {ok, Doc3Open} = fabric2_db:open_doc(Db, Doc3#doc.id),
+ ?assertEqual(Doc3, Doc3Open).
+
+
+update_docs_batches(Db) ->
+ Opts = [{batch_size, 5000}],
+
+ Docs1 = [doc(9000), doc(9000)],
+
+ ?assertMatch({ok, [_ | _]}, fabric2_db:update_docs(Db, Docs1, Opts)),
+
+ lists:foreach(fun(#doc{} = Doc) ->
+ ?assertMatch({ok, #doc{}}, fabric2_db:open_doc(Db, Doc#doc.id))
+ end, Docs1),
+
+ Docs2 = [doc(10), doc(10), doc(9000), doc(10)],
+
+ ?assertMatch({ok, [_ | _]}, fabric2_db:update_docs(Db, Docs2, Opts)),
+
+ lists:foreach(fun(#doc{} = Doc) ->
+ ?assertMatch({ok, #doc{}}, fabric2_db:open_doc(Db, Doc#doc.id))
+ end, Docs2).
+
+
+update_docs_replicated_batches(Db) ->
+ Opts = [{batch_size, 5000}, replicated_changes],
+
+ Docs1 = [doc(Size, {1, [rev()]}) || Size <- [9000, 9000]],
+
+ ?assertMatch({ok, []}, fabric2_db:update_docs(Db, Docs1, Opts)),
+
+ lists:foreach(fun(#doc{} = Doc) ->
+ ?assertEqual({ok, Doc}, fabric2_db:open_doc(Db, Doc#doc.id))
+ end, Docs1),
+
+ Docs2 = [doc(Size, {1, [rev()]}) || Size <- [10, 10, 9000, 10]],
+
+ ?assertMatch({ok, []}, fabric2_db:update_docs(Db, Docs2, Opts)),
+
+ lists:foreach(fun(#doc{} = Doc) ->
+ ?assertEqual({ok, Doc}, fabric2_db:open_doc(Db, Doc#doc.id))
+ end, Docs2).
+
+
+update_docs_duplicate_ids_conflict(Db) ->
+ Doc = doc(),
+
+ Res = fabric2_db:update_docs(Db, [Doc, doc(), Doc]),
+ ?assertMatch({ok, [_, _, _]}, Res),
+
+ {ok, [Doc1Res, Doc2Res, Doc3Res]} = Res,
+ ?assertMatch({ok, {1, <<_/binary>>}}, Doc1Res),
+ ?assertMatch({ok, {1, <<_/binary>>}}, Doc2Res),
+ ?assertMatch(conflict, Doc3Res).
+
+
+update_docs_duplicate_ids_with_batches(Db) ->
+ Opts = [{batch_size, 5000}],
+
+ Doc = doc(9000),
+
+ Res = fabric2_db:update_docs(Db, [Doc, doc(9000), Doc], Opts),
+ ?assertMatch({ok, [_, _, _]}, Res),
+
+ {ok, [Doc1Res, Doc2Res, Doc3Res]} = Res,
+ ?assertMatch({ok, {1, <<_/binary>>}}, Doc1Res),
+ ?assertMatch({ok, {1, <<_/binary>>}}, Doc2Res),
+ ?assertMatch(conflict, Doc3Res).
+
+
+update_docs_replicate_batches_duplicate_id(Db) ->
+ Opts = [replicated_changes],
+
+ Doc = doc(10, {1, [rev()]}),
+ Docs = [Doc, Doc],
+
+ ?assertMatch({ok, []}, fabric2_db:update_docs(Db, Docs, Opts)),
+
+ ?assertEqual({ok, Doc}, fabric2_db:open_doc(Db, Doc#doc.id)).
+
+
+% Utility functions
+
+doc() ->
+ doc(2).
+
+
+doc(Size) ->
+ doc(Size, undefined).
+
+
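+% doc(Size, Revs) builds a doc with a random id, a roughly Size-byte body and
+% an optional preset revision; e.g. doc(9000, {1, [rev()]}) is used in the
+% batch tests above to exceed the 5000 byte batch_size and force update_docs
+% to process the docs in more than one batch.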
+doc(Size, Revs) ->
+ Doc = #doc{
+ id = fabric2_util:uuid(),
+ body = doc_body(Size)
+ },
+ case Revs of
+ undefined -> Doc;
+ _ -> Doc#doc{revs = Revs}
+ end.
+
+
+rev() ->
+ fabric2_util:to_hex(crypto:strong_rand_bytes(16)).
+
+
+doc_body(Size) when is_integer(Size), Size >= 2 ->
+ Val = fabric2_util:to_hex(crypto:strong_rand_bytes(Size div 2)),
+ {[{<<"x">>, Val}]}.
diff --git a/src/global_changes/src/global_changes_httpd_handlers.erl b/src/global_changes/src/global_changes_httpd_handlers.erl
index b21a64b8f..94a50abc8 100644
--- a/src/global_changes/src/global_changes_httpd_handlers.erl
+++ b/src/global_changes/src/global_changes_httpd_handlers.erl
@@ -12,7 +12,7 @@
-module(global_changes_httpd_handlers).
--export([url_handler/1, db_handler/1, design_handler/1]).
+-export([url_handler/1, db_handler/1, design_handler/1, handler_info/3]).
url_handler(<<"_db_updates">>) -> fun global_changes_httpd:handle_global_changes_req/1;
url_handler(_) -> no_match.
@@ -20,3 +20,9 @@ url_handler(_) -> no_match.
db_handler(_) -> no_match.
design_handler(_) -> no_match.
+
+handler_info('GET', [<<"_db_updates">>], _) ->
+ {'db_updates.read', #{}};
+
+handler_info(_, _, _) ->
+ no_match.
\ No newline at end of file
diff --git a/src/global_changes/src/global_changes_server.erl b/src/global_changes/src/global_changes_server.erl
index 7e3062586..a116e0668 100644
--- a/src/global_changes/src/global_changes_server.erl
+++ b/src/global_changes/src/global_changes_server.erl
@@ -25,7 +25,8 @@
handle_call/3,
handle_cast/2,
handle_info/2,
- code_change/3
+ code_change/3,
+ format_status/2
]).
-export([
@@ -143,7 +144,13 @@ handle_info(_, State) ->
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
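+% Scrub the potentially large set of pending doc ids from the state before it
+% is formatted for sys:get_status/1 or crash reports.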
+format_status(_Opt, [_PDict, State]) ->
+ Scrubbed = State#state{
+ pending_updates=nil
+ },
+ [{data, [{"State",
+ ?record_to_keyval(state, Scrubbed)
+ }]}].
flush_updates(State) ->
DocIds = sets:to_list(State#state.pending_updates),
diff --git a/src/ken/src/ken_server.erl b/src/ken/src/ken_server.erl
index b33d01f35..74c8e25ac 100644
--- a/src/ken/src/ken_server.erl
+++ b/src/ken/src/ken_server.erl
@@ -16,7 +16,9 @@
-behaviour(gen_server).
-vsn(1).
-export([init/1, terminate/2]).
--export([handle_call/3, handle_cast/2, handle_info/2, code_change/3]).
+-export([
+ handle_call/3, handle_cast/2, handle_info/2, code_change/3, format_status/2
+]).
% Public interface
-export([start_link/0]).
@@ -228,6 +230,18 @@ handle_info(Msg, State) ->
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
+
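+% Report only the queue length rather than the full job queue when the state
+% is formatted for sys:get_status/1 or crash reports.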
+format_status(_Opt, [_PDict, State]) ->
+ #state{
+ q = Queue
+ } = State,
+ Scrubbed = State#state{
+ q = {queue_length, queue:len(Queue)}
+ },
+ [{data, [{"State",
+ ?record_to_keyval(state, Scrubbed)
+ }]}].
+
%% private functions
maybe_start_next_queued_job(#state{dbworker = {_,_}} = State) ->
diff --git a/src/mango/src/mango_crud.erl b/src/mango/src/mango_crud.erl
index 41a4d143d..66cef65b3 100644
--- a/src/mango/src/mango_crud.erl
+++ b/src/mango/src/mango_crud.erl
@@ -33,10 +33,8 @@ insert(Db, #doc{}=Doc, Opts) ->
insert(Db, [Doc], Opts);
insert(Db, {_}=Doc, Opts) ->
insert(Db, [Doc], Opts);
-insert(Db, Docs, Opts0) when is_list(Docs) ->
- Opts1 = maybe_add_user_ctx(Db, Opts0),
- Opts2 = maybe_int_to_str(w, Opts1),
- case fabric:update_docs(Db, Docs, Opts2) of
+insert(Db, Docs, Opts) when is_list(Docs) ->
+ case fabric2_db:update_docs(Db, Docs, Opts) of
{ok, Results0} ->
{ok, lists:zipwith(fun result_to_json/2, Docs, Results0)};
{accepted, Results0} ->
@@ -46,10 +44,8 @@ insert(Db, Docs, Opts0) when is_list(Docs) ->
end.
-find(Db, Selector, Callback, UserAcc, Opts0) ->
- Opts1 = maybe_add_user_ctx(Db, Opts0),
- Opts2 = maybe_int_to_str(r, Opts1),
- {ok, Cursor} = mango_cursor:create(Db, Selector, Opts2),
+find(Db, Selector, Callback, UserAcc, Opts) ->
+ {ok, Cursor} = mango_cursor:create(Db, Selector, Opts),
mango_cursor:execute(Cursor, Callback, UserAcc).
@@ -99,30 +95,11 @@ delete(Db, Selector, Options) ->
end.
-explain(Db, Selector, Opts0) ->
- Opts1 = maybe_add_user_ctx(Db, Opts0),
- Opts2 = maybe_int_to_str(r, Opts1),
- {ok, Cursor} = mango_cursor:create(Db, Selector, Opts2),
+explain(Db, Selector, Opts) ->
+ {ok, Cursor} = mango_cursor:create(Db, Selector, Opts),
mango_cursor:explain(Cursor).
-maybe_add_user_ctx(Db, Opts) ->
- case lists:keyfind(user_ctx, 1, Opts) of
- {user_ctx, _} ->
- Opts;
- false ->
- [{user_ctx, couch_db:get_user_ctx(Db)} | Opts]
- end.
-
-
-maybe_int_to_str(_Key, []) ->
- [];
-maybe_int_to_str(Key, [{Key, Val} | Rest]) when is_integer(Val) ->
- [{Key, integer_to_list(Val)} | maybe_int_to_str(Key, Rest)];
-maybe_int_to_str(Key, [KV | Rest]) ->
- [KV | maybe_int_to_str(Key, Rest)].
-
-
result_to_json(#doc{id=Id}, Result) ->
result_to_json(Id, Result);
result_to_json({Props}, Result) ->
diff --git a/src/mango/src/mango_cursor.erl b/src/mango/src/mango_cursor.erl
index b1cb4148e..63b449cdc 100644
--- a/src/mango/src/mango_cursor.erl
+++ b/src/mango/src/mango_cursor.erl
@@ -48,7 +48,9 @@
create(Db, Selector0, Opts) ->
Selector = mango_selector:normalize(Selector0),
- UsableIndexes = mango_idx:get_usable_indexes(Db, Selector, Opts),
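+ % Gather the usable indexes inside a single FDB transaction so the index
+ % definitions are read from one consistent snapshot.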
+ UsableIndexes = fabric2_fdb:transactional(Db, fun (TxDb) ->
+ mango_idx:get_usable_indexes(TxDb, Selector, Opts)
+ end),
case mango_cursor:maybe_filter_indexes_by_ddoc(UsableIndexes, Opts) of
[] ->
% use_index doesn't match a valid index - fall back to a valid one
@@ -72,7 +74,6 @@ explain(#cursor{}=Cursor) ->
{[
{dbname, mango_idx:dbname(Idx)},
{index, mango_idx:to_json(Idx)},
- {partitioned, mango_idx:partitioned(Idx)},
{selector, Selector},
{opts, {Opts}},
{limit, Limit},
@@ -206,12 +207,9 @@ invalid_index_warning_int(_, _) ->
% returned, implying a lot of in-memory filtering
index_scan_warning(#execution_stats {
totalDocsExamined = Docs,
- totalQuorumDocsExamined = DocsQuorum,
resultsReturned = ResultCount
}) ->
- % Docs and DocsQuorum are mutually exclusive so it's safe to sum them
- DocsScanned = Docs + DocsQuorum,
- Ratio = calculate_index_scan_ratio(DocsScanned, ResultCount),
+ Ratio = calculate_index_scan_ratio(Docs, ResultCount),
Threshold = config:get_integer("mango", "index_scan_warning_threshold", 10),
case Threshold > 0 andalso Ratio > Threshold of
true ->
diff --git a/src/mango/src/mango_cursor_text.erl b/src/mango/src/mango_cursor_text.erl
index 43ef84e4c..ccf58ad6e 100644
--- a/src/mango/src/mango_cursor_text.erl
+++ b/src/mango/src/mango_cursor_text.erl
@@ -77,7 +77,6 @@ explain(Cursor) ->
} = Cursor,
[
{'query', mango_selector_text:convert(Selector)},
- {partition, get_partition(Opts, null)},
{sort, sort_query(Opts, Selector)}
].
@@ -95,7 +94,6 @@ execute(Cursor, UserFun, UserAcc) ->
Query = mango_selector_text:convert(Selector),
QueryArgs = #index_query_args{
q = Query,
- partition = get_partition(Opts, nil),
sort = sort_query(Opts, Selector),
raw_bookmark = true
},
@@ -250,13 +248,6 @@ sort_query(Opts, Selector) ->
end.
-get_partition(Opts, Default) ->
- case couch_util:get_value(partition, Opts) of
- <<>> -> Default;
- Else -> Else
- end.
-
-
get_bookmark(Opts) ->
case lists:keyfind(bookmark, 1, Opts) of
{_, BM} when is_list(BM), BM /= [] ->
diff --git a/src/mango/src/mango_cursor_view.erl b/src/mango/src/mango_cursor_view.erl
index 240ef501d..4960fa126 100644
--- a/src/mango/src/mango_cursor_view.erl
+++ b/src/mango/src/mango_cursor_view.erl
@@ -19,7 +19,6 @@
]).
-export([
- view_cb/2,
handle_message/2,
handle_all_docs_message/2,
composite_indexes/2,
@@ -32,9 +31,7 @@
-include_lib("fabric/include/fabric.hrl").
-include("mango_cursor.hrl").
--include("mango_idx_view.hrl").
--define(HEARTBEAT_INTERVAL_IN_USEC, 4000000).
create(Db, Indexes, Selector, Opts) ->
FieldRanges = mango_idx_view:field_ranges(Selector),
@@ -73,7 +70,6 @@ explain(Cursor) ->
{include_docs, Args#mrargs.include_docs},
{view_type, Args#mrargs.view_type},
{reduce, Args#mrargs.reduce},
- {partition, couch_mrview_util:get_extra(Args, partition, null)},
{start_key, maybe_replace_max_json(Args#mrargs.start_key)},
{end_key, maybe_replace_max_json(Args#mrargs.end_key)},
{direction, Args#mrargs.direction},
@@ -93,7 +89,8 @@ maybe_replace_max_json(?MAX_STR) ->
<<"<MAX>">>;
maybe_replace_max_json([H | T] = EndKey) when is_list(EndKey) ->
- H1 = if H == ?MAX_JSON_OBJ -> <<"<MAX>">>;
+ MAX_VAL = couch_views_encoding:max(),
+ H1 = if H == MAX_VAL -> <<"<MAX>">>;
true -> H
end,
[H1 | maybe_replace_max_json(T)];
@@ -102,7 +99,7 @@ maybe_replace_max_json(EndKey) ->
EndKey.
-base_args(#cursor{index = Idx, selector = Selector} = Cursor) ->
+base_args(#cursor{index = Idx} = Cursor) ->
{StartKey, EndKey} = case Cursor#cursor.ranges of
[empty] ->
{null, null};
@@ -115,8 +112,7 @@ base_args(#cursor{index = Idx, selector = Selector} = Cursor) ->
reduce = false,
start_key = StartKey,
end_key = EndKey,
- include_docs = true,
- extra = [{callback, {?MODULE, view_cb}}, {selector, Selector}]
+ include_docs = true
}.
@@ -135,18 +131,19 @@ execute(#cursor{db = Db, index = Idx, execution_stats = Stats} = Cursor0, UserFu
#cursor{opts = Opts, bookmark = Bookmark} = Cursor,
Args0 = apply_opts(Opts, BaseArgs),
Args = mango_json_bookmark:update_args(Bookmark, Args0),
- UserCtx = couch_util:get_value(user_ctx, Opts, #user_ctx{}),
- DbOpts = [{user_ctx, UserCtx}],
Result = case mango_idx:def(Idx) of
all_docs ->
CB = fun ?MODULE:handle_all_docs_message/2,
- fabric:all_docs(Db, DbOpts, CB, Cursor, Args);
+ AllDocOpts = fabric2_util:all_docs_view_opts(Args)
+ ++ [{restart_tx, true}],
+ fabric2_db:fold_docs(Db, CB, Cursor, AllDocOpts);
_ ->
CB = fun ?MODULE:handle_message/2,
% Normal view
- DDoc = ddocid(Idx),
+ DDocId = mango_idx:ddoc(Idx),
+ {ok, DDoc} = fabric2_db:open_doc(Db, DDocId),
Name = mango_idx:name(Idx),
- fabric:query_view(Db, DbOpts, DDoc, Name, CB, Cursor, Args)
+ couch_views:query(Db, DDoc, Name, CB, Cursor, Args)
end,
case Result of
{ok, LastCursor} ->
@@ -227,70 +224,10 @@ choose_best_index(_DbName, IndexRanges) ->
{SelectedIndex, SelectedIndexRanges}.
-view_cb({meta, Meta}, Acc) ->
- % Map function starting
- put(mango_docs_examined, 0),
- set_mango_msg_timestamp(),
- ok = rexi:stream2({meta, Meta}),
- {ok, Acc};
-view_cb({row, Row}, #mrargs{extra = Options} = Acc) ->
- ViewRow = #view_row{
- id = couch_util:get_value(id, Row),
- key = couch_util:get_value(key, Row),
- doc = couch_util:get_value(doc, Row)
- },
- case ViewRow#view_row.doc of
- null ->
- maybe_send_mango_ping();
- undefined ->
- % include_docs=false. Use quorum fetch at coordinator
- ok = rexi:stream2(ViewRow),
- set_mango_msg_timestamp();
- Doc ->
- put(mango_docs_examined, get(mango_docs_examined) + 1),
- Selector = couch_util:get_value(selector, Options),
- couch_stats:increment_counter([mango, docs_examined]),
- case mango_selector:match(Selector, Doc) of
- true ->
- ok = rexi:stream2(ViewRow),
- set_mango_msg_timestamp();
- false ->
- maybe_send_mango_ping()
- end
- end,
- {ok, Acc};
-view_cb(complete, Acc) ->
- % Send shard-level execution stats
- ok = rexi:stream2({execution_stats, {docs_examined, get(mango_docs_examined)}}),
- % Finish view output
- ok = rexi:stream_last(complete),
- {ok, Acc};
-view_cb(ok, ddoc_updated) ->
- rexi:reply({ok, ddoc_updated}).
-
-
-maybe_send_mango_ping() ->
- Current = os:timestamp(),
- LastPing = get(mango_last_msg_timestamp),
- % Fabric will timeout if it has not heard a response from a worker node
- % after 5 seconds. Send a ping every 4 seconds so the timeout doesn't happen.
- case timer:now_diff(Current, LastPing) > ?HEARTBEAT_INTERVAL_IN_USEC of
- false ->
- ok;
- true ->
- rexi:ping(),
- set_mango_msg_timestamp()
- end.
-
-
-set_mango_msg_timestamp() ->
- put(mango_last_msg_timestamp, os:timestamp()).
-
-
handle_message({meta, _}, Cursor) ->
{ok, Cursor};
handle_message({row, Props}, Cursor) ->
- case doc_member(Cursor, Props) of
+ case match_doc(Cursor, Props) of
{ok, Doc, {execution_stats, Stats}} ->
Cursor1 = Cursor#cursor {
execution_stats = Stats
@@ -343,29 +280,8 @@ handle_doc(C, _Doc) ->
{stop, C}.
-ddocid(Idx) ->
- case mango_idx:ddoc(Idx) of
- <<"_design/", Rest/binary>> ->
- Rest;
- Else ->
- Else
- end.
-
-
apply_opts([], Args) ->
Args;
-apply_opts([{r, RStr} | Rest], Args) ->
- IncludeDocs = case list_to_integer(RStr) of
- 1 ->
- true;
- R when R > 1 ->
- % We don't load the doc in the view query because
- % we have to do a quorum read in the coordinator
- % so there's no point.
- false
- end,
- NewArgs = Args#mrargs{include_docs = IncludeDocs},
- apply_opts(Rest, NewArgs);
apply_opts([{conflicts, true} | Rest], Args) ->
NewArgs = Args#mrargs{conflicts = true},
apply_opts(Rest, NewArgs);
@@ -410,51 +326,23 @@ apply_opts([{update, false} | Rest], Args) ->
update = false
},
apply_opts(Rest, NewArgs);
-apply_opts([{partition, <<>>} | Rest], Args) ->
- apply_opts(Rest, Args);
-apply_opts([{partition, Partition} | Rest], Args) when is_binary(Partition) ->
- NewArgs = couch_mrview_util:set_extra(Args, partition, Partition),
- apply_opts(Rest, NewArgs);
apply_opts([{_, _} | Rest], Args) ->
% Ignore unknown options
apply_opts(Rest, Args).
-doc_member(Cursor, RowProps) ->
- Db = Cursor#cursor.db,
- Opts = Cursor#cursor.opts,
- ExecutionStats = Cursor#cursor.execution_stats,
- Selector = Cursor#cursor.selector,
- case couch_util:get_value(doc, RowProps) of
- {DocProps} ->
- % only matching documents are returned; the selector
- % is evaluated at the shard level in view_cb({row, Row},
- {ok, {DocProps}, {execution_stats, ExecutionStats}};
- undefined ->
- % an undefined doc was returned, indicating we should
- % perform a quorum fetch
- ExecutionStats1 = mango_execution_stats:incr_quorum_docs_examined(ExecutionStats),
- couch_stats:increment_counter([mango, quorum_docs_examined]),
- Id = couch_util:get_value(id, RowProps),
- case mango_util:defer(fabric, open_doc, [Db, Id, Opts]) of
- {ok, #doc{}=DocProps} ->
- Doc = couch_doc:to_json_obj(DocProps, []),
- match_doc(Selector, Doc, ExecutionStats1);
- Else ->
- Else
- end;
- _ ->
- % no doc, no match
- {no_match, null, {execution_stats, ExecutionStats}}
- end.
-
-
-match_doc(Selector, Doc, ExecutionStats) ->
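+% couch_views folds always run with include_docs = true, so the doc arrives
+% with each row and is matched against the selector here at the coordinator;
+% the old doc_member/2 quorum-fetch path is no longer needed.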
+match_doc(Cursor, RowProps) ->
+ #cursor{
+ execution_stats = Stats0,
+ selector = Selector
+ } = Cursor,
+ Stats1 = mango_execution_stats:incr_docs_examined(Stats0, 1),
+ Doc = couch_util:get_value(doc, RowProps),
case mango_selector:match(Selector, Doc) of
true ->
- {ok, Doc, {execution_stats, ExecutionStats}};
+ {ok, Doc, {execution_stats, Stats1}};
false ->
- {no_match, Doc, {execution_stats, ExecutionStats}}
+ {no_match, Doc, {execution_stats, Stats1}}
end.
@@ -474,34 +362,3 @@ update_bookmark_keys(#cursor{limit = Limit} = Cursor, Props) when Limit > 0 ->
};
update_bookmark_keys(Cursor, _Props) ->
Cursor.
-
-
-%%%%%%%% module tests below %%%%%%%%
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-
-does_not_refetch_doc_with_value_test() ->
- Cursor = #cursor {
- db = <<"db">>,
- opts = [],
- execution_stats = #execution_stats{},
- selector = mango_selector:normalize({[{<<"user_id">>, <<"1234">>}]})
- },
- RowProps = [
- {id,<<"b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4">>},
- {key,<<"b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4">>},
- {doc,{
- [
- {<<"_id">>,<<"b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4">>},
- {<<"_rev">>,<<"1-a954fe2308f14307756067b0e18c2968">>},
- {<<"user_id">>,11}
- ]
- }}
- ],
- {Match, _, _} = doc_member(Cursor, RowProps),
- ?assertEqual(Match, ok).
-
-
--endif.
diff --git a/src/mango/src/mango_epi.erl b/src/mango/src/mango_epi.erl
index 1fcd05b7f..d593d6371 100644
--- a/src/mango/src/mango_epi.erl
+++ b/src/mango/src/mango_epi.erl
@@ -33,7 +33,9 @@ providers() ->
].
services() ->
- [].
+ [
+ {mango, mango_plugin}
+ ].
data_subscriptions() ->
[].
diff --git a/src/mango/src/mango_error.erl b/src/mango/src/mango_error.erl
index bb545ad67..9ac8f6368 100644
--- a/src/mango/src/mango_error.erl
+++ b/src/mango/src/mango_error.erl
@@ -28,13 +28,6 @@ info(mango_idx, {no_usable_index, missing_sort_index}) ->
<<"No index exists for this sort, "
"try indexing by the sort fields.">>
};
-info(mango_idx, {no_usable_index, missing_sort_index_partitioned}) ->
- {
- 400,
- <<"no_usable_index">>,
- <<"No partitioned index exists for this sort, "
- "try indexing by the sort fields.">>
- };
info(mango_idx, {no_usable_index, missing_sort_index_global}) ->
{
400,
@@ -118,13 +111,6 @@ info(mango_idx, {invalid_index_type, BadType}) ->
<<"invalid_index">>,
fmt("Invalid type for index: ~s", [BadType])
};
-info(mango_idx, {partitioned_option_mismatch, BadDDoc}) ->
- {
- 400,
- <<"invalid_partitioned_option">>,
- fmt("Requested partitioned option does not match existing value on"
- " design document ~s", [BadDDoc])
- };
info(mango_idx, invalid_query_ddoc_language) ->
{
400,
diff --git a/src/mango/src/mango_eval.erl b/src/mango/src/mango_eval.erl
new file mode 100644
index 000000000..59d784b49
--- /dev/null
+++ b/src/mango/src/mango_eval.erl
@@ -0,0 +1,115 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+-module(mango_eval).
+-behavior(couch_eval).
+
+
+-export([
+ acquire_map_context/1,
+ release_map_context/1,
+ map_docs/2
+]).
+
+
+-export([
+ index_doc/2
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include("mango_idx.hrl").
+
+
+acquire_map_context(Opts) ->
+ #{
+ db_name := DbName,
+ ddoc_id := DDocId,
+ map_funs := MapFuns
+ } = Opts,
+ Indexes = lists:map(fun (Def) ->
+ #idx{
+ type = <<"json">>,
+ dbname = DbName,
+ ddoc = DDocId,
+ def = Def
+ }
+ end, MapFuns),
+ {ok, Indexes}.
+
+
+release_map_context(_) ->
+ ok.
+
+
+map_docs(Indexes, Docs) ->
+ {ok, lists:map(fun(Doc) ->
+ Json = couch_doc:to_json_obj(Doc, []),
+ Results = index_doc(Indexes, Json),
+ {Doc#doc.id, Results}
+ end, Docs)}.
+
+
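+% For each index, emit either no rows (the doc does not match the partial
+% filter selector or an indexed field is missing) or a single {Key, null}
+% row whose key is the list of indexed field values.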
+index_doc(Indexes, Doc) ->
+ lists:map(fun(Idx) ->
+ {IdxDef} = mango_idx:def(Idx),
+ Results = get_index_entries(IdxDef, Doc),
+ case lists:member(not_found, Results) of
+ true ->
+ [];
+ false ->
+ [{Results, null}]
+ end
+ end, Indexes).
+
+
+get_index_entries(IdxDef, Doc) ->
+ {Fields} = couch_util:get_value(<<"fields">>, IdxDef),
+ Selector = get_index_partial_filter_selector(IdxDef),
+ case should_index(Selector, Doc) of
+ false ->
+ [not_found];
+ true ->
+ get_index_values(Fields, Doc)
+ end.
+
+
+get_index_values(Fields, Doc) ->
+ lists:map(fun({Field, _Dir}) ->
+ case mango_doc:get_field(Doc, Field) of
+ not_found -> not_found;
+ bad_path -> not_found;
+ Value -> Value
+ end
+ end, Fields).
+
+
+get_index_partial_filter_selector(IdxDef) ->
+ case couch_util:get_value(<<"partial_filter_selector">>, IdxDef, {[]}) of
+ {[]} ->
+ % this is to support legacy text indexes that had the
+ % partial_filter_selector set as selector
+ couch_util:get_value(<<"selector">>, IdxDef, {[]});
+ Else ->
+ Else
+ end.
+
+
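+% A document is indexed only when it matches the (normalized) partial filter
+% selector and is not a design document.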
+should_index(Selector, Doc) ->
+ NormSelector = mango_selector:normalize(Selector),
+ Matches = mango_selector:match(NormSelector, Doc),
+ IsDesign = case mango_doc:get_field(Doc, <<"_id">>) of
+ <<"_design/", _/binary>> -> true;
+ _ -> false
+ end,
+ Matches and not IsDesign.
diff --git a/src/mango/src/mango_execution_stats.erl b/src/mango/src/mango_execution_stats.erl
index 5878a3190..fe9d27b90 100644
--- a/src/mango/src/mango_execution_stats.erl
+++ b/src/mango/src/mango_execution_stats.erl
@@ -18,7 +18,6 @@
incr_keys_examined/1,
incr_docs_examined/1,
incr_docs_examined/2,
- incr_quorum_docs_examined/1,
incr_results_returned/1,
log_start/1,
log_end/1,
@@ -33,7 +32,6 @@ to_json(Stats) ->
{[
{total_keys_examined, Stats#execution_stats.totalKeysExamined},
{total_docs_examined, Stats#execution_stats.totalDocsExamined},
- {total_quorum_docs_examined, Stats#execution_stats.totalQuorumDocsExamined},
{results_returned, Stats#execution_stats.resultsReturned},
{execution_time_ms, Stats#execution_stats.executionTimeMs}
]}.
@@ -55,12 +53,6 @@ incr_docs_examined(Stats, N) ->
}.
-incr_quorum_docs_examined(Stats) ->
- Stats#execution_stats {
- totalQuorumDocsExamined = Stats#execution_stats.totalQuorumDocsExamined + 1
- }.
-
-
incr_results_returned(Stats) ->
couch_stats:increment_counter([mango, results_returned]),
Stats#execution_stats {
diff --git a/src/mango/src/mango_execution_stats.hrl b/src/mango/src/mango_execution_stats.hrl
index ea5ed5ee8..783c1e7f9 100644
--- a/src/mango/src/mango_execution_stats.hrl
+++ b/src/mango/src/mango_execution_stats.hrl
@@ -13,7 +13,6 @@
-record(execution_stats, {
totalKeysExamined = 0,
totalDocsExamined = 0,
- totalQuorumDocsExamined = 0,
resultsReturned = 0,
executionStartTime,
executionTimeMs
diff --git a/src/mango/src/mango_httpd.erl b/src/mango/src/mango_httpd.erl
index 379d2e127..8d5a2123d 100644
--- a/src/mango/src/mango_httpd.erl
+++ b/src/mango/src/mango_httpd.erl
@@ -32,9 +32,8 @@
threshold = 1490
}).
-handle_req(#httpd{} = Req, Db0) ->
+handle_req(#httpd{} = Req, Db) ->
try
- Db = set_user_ctx(Req, Db0),
handle_req_int(Req, Db)
catch
throw:{mango_error, Module, Reason} ->
@@ -61,7 +60,9 @@ handle_req_int(_, _) ->
handle_index_req(#httpd{method='GET', path_parts=[_, _]}=Req, Db) ->
Params = lists:flatmap(fun({K, V}) -> parse_index_param(K, V) end,
chttpd:qs(Req)),
- Idxs = lists:sort(mango_idx:list(Db)),
+ Idxs = fabric2_fdb:transactional(Db, fun(TxDb) ->
+ lists:sort(mango_idx:list(TxDb))
+ end),
JsonIdxs0 = lists:map(fun mango_idx:to_json/1, Idxs),
TotalRows = length(JsonIdxs0),
Limit = case couch_util:get_value(limit, Params, TotalRows) of
@@ -87,26 +88,27 @@ handle_index_req(#httpd{method='POST', path_parts=[_, _]}=Req, Db) ->
{ok, Idx0} = mango_idx:new(Db, Opts),
{ok, Idx} = mango_idx:validate_new(Idx0, Db),
DbOpts = [{user_ctx, Req#httpd.user_ctx}, deleted, ejson_body],
- {ok, DDoc} = mango_util:load_ddoc(Db, mango_idx:ddoc(Idx), DbOpts),
- Id = Idx#idx.ddoc,
- Name = Idx#idx.name,
- Status = case mango_idx:add(DDoc, Idx) of
- {ok, DDoc} ->
- <<"exists">>;
- {ok, NewDDoc} ->
- CreateOpts = get_idx_w_opts(Opts),
- case mango_crud:insert(Db, NewDDoc, CreateOpts) of
- {ok, [{RespProps}]} ->
- case lists:keyfind(error, 1, RespProps) of
- {error, Reason} ->
- ?MANGO_ERROR({error_saving_ddoc, Reason});
- _ ->
- <<"created">>
- end;
- _ ->
- ?MANGO_ERROR(error_saving_ddoc)
- end
- end,
+ Id = mango_idx:ddoc(Idx),
+ Name = mango_idx:name(Idx),
+ Status = fabric2_fdb:transactional(Db, fun(TxDb) ->
+ {ok, DDoc} = mango_util:load_ddoc(TxDb, Id, DbOpts),
+ case mango_idx:add(DDoc, Idx) of
+ {ok, DDoc} ->
+ <<"exists">>;
+ {ok, NewDDoc} ->
+ case mango_crud:insert(TxDb, NewDDoc, Opts) of
+ {ok, [{RespProps}]} ->
+ case lists:keyfind(error, 1, RespProps) of
+ {error, Reason} ->
+ ?MANGO_ERROR({error_saving_ddoc, Reason});
+ _ ->
+ <<"created">>
+ end;
+ _ ->
+ ?MANGO_ERROR(error_saving_ddoc)
+ end
+ end
+ end),
chttpd:send_json(Req, {[{result, Status}, {id, Id}, {name, Name}]});
handle_index_req(#httpd{path_parts=[_, _]}=Req, _Db) ->
@@ -119,20 +121,21 @@ handle_index_req(#httpd{method='POST', path_parts=[_, <<"_index">>,
<<"_bulk_delete">>]}=Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
{ok, Opts} = mango_opts:validate_bulk_delete(chttpd:json_body_obj(Req)),
- Idxs = mango_idx:list(Db),
- DDocs = get_bulk_delete_ddocs(Opts),
- DelOpts = get_idx_w_opts(Opts),
- {Success, Fail} = lists:foldl(fun(DDocId0, {Success0, Fail0}) ->
- DDocId = convert_to_design_id(DDocId0),
- Filt = fun(Idx) -> mango_idx:ddoc(Idx) == DDocId end,
- Id = {<<"id">>, DDocId},
- case mango_idx:delete(Filt, Db, Idxs, DelOpts) of
- {ok, true} ->
- {[{[Id, {<<"ok">>, true}]} | Success0], Fail0};
- {error, Error} ->
- {Success0, [{[Id, {<<"error">>, Error}]} | Fail0]}
- end
- end, {[], []}, DDocs),
+ {Success, Fail} = fabric2_fdb:transactional(Db, fun (TxDb) ->
+ Idxs = mango_idx:list(TxDb),
+ DDocs = get_bulk_delete_ddocs(Opts),
+ lists:foldl(fun(DDocId0, {Success0, Fail0}) ->
+ DDocId = convert_to_design_id(DDocId0),
+ Filt = fun(Idx) -> mango_idx:ddoc(Idx) == DDocId end,
+ Id = {<<"id">>, DDocId},
+ case mango_idx:delete(Filt, TxDb, Idxs, Opts) of
+ {ok, true} ->
+ {[{[Id, {<<"ok">>, true}]} | Success0], Fail0};
+ {error, Error} ->
+ {Success0, [{[Id, {<<"error">>, Error}]} | Fail0]}
+ end
+ end, {[], []}, DDocs)
+ end),
chttpd:send_json(Req, {[{<<"success">>, Success}, {<<"fail">>, Fail}]});
handle_index_req(#httpd{path_parts=[_, <<"_index">>,
@@ -146,16 +149,18 @@ handle_index_req(#httpd{method='DELETE',
handle_index_req(#httpd{method='DELETE',
path_parts=[_, _, DDocId0, Type, Name]}=Req, Db) ->
- Idxs = mango_idx:list(Db),
- DDocId = convert_to_design_id(DDocId0),
- DelOpts = get_idx_del_opts(Req),
- Filt = fun(Idx) ->
- IsDDoc = mango_idx:ddoc(Idx) == DDocId,
- IsType = mango_idx:type(Idx) == Type,
- IsName = mango_idx:name(Idx) == Name,
- IsDDoc andalso IsType andalso IsName
- end,
- case mango_idx:delete(Filt, Db, Idxs, DelOpts) of
+ Result = fabric2_fdb:transactional(Db, fun(TxDb) ->
+ Idxs = mango_idx:list(TxDb),
+ DDocId = convert_to_design_id(DDocId0),
+ Filt = fun(Idx) ->
+ IsDDoc = mango_idx:ddoc(Idx) == DDocId,
+ IsType = mango_idx:type(Idx) == Type,
+ IsName = mango_idx:name(Idx) == Name,
+ IsDDoc andalso IsType andalso IsName
+ end,
+ mango_idx:delete(Filt, TxDb, Idxs, [])
+ end),
+ case Result of
{ok, true} ->
chttpd:send_json(Req, {[{ok, true}]});
{error, not_found} ->
@@ -170,27 +175,30 @@ handle_index_req(#httpd{path_parts=[_, _, _DDocId0, _Type, _Name]}=Req, _Db) ->
handle_explain_req(#httpd{method='POST'}=Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
- Body = maybe_set_partition(Req),
+ Body = chttpd:json_body_obj(Req),
{ok, Opts0} = mango_opts:validate_find(Body),
{value, {selector, Sel}, Opts} = lists:keytake(selector, 1, Opts0),
- Resp = mango_crud:explain(Db, Sel, Opts),
+ Resp = fabric2_fdb:transactional(Db, fun(TxDb) ->
+ mango_crud:explain(TxDb, Sel, Opts)
+ end),
chttpd:send_json(Req, Resp);
handle_explain_req(Req, _Db) ->
chttpd:send_method_not_allowed(Req, "POST").
-handle_find_req(#httpd{method='POST'}=Req, Db) ->
- chttpd:validate_ctype(Req, "application/json"),
- Body = maybe_set_partition(Req),
+handle_find_req(#httpd{method='POST'}=Req0, Db) ->
+ {ok, Req1} = mango_plugin:before_find(Req0),
+ chttpd:validate_ctype(Req1, "application/json"),
+ Body = chttpd:json_body_obj(Req1),
{ok, Opts0} = mango_opts:validate_find(Body),
{value, {selector, Sel}, Opts} = lists:keytake(selector, 1, Opts0),
- {ok, Resp0} = start_find_resp(Req),
+ {ok, Resp0} = start_find_resp(Req1),
case run_find(Resp0, Db, Sel, Opts) of
{ok, AccOut} ->
- end_find_resp(AccOut);
+ end_find_resp(Req1, AccOut);
{error, Error} ->
- chttpd:send_error(Req, Error)
+ chttpd:send_error(Req1, Error)
end;
@@ -198,20 +206,6 @@ handle_find_req(Req, _Db) ->
chttpd:send_method_not_allowed(Req, "POST").
-set_user_ctx(#httpd{user_ctx=Ctx}, Db) ->
- {ok, NewDb} = couch_db:set_user_ctx(Db, Ctx),
- NewDb.
-
-
-get_idx_w_opts(Opts) ->
- case lists:keyfind(w, 1, Opts) of
- {w, N} when is_integer(N), N > 0 ->
- [{w, integer_to_list(N)}];
- _ ->
- [{w, "2"}]
- end.
-
-
get_bulk_delete_ddocs(Opts) ->
case lists:keyfind(docids, 1, Opts) of
{docids, DDocs} when is_list(DDocs) ->
@@ -221,33 +215,6 @@ get_bulk_delete_ddocs(Opts) ->
end.
-get_idx_del_opts(Req) ->
- try
- WStr = chttpd:qs_value(Req, "w", "2"),
- _ = list_to_integer(WStr),
- [{w, WStr}]
- catch _:_ ->
- [{w, "2"}]
- end.
-
-
-maybe_set_partition(Req) ->
- {Props} = chttpd:json_body_obj(Req),
- case chttpd:qs_value(Req, "partition", undefined) of
- undefined ->
- {Props};
- Partition ->
- case couch_util:get_value(<<"partition">>, Props) of
- undefined ->
- {[{<<"partition">>, ?l2b(Partition)} | Props]};
- Partition ->
- {Props};
- OtherPartition ->
- ?MANGO_ERROR({bad_partition, OtherPartition})
- end
- end.
-
-
convert_to_design_id(DDocId) ->
case DDocId of
<<"_design/", _/binary>> -> DDocId;
@@ -259,14 +226,15 @@ start_find_resp(Req) ->
chttpd:start_delayed_json_response(Req, 200, [], "{\"docs\":[").
-end_find_resp(Acc0) ->
- #vacc{resp=Resp00, buffer=Buf, kvs=KVs, threshold=Max} = Acc0,
+end_find_resp(Req, Acc0) ->
+ #vacc{resp=Resp00, buffer=Buf, kvs=KVs0, threshold=Max} = Acc0,
{ok, Resp0} = chttpd:close_delayed_json_object(Resp00, Buf, "\r\n]", Max),
+ {ok, KVs1} = mango_plugin:after_find(Req, Resp0, KVs0),
FinalAcc = lists:foldl(fun({K, V}, Acc) ->
JK = ?JSON_ENCODE(K),
JV = ?JSON_ENCODE(V),
[JV, ": ", JK, ",\r\n" | Acc]
- end, [], KVs),
+ end, [], KVs1),
Chunk = lists:reverse(FinalAcc, ["}\r\n"]),
{ok, Resp1} = chttpd:send_delayed_chunk(Resp0, Chunk),
chttpd:end_delayed_json_response(Resp1).
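A recurring pattern in the rewritten handlers above is wrapping each read-modify-write of index design documents in fabric2_fdb:transactional/2, so the ddoc load, the index mutation, and the insert all observe a single FoundationDB transaction instead of separate per-call fabric requests. A minimal sketch of that pattern, with an invented function name and a trivial body (only fabric2_fdb:transactional/2 and mango_idx:list/1 are taken from the patch):

count_indexes(Db) ->
    fabric2_fdb:transactional(Db, fun(TxDb) ->
        %% Everything inside this fun runs against TxDb, the
        %% transaction-bound handle, and is retried as a unit.
        length(mango_idx:list(TxDb))
    end).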
diff --git a/src/mango/src/mango_httpd_handlers.erl b/src/mango/src/mango_httpd_handlers.erl
index 80e5e277e..c1ddd6c4e 100644
--- a/src/mango/src/mango_httpd_handlers.erl
+++ b/src/mango/src/mango_httpd_handlers.erl
@@ -12,7 +12,7 @@
-module(mango_httpd_handlers).
--export([url_handler/1, db_handler/1, design_handler/1]).
+-export([url_handler/1, db_handler/1, design_handler/1, handler_info/3]).
url_handler(_) -> no_match.
@@ -22,3 +22,32 @@ db_handler(<<"_find">>) -> fun mango_httpd:handle_req/2;
db_handler(_) -> no_match.
design_handler(_) -> no_match.
+
+handler_info('GET', [Db, <<"_index">>], _) ->
+ {'db.mango.index.read', #{'db.name' => Db}};
+
+handler_info('POST', [Db, <<"_index">>], _) ->
+ {'db.mango.index.create', #{'db.name' => Db}};
+
+handler_info('POST', [Db, <<"_index">>, <<"_bulk_delete">>], _) ->
+ {'db.mango.index.delete', #{'db.name' => Db, multi => true}};
+
+handler_info('DELETE', [Db, <<"_index">>, <<"_design">>, Name, Type, Idx], _) ->
+ {'db.mango.index.delete', #{
+ 'db.name' => Db,
+ 'design.id' => Name,
+ 'index.type' => Type,
+ 'index.name' => Idx
+ }};
+
+handler_info(M, [Db, <<"_index">>, <<"_design/", N/binary>>, T, I], R) ->
+ handler_info(M, [Db, <<"_index">>, <<"_design">>, N, T, I], R);
+
+handler_info('POST', [Db, <<"_explain">>], _) ->
+ {'db.mango.explain.execute', #{'db.name' => Db}};
+
+handler_info('POST', [Db, <<"_find">>], _) ->
+ {'db.mango.find.execute', #{'db.name' => Db}};
+
+handler_info(_, _, _) ->
+ no_match. \ No newline at end of file
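The handler_info/3 clauses added here map an HTTP method and split request path to an operation name plus a map of tags, which the chttpd layer can use for tracing and per-operation statistics. For illustration, the '_find' clause above would resolve as follows (the database name and the ignored third argument are placeholders):

mango_httpd_handlers:handler_info('POST', [<<"db">>, <<"_find">>], req).
%% => {'db.mango.find.execute', #{'db.name' => <<"db">>}}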
diff --git a/src/mango/src/mango_idx.erl b/src/mango/src/mango_idx.erl
index 5d06a8fe3..37b6e03eb 100644
--- a/src/mango/src/mango_idx.erl
+++ b/src/mango/src/mango_idx.erl
@@ -19,7 +19,6 @@
-export([
list/1,
- recover/1,
new/2,
validate_new/2,
@@ -33,7 +32,6 @@
name/1,
type/1,
def/1,
- partitioned/1,
opts/1,
columns/1,
is_usable/3,
@@ -51,11 +49,35 @@
-include_lib("couch/include/couch_db.hrl").
-include("mango.hrl").
-include("mango_idx.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
list(Db) ->
- {ok, Indexes} = ddoc_cache:open(db_to_name(Db), ?MODULE),
- Indexes.
+ DDocs = couch_views_ddoc:get_mango_list(Db),
+ DbName = fabric2_db:name(Db),
+ Indexes = lists:foldl(fun(DDoc, Acc) ->
+ {Props} = couch_doc:to_json_obj(DDoc, []),
+
+ case proplists:get_value(<<"language">>, Props) == <<"query">> of
+ true ->
+ {ok, Mrst} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
+
+ IsInteractive = couch_views_ddoc:is_interactive(DDoc),
+ BuildState = couch_views_fdb:get_build_status(Db, Mrst),
+
+ Idxs = lists:map(fun(Idx) ->
+ Idx#idx{
+ build_status = BuildState,
+ interactive = IsInteractive
+ }
+ end, from_ddoc(Db, DDoc)),
+ Acc ++ Idxs;
+ false ->
+ Acc
+ end
+
+ end, [], DDocs),
+ Indexes ++ special(Db).
get_usable_indexes(Db, Selector, Opts) ->
@@ -63,14 +85,14 @@ get_usable_indexes(Db, Selector, Opts) ->
GlobalIndexes = mango_cursor:remove_indexes_with_partial_filter_selector(
ExistingIndexes
),
+ BuiltIndexes = remove_unbuilt_indexes(GlobalIndexes),
UserSpecifiedIndex = mango_cursor:maybe_filter_indexes_by_ddoc(ExistingIndexes, Opts),
- UsableIndexes0 = lists:usort(GlobalIndexes ++ UserSpecifiedIndex),
- UsableIndexes1 = filter_partition_indexes(UsableIndexes0, Opts),
+ UsableIndexes0 = lists:usort(BuiltIndexes ++ UserSpecifiedIndex),
SortFields = get_sort_fields(Opts),
UsableFilter = fun(I) -> is_usable(I, Selector, SortFields) end,
- case lists:filter(UsableFilter, UsableIndexes1) of
+ case lists:filter(UsableFilter, UsableIndexes0) of
[] ->
mango_sort_error(Db, Opts);
UsableIndexes ->
@@ -78,30 +100,8 @@ get_usable_indexes(Db, Selector, Opts) ->
end.
-mango_sort_error(Db, Opts) ->
- case {fabric_util:is_partitioned(Db), is_opts_partitioned(Opts)} of
- {false, _} ->
- ?MANGO_ERROR({no_usable_index, missing_sort_index});
- {true, true} ->
- ?MANGO_ERROR({no_usable_index, missing_sort_index_partitioned});
- {true, false} ->
- ?MANGO_ERROR({no_usable_index, missing_sort_index_global})
- end.
-
-
-recover(Db) ->
- {ok, DDocs0} = mango_util:open_ddocs(Db),
- Pred = fun({Props}) ->
- case proplists:get_value(<<"language">>, Props) of
- <<"query">> -> true;
- _ -> false
- end
- end,
- DDocs = lists:filter(Pred, DDocs0),
- Special = special(Db),
- {ok, Special ++ lists:flatmap(fun(Doc) ->
- from_ddoc(Db, Doc)
- end, DDocs)}.
+mango_sort_error(_Db, _Opts) ->
+ ?MANGO_ERROR({no_usable_index, missing_sort_index}).
get_sort_fields(Opts) ->
@@ -124,7 +124,6 @@ new(Db, Opts) ->
name = IdxName,
type = Type,
def = Def,
- partitioned = get_idx_partitioned(Opts),
opts = filter_opts(Opts)
}}.
@@ -136,11 +135,10 @@ validate_new(Idx, Db) ->
add(DDoc, Idx) ->
Mod = idx_mod(Idx),
- {ok, NewDDoc1} = Mod:add(DDoc, Idx),
- NewDDoc2 = set_ddoc_partitioned(NewDDoc1, Idx),
+ {ok, NewDDoc} = Mod:add(DDoc, Idx),
% Round trip through JSON for normalization
- Body = ?JSON_DECODE(?JSON_ENCODE(NewDDoc2#doc.body)),
- {ok, NewDDoc2#doc{body = Body}}.
+ Body = ?JSON_DECODE(?JSON_ENCODE(NewDDoc#doc.body)),
+ {ok, NewDDoc#doc{body = Body}}.
remove(DDoc, Idx) ->
@@ -173,16 +171,17 @@ delete(Filt, Db, Indexes, DelOpts) ->
end.
-from_ddoc(Db, {Props}) ->
+from_ddoc(Db, #doc{id = DDocId} = DDoc) ->
+ {Props} = couch_doc:to_json_obj(DDoc, []),
DbName = db_to_name(Db),
- DDoc = proplists:get_value(<<"_id">>, Props),
+ DDocId = proplists:get_value(<<"_id">>, Props),
case proplists:get_value(<<"language">>, Props) of
<<"query">> -> ok;
_ ->
?MANGO_ERROR(invalid_query_ddoc_language)
end,
- IdxMods = case clouseau_rpc:connected() of
+ IdxMods = case is_text_service_available() of
true ->
[mango_idx_view, mango_idx_text];
false ->
@@ -192,8 +191,7 @@ from_ddoc(Db, {Props}) ->
lists:map(fun(Idx) ->
Idx#idx{
dbname = DbName,
- ddoc = DDoc,
- partitioned = get_idx_partitioned(Db, Props)
+ ddoc = DDocId
}
end, Idxs).
@@ -204,7 +202,8 @@ special(Db) ->
name = <<"_all_docs">>,
type = <<"special">>,
def = all_docs,
- opts = []
+ opts = [],
+ build_status = ?INDEX_READY
},
% Add one for _update_seq
[AllDocs].
@@ -230,10 +229,6 @@ def(#idx{def=Def}) ->
Def.
-partitioned(#idx{partitioned=Partitioned}) ->
- Partitioned.
-
-
opts(#idx{opts=Opts}) ->
Opts.
@@ -294,7 +289,7 @@ db_to_name(Name) when is_binary(Name) ->
db_to_name(Name) when is_list(Name) ->
iolist_to_binary(Name);
db_to_name(Db) ->
- couch_db:name(Db).
+ fabric2_db:name(Db).
get_idx_def(Opts) ->
@@ -309,7 +304,7 @@ get_idx_def(Opts) ->
get_idx_type(Opts) ->
case proplists:get_value(type, Opts) of
<<"json">> -> <<"json">>;
- <<"text">> -> case clouseau_rpc:connected() of
+ <<"text">> -> case is_text_service_available() of
true ->
<<"text">>;
false ->
@@ -322,6 +317,11 @@ get_idx_type(Opts) ->
end.
+is_text_service_available() ->
+ erlang:function_exported(clouseau_rpc, connected, 0) andalso
+ clouseau_rpc:connected().
+
+
get_idx_ddoc(Idx, Opts) ->
case proplists:get_value(ddoc, Opts) of
<<"_design/", _Rest/binary>> = Name ->
@@ -350,97 +350,6 @@ gen_name(Idx, Opts0) ->
mango_util:enc_hex(Sha).
-get_idx_partitioned(Opts) ->
- case proplists:get_value(partitioned, Opts) of
- B when is_boolean(B) ->
- B;
- db_default ->
- % Default to the partitioned setting on
- % the database.
- undefined
- end.
-
-
-set_ddoc_partitioned(DDoc, Idx) ->
- % We have to verify that the new index being added
- % to this design document either matches the current
- % ddoc's design options *or* this is a new design doc
- #doc{
- id = DDocId,
- revs = Revs,
- body = {BodyProps}
- } = DDoc,
- OldDOpts = couch_util:get_value(<<"options">>, BodyProps),
- OldOpt = case OldDOpts of
- {OldDOptProps} when is_list(OldDOptProps) ->
- couch_util:get_value(<<"partitioned">>, OldDOptProps);
- _ ->
- undefined
- end,
- % If new matches old we're done
- if Idx#idx.partitioned == OldOpt -> DDoc; true ->
- % If we're creating a ddoc then we can set the options
- case Revs == {0, []} of
- true when Idx#idx.partitioned /= undefined ->
- set_ddoc_partitioned_option(DDoc, Idx#idx.partitioned);
- true when Idx#idx.partitioned == undefined ->
- DDoc;
- false ->
- ?MANGO_ERROR({partitioned_option_mismatch, DDocId})
- end
- end.
-
-
-set_ddoc_partitioned_option(DDoc, Partitioned) ->
- #doc{
- body = {BodyProps}
- } = DDoc,
- NewProps = case couch_util:get_value(<<"options">>, BodyProps) of
- {Existing} when is_list(Existing) ->
- Opt = {<<"partitioned">>, Partitioned},
- New = lists:keystore(<<"partitioned">>, 1, Existing, Opt),
- lists:keystore(<<"options">>, 1, BodyProps, {<<"options">>, New});
- undefined ->
- New = {<<"options">>, {[{<<"partitioned">>, Partitioned}]}},
- lists:keystore(<<"options">>, 1, BodyProps, New)
- end,
- DDoc#doc{body = {NewProps}}.
-
-
-get_idx_partitioned(Db, DDocProps) ->
- Default = fabric_util:is_partitioned(Db),
- case couch_util:get_value(<<"options">>, DDocProps) of
- {DesignOpts} ->
- case couch_util:get_value(<<"partitioned">>, DesignOpts) of
- P when is_boolean(P) ->
- P;
- undefined ->
- Default
- end;
- undefined ->
- Default
- end.
-
-is_opts_partitioned(Opts) ->
- case couch_util:get_value(partition, Opts, <<>>) of
- <<>> ->
- false;
- Partition when is_binary(Partition) ->
- true
- end.
-
-
-filter_partition_indexes(Indexes, Opts) ->
- PFilt = case is_opts_partitioned(Opts) of
- false ->
- fun(#idx{partitioned = P}) -> not P end;
- true ->
- fun(#idx{partitioned = P}) -> P end
- end,
- Filt = fun(Idx) -> type(Idx) == <<"special">> orelse PFilt(Idx) end,
- lists:filter(Filt, Indexes).
-
-
filter_opts([]) ->
[];
filter_opts([{user_ctx, _} | Rest]) ->
@@ -451,10 +360,6 @@ filter_opts([{name, _} | Rest]) ->
filter_opts(Rest);
filter_opts([{type, _} | Rest]) ->
filter_opts(Rest);
-filter_opts([{w, _} | Rest]) ->
- filter_opts(Rest);
-filter_opts([{partitioned, _} | Rest]) ->
- filter_opts(Rest);
filter_opts([Opt | Rest]) ->
[Opt | filter_opts(Rest)].
@@ -478,6 +383,17 @@ get_legacy_selector(Def) ->
Selector -> Selector
end.
+% Remove any interactive indexes that are not yet built. If an index is not
+% interactive then we do not remove it, as it will be built when queried.
+remove_unbuilt_indexes(Indexes) ->
+ lists:filter(fun(Idx) ->
+ case Idx#idx.interactive of
+ true -> Idx#idx.build_status == ?INDEX_READY;
+ _ -> true
+ end
+ end, Indexes).
+
+
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -488,8 +404,9 @@ index(SelectorName, Selector) ->
<<"Selected">>,<<"json">>,
{[{<<"fields">>,{[{<<"location">>,<<"asc">>}]}},
{SelectorName,{Selector}}]},
- false,
- [{<<"def">>,{[{<<"fields">>,[<<"location">>]}]}}]
+ [{<<"def">>,{[{<<"fields">>,[<<"location">>]}]}}],
+ <<"ready">>,
+ false
}.
get_partial_filter_all_docs_test() ->
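With build_status and interactive now carried on the #idx{} record, remove_unbuilt_indexes/1 drops only interactive indexes whose build has not reached ?INDEX_READY; non-interactive ("lazy") indexes are kept because they are built on demand when queried. A hedged sketch of that behaviour with invented record values and a stand-in binary for the not-ready status (?INDEX_READY corresponds to the <<"ready">> status the tests poll for; the function itself may remain module-internal rather than exported):

Ready   = #idx{name = <<"a">>, interactive = true,  build_status = ?INDEX_READY},
Pending = #idx{name = <<"b">>, interactive = true,  build_status = <<"building">>},
Lazy    = #idx{name = <<"c">>, interactive = false},
%% Only the unbuilt interactive index is filtered out.
[Ready, Lazy] = remove_unbuilt_indexes([Ready, Pending, Lazy]).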
diff --git a/src/mango/src/mango_idx.hrl b/src/mango/src/mango_idx.hrl
index 97259500b..68e5aaaf0 100644
--- a/src/mango/src/mango_idx.hrl
+++ b/src/mango/src/mango_idx.hrl
@@ -16,6 +16,7 @@
name,
type,
def,
- partitioned,
- opts
+ opts,
+ build_status,
+ interactive
}).
diff --git a/src/mango/src/mango_idx_special.erl b/src/mango/src/mango_idx_special.erl
index ac6efc707..3548372b6 100644
--- a/src/mango/src/mango_idx_special.erl
+++ b/src/mango/src/mango_idx_special.erl
@@ -28,6 +28,7 @@
-include_lib("couch/include/couch_db.hrl").
-include("mango_idx.hrl").
+-include_lib("couch_views/include/couch_views.hrl").
validate(_) ->
@@ -55,7 +56,8 @@ to_json(#idx{def=all_docs}) ->
{<<"fields">>, [{[
{<<"_id">>, <<"asc">>}
]}]}
- ]}}
+ ]}},
+ {build_status, ?INDEX_READY}
]}.
diff --git a/src/mango/src/mango_idx_text.erl b/src/mango/src/mango_idx_text.erl
index 1d4becfb3..71eaf110a 100644
--- a/src/mango/src/mango_idx_text.erl
+++ b/src/mango/src/mango_idx_text.erl
@@ -100,7 +100,6 @@ to_json(Idx) ->
{ddoc, Idx#idx.ddoc},
{name, Idx#idx.name},
{type, Idx#idx.type},
- {partitioned, Idx#idx.partitioned},
{def, {def_to_json(Idx#idx.def)}}
]}.
diff --git a/src/mango/src/mango_idx_view.erl b/src/mango/src/mango_idx_view.erl
index 37911498c..f80cc217b 100644
--- a/src/mango/src/mango_idx_view.erl
+++ b/src/mango/src/mango_idx_view.erl
@@ -34,7 +34,6 @@
-include_lib("couch/include/couch_db.hrl").
-include("mango.hrl").
-include("mango_idx.hrl").
--include("mango_idx_view.hrl").
validate_new(#idx{}=Idx, _Db) ->
@@ -54,7 +53,16 @@ add(#doc{body={Props0}}=DDoc, Idx) ->
NewView = make_view(Idx),
Views2 = lists:keystore(element(1, NewView), 1, Views1, NewView),
Props1 = lists:keystore(<<"views">>, 1, Props0, {<<"views">>, {Views2}}),
- {ok, DDoc#doc{body={Props1}}}.
+
+ {Opts0} = proplists:get_value(<<"options">>, Props1, {[]}),
+ Opts1 = case lists:keymember(<<"interactive">>, 1, Opts0) of
+ true -> Opts0;
+ false -> Opts0 ++ [{<<"interactive">>, true}]
+ end,
+ Props2 = lists:keystore(<<"options">>, 1, Props1, {<<"options">>, {Opts1}}),
+
+ Props3 = [{<<"autoupdate">>, false}],
+ {ok, DDoc#doc{body={Props2 ++ Props3}}}.
remove(#doc{body={Props0}}=DDoc, Idx) ->
@@ -68,13 +76,15 @@ remove(#doc{body={Props0}}=DDoc, Idx) ->
if Views2 /= Views1 -> ok; true ->
?MANGO_ERROR({index_not_found, Idx#idx.name})
end,
- Props1 = case Views2 of
+ Props3 = case Views2 of
[] ->
- lists:keydelete(<<"views">>, 1, Props0);
+ Props1 = lists:keydelete(<<"views">>, 1, Props0),
+ Props2 = lists:keydelete(<<"options">>, 1, Props1),
+ lists:keydelete(<<"autoupdate">>, 1, Props2);
_ ->
lists:keystore(<<"views">>, 1, Props0, {<<"views">>, {Views2}})
end,
- {ok, DDoc#doc{body={Props1}}}.
+ {ok, DDoc#doc{body={Props3}}}.
from_ddoc({Props}) ->
@@ -104,8 +114,8 @@ to_json(Idx) ->
{ddoc, Idx#idx.ddoc},
{name, Idx#idx.name},
{type, Idx#idx.type},
- {partitioned, Idx#idx.partitioned},
- {def, {def_to_json(Idx#idx.def)}}
+ {def, {def_to_json(Idx#idx.def)}},
+ {build_status, Idx#idx.build_status}
]}.
@@ -172,11 +182,11 @@ start_key([{'$eq', Key, '$eq', Key} | Rest]) ->
end_key([]) ->
- [?MAX_JSON_OBJ];
+ [couch_views_encoding:max()];
end_key([{_, _, '$lt', Key} | Rest]) ->
case mango_json:special(Key) of
true ->
- [?MAX_JSON_OBJ];
+ [couch_views_encoding:max()];
false ->
[Key | end_key(Rest)]
end;
diff --git a/src/mango/src/mango_native_proc.erl b/src/mango/src/mango_native_proc.erl
deleted file mode 100644
index 274ae11de..000000000
--- a/src/mango/src/mango_native_proc.erl
+++ /dev/null
@@ -1,378 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mango_native_proc).
--behavior(gen_server).
-
-
--include("mango_idx.hrl").
-
-
--export([
- start_link/0,
- set_timeout/2,
- prompt/2
-]).
-
--export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3
-]).
-
-
--record(st, {
- indexes = [],
- timeout = 5000
-}).
-
-
--record(tacc, {
- index_array_lengths = true,
- fields = all_fields,
- path = []
-}).
-
-
-start_link() ->
- gen_server:start_link(?MODULE, [], []).
-
-
-set_timeout(Pid, TimeOut) when is_integer(TimeOut), TimeOut > 0 ->
- gen_server:call(Pid, {set_timeout, TimeOut}).
-
-
-prompt(Pid, Data) ->
- gen_server:call(Pid, {prompt, Data}).
-
-
-init(_) ->
- {ok, #st{}}.
-
-
-terminate(_Reason, _St) ->
- ok.
-
-
-handle_call({set_timeout, TimeOut}, _From, St) ->
- {reply, ok, St#st{timeout=TimeOut}};
-
-handle_call({prompt, [<<"reset">>]}, _From, St) ->
- {reply, true, St#st{indexes=[]}};
-
-handle_call({prompt, [<<"reset">>, _QueryConfig]}, _From, St) ->
- {reply, true, St#st{indexes=[]}};
-
-handle_call({prompt, [<<"add_fun">>, IndexInfo]}, _From, St) ->
- Indexes = case validate_index_info(IndexInfo) of
- true ->
- St#st.indexes ++ [IndexInfo];
- false ->
- couch_log:error("No Valid Indexes For: ~p", [IndexInfo]),
- St#st.indexes
- end,
- NewSt = St#st{indexes = Indexes},
- {reply, true, NewSt};
-
-handle_call({prompt, [<<"map_doc">>, Doc]}, _From, St) ->
- {reply, map_doc(St, mango_json:to_binary(Doc)), St};
-
-handle_call({prompt, [<<"reduce">>, RedSrcs, _]}, _From, St) ->
- {reply, [true, [null || _ <- RedSrcs]], St};
-
-handle_call({prompt, [<<"rereduce">>, RedSrcs, _]}, _From, St) ->
- {reply, [true, [null || _ <- RedSrcs]], St};
-
-handle_call({prompt, [<<"index_doc">>, Doc]}, _From, St) ->
- Vals = case index_doc(St, mango_json:to_binary(Doc)) of
- [] ->
- [[]];
- Else ->
- Else
- end,
- {reply, Vals, St};
-
-
-handle_call(Msg, _From, St) ->
- {stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
-
-
-handle_cast(garbage_collect, St) ->
- erlang:garbage_collect(),
- {noreply, St};
-
-handle_cast(stop, St) ->
- {stop, normal, St};
-
-handle_cast(Msg, St) ->
- {stop, {invalid_cast, Msg}, St}.
-
-
-handle_info(Msg, St) ->
- {stop, {invalid_info, Msg}, St}.
-
-
-code_change(_OldVsn, St, _Extra) ->
- {ok, St}.
-
-
-map_doc(#st{indexes=Indexes}, Doc) ->
- lists:map(fun(Idx) -> get_index_entries(Idx, Doc) end, Indexes).
-
-
-index_doc(#st{indexes=Indexes}, Doc) ->
- lists:map(fun(Idx) -> get_text_entries(Idx, Doc) end, Indexes).
-
-
-get_index_entries({IdxProps}, Doc) ->
- {Fields} = couch_util:get_value(<<"fields">>, IdxProps),
- Selector = get_index_partial_filter_selector(IdxProps),
- case should_index(Selector, Doc) of
- false ->
- [];
- true ->
- Values = get_index_values(Fields, Doc),
- case lists:member(not_found, Values) of
- true -> [];
- false -> [[Values, null]]
- end
- end.
-
-
-get_index_values(Fields, Doc) ->
- lists:map(fun({Field, _Dir}) ->
- case mango_doc:get_field(Doc, Field) of
- not_found -> not_found;
- bad_path -> not_found;
- Value -> Value
- end
- end, Fields).
-
-
-get_text_entries({IdxProps}, Doc) ->
- Selector = get_index_partial_filter_selector(IdxProps),
- case should_index(Selector, Doc) of
- true ->
- get_text_entries0(IdxProps, Doc);
- false ->
- []
- end.
-
-
-get_index_partial_filter_selector(IdxProps) ->
- case couch_util:get_value(<<"partial_filter_selector">>, IdxProps, {[]}) of
- {[]} ->
- % this is to support legacy text indexes that had the partial_filter_selector
- % set as selector
- couch_util:get_value(<<"selector">>, IdxProps, {[]});
- Else ->
- Else
- end.
-
-
-get_text_entries0(IdxProps, Doc) ->
- DefaultEnabled = get_default_enabled(IdxProps),
- IndexArrayLengths = get_index_array_lengths(IdxProps),
- FieldsList = get_text_field_list(IdxProps),
- TAcc = #tacc{
- index_array_lengths = IndexArrayLengths,
- fields = FieldsList
- },
- Fields0 = get_text_field_values(Doc, TAcc),
- Fields = if not DefaultEnabled -> Fields0; true ->
- add_default_text_field(Fields0)
- end,
- FieldNames = get_field_names(Fields),
- Converted = convert_text_fields(Fields),
- FieldNames ++ Converted.
-
-
-get_text_field_values({Props}, TAcc) when is_list(Props) ->
- get_text_field_values_obj(Props, TAcc, []);
-
-get_text_field_values(Values, TAcc) when is_list(Values) ->
- IndexArrayLengths = TAcc#tacc.index_array_lengths,
- NewPath = ["[]" | TAcc#tacc.path],
- NewTAcc = TAcc#tacc{path = NewPath},
- case IndexArrayLengths of
- true ->
- % We bypass make_text_field and directly call make_text_field_name
- % because the length field name is not part of the path.
- LengthFieldName = make_text_field_name(NewTAcc#tacc.path, <<"length">>),
- LengthField = [{LengthFieldName, <<"length">>, length(Values)}],
- get_text_field_values_arr(Values, NewTAcc, LengthField);
- _ ->
- get_text_field_values_arr(Values, NewTAcc, [])
- end;
-
-get_text_field_values(Bin, TAcc) when is_binary(Bin) ->
- make_text_field(TAcc, <<"string">>, Bin);
-
-get_text_field_values(Num, TAcc) when is_number(Num) ->
- make_text_field(TAcc, <<"number">>, Num);
-
-get_text_field_values(Bool, TAcc) when is_boolean(Bool) ->
- make_text_field(TAcc, <<"boolean">>, Bool);
-
-get_text_field_values(null, TAcc) ->
- make_text_field(TAcc, <<"null">>, true).
-
-
-get_text_field_values_obj([], _, FAcc) ->
- FAcc;
-get_text_field_values_obj([{Key, Val} | Rest], TAcc, FAcc) ->
- NewPath = [Key | TAcc#tacc.path],
- NewTAcc = TAcc#tacc{path = NewPath},
- Fields = get_text_field_values(Val, NewTAcc),
- get_text_field_values_obj(Rest, TAcc, Fields ++ FAcc).
-
-
-get_text_field_values_arr([], _, FAcc) ->
- FAcc;
-get_text_field_values_arr([Value | Rest], TAcc, FAcc) ->
- Fields = get_text_field_values(Value, TAcc),
- get_text_field_values_arr(Rest, TAcc, Fields ++ FAcc).
-
-
-get_default_enabled(Props) ->
- case couch_util:get_value(<<"default_field">>, Props, {[]}) of
- Bool when is_boolean(Bool) ->
- Bool;
- {[]} ->
- true;
- {Opts}->
- couch_util:get_value(<<"enabled">>, Opts, true)
- end.
-
-
-get_index_array_lengths(Props) ->
- couch_util:get_value(<<"index_array_lengths">>, Props, true).
-
-
-add_default_text_field(Fields) ->
- DefaultFields = add_default_text_field(Fields, []),
- DefaultFields ++ Fields.
-
-
-add_default_text_field([], Acc) ->
- Acc;
-add_default_text_field([{_Name, <<"string">>, Value} | Rest], Acc) ->
- NewAcc = [{<<"$default">>, <<"string">>, Value} | Acc],
- add_default_text_field(Rest, NewAcc);
-add_default_text_field([_ | Rest], Acc) ->
- add_default_text_field(Rest, Acc).
-
-
-%% index of all field names
-get_field_names(Fields) ->
- FieldNameSet = lists:foldl(fun({Name, _, _}, Set) ->
- gb_sets:add([<<"$fieldnames">>, Name, []], Set)
- end, gb_sets:new(), Fields),
- gb_sets:to_list(FieldNameSet).
-
-
-convert_text_fields([]) ->
- [];
-convert_text_fields([{Name, _Type, Value} | Rest]) ->
- [[Name, Value, []] | convert_text_fields(Rest)].
-
-
-should_index(Selector, Doc) ->
- % We should do this
- NormSelector = mango_selector:normalize(Selector),
- Matches = mango_selector:match(NormSelector, Doc),
- IsDesign = case mango_doc:get_field(Doc, <<"_id">>) of
- <<"_design/", _/binary>> -> true;
- _ -> false
- end,
- Matches and not IsDesign.
-
-
-get_text_field_list(IdxProps) ->
- case couch_util:get_value(<<"fields">>, IdxProps) of
- Fields when is_list(Fields) ->
- RawList = lists:flatmap(fun get_text_field_info/1, Fields),
- [mango_util:lucene_escape_user(Field) || Field <- RawList];
- _ ->
- all_fields
- end.
-
-
-get_text_field_info({Props}) ->
- Name = couch_util:get_value(<<"name">>, Props),
- Type0 = couch_util:get_value(<<"type">>, Props),
- if not is_binary(Name) -> []; true ->
- Type = get_text_field_type(Type0),
- [iolist_to_binary([Name, ":", Type])]
- end.
-
-
-get_text_field_type(<<"number">>) ->
- <<"number">>;
-get_text_field_type(<<"boolean">>) ->
- <<"boolean">>;
-get_text_field_type(_) ->
- <<"string">>.
-
-
-make_text_field(TAcc, Type, Value) ->
- FieldName = make_text_field_name(TAcc#tacc.path, Type),
- Fields = TAcc#tacc.fields,
- case Fields == all_fields orelse lists:member(FieldName, Fields) of
- true ->
- [{FieldName, Type, Value}];
- false ->
- []
- end.
-
-
-make_text_field_name([P | Rest], Type) ->
- Parts = lists:reverse(Rest, [iolist_to_binary([P, ":", Type])]),
- Escaped = [mango_util:lucene_escape_field(N) || N <- Parts],
- iolist_to_binary(mango_util:join(".", Escaped)).
-
-
-validate_index_info(IndexInfo) ->
- IdxTypes = case clouseau_rpc:connected() of
- true ->
- [mango_idx_view, mango_idx_text];
- false ->
- [mango_idx_view]
- end,
- Results = lists:foldl(fun(IdxType, Results0) ->
- try
- IdxType:validate_index_def(IndexInfo),
- [valid_index | Results0]
- catch _:_ ->
- [invalid_index | Results0]
- end
- end, [], IdxTypes),
- lists:member(valid_index, Results).
-
-
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
-handle_garbage_collect_cast_test() ->
- ?assertEqual({noreply, []}, handle_cast(garbage_collect, [])).
-
-handle_stop_cast_test() ->
- ?assertEqual({stop, normal, []}, handle_cast(stop, [])).
-
-handle_invalid_cast_test() ->
- ?assertEqual({stop, {invalid_cast, random}, []}, handle_cast(random, [])).
-
--endif.
diff --git a/src/mango/src/mango_opts.erl b/src/mango/src/mango_opts.erl
index 92c07f743..7bae9c90d 100644
--- a/src/mango/src/mango_opts.erl
+++ b/src/mango/src/mango_opts.erl
@@ -34,7 +34,6 @@
validate_sort/1,
validate_fields/1,
validate_bulk_delete/1,
- validate_partitioned/1,
default_limit/0
]).
@@ -71,12 +70,6 @@ validate_idx_create({Props}) ->
{optional, true},
{default, 2},
{validator, fun is_pos_integer/1}
- ]},
- {<<"partitioned">>, [
- {tag, partitioned},
- {optional, true},
- {default, db_default},
- {validator, fun validate_partitioned/1}
]}
],
validate(Props, Opts).
@@ -124,12 +117,6 @@ validate_find({Props}) ->
{default, []},
{validator, fun validate_fields/1}
]},
- {<<"partition">>, [
- {tag, partition},
- {optional, true},
- {default, <<>>},
- {validator, fun validate_partition/1}
- ]},
{<<"r">>, [
{tag, r},
{optional, true},
@@ -309,23 +296,6 @@ validate_fields(Value) ->
mango_fields:new(Value).
-validate_partitioned(true) ->
- {ok, true};
-validate_partitioned(false) ->
- {ok, false};
-validate_partitioned(db_default) ->
- {ok, db_default};
-validate_partitioned(Else) ->
- ?MANGO_ERROR({invalid_partitioned_value, Else}).
-
-
-validate_partition(<<>>) ->
- {ok, <<>>};
-validate_partition(Partition) ->
- couch_partition:validate_partition(Partition),
- {ok, Partition}.
-
-
validate_opts([], Props, Acc) ->
{Props, lists:reverse(Acc)};
validate_opts([{Name, Desc} | Rest], Props, Acc) ->
diff --git a/src/mango/src/mango_plugin.erl b/src/mango/src/mango_plugin.erl
new file mode 100644
index 000000000..de23f8e7c
--- /dev/null
+++ b/src/mango/src/mango_plugin.erl
@@ -0,0 +1,46 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(mango_plugin).
+
+-export([
+ before_find/1,
+ after_find/3
+]).
+
+-define(SERVICE_ID, mango).
+
+%% ------------------------------------------------------------------
+%% API Function Definitions
+%% ------------------------------------------------------------------
+
+before_find(HttpReq0) ->
+ [HttpReq1] = with_pipe(before_find, [HttpReq0]),
+ {ok, HttpReq1}.
+
+
+after_find(HttpReq, HttpResp, Arg0) ->
+ [_HttpReq, _HttpResp, Arg1] = with_pipe(after_find, [HttpReq, HttpResp, Arg0]),
+ {ok, Arg1}.
+
+
+%% ------------------------------------------------------------------
+%% Internal Function Definitions
+%% ------------------------------------------------------------------
+
+with_pipe(Func, Args) ->
+ do_apply(Func, Args, [pipe]).
+
+
+do_apply(Func, Args, Opts) ->
+ Handle = couch_epi:get_handle(?SERVICE_ID),
+ couch_epi:apply(Handle, ?SERVICE_ID, Func, Args, Opts).
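mango_plugin gives other applications two couch_epi extension points around a _find request: before_find/1 runs before the body is parsed and after_find/3 runs over the assembled result key/value pairs. A hypothetical provider is sketched below; the module name is invented, and it assumes couch_epi's pipe mode expects each provider to return the (possibly rewritten) argument list it was called with:

-module(my_mango_plugin).
-export([before_find/1, after_find/3]).

%% May rewrite the incoming #httpd{} request before the selector is parsed.
before_find(HttpReq) ->
    [HttpReq].

%% May rewrite the response KVs before they are encoded and sent.
after_find(HttpReq, HttpResp, KVs) ->
    [HttpReq, HttpResp, KVs].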
diff --git a/src/mango/src/mango_util.erl b/src/mango/src/mango_util.erl
index 0d31f15f9..d649f95f1 100644
--- a/src/mango/src/mango_util.erl
+++ b/src/mango/src/mango_util.erl
@@ -15,13 +15,9 @@
-export([
open_doc/2,
- open_ddocs/1,
load_ddoc/2,
load_ddoc/3,
- defer/3,
- do_defer/3,
-
assert_ejson/1,
to_lower/1,
@@ -85,23 +81,7 @@ open_doc(Db, DocId) ->
open_doc(Db, DocId, Options) ->
- case mango_util:defer(fabric, open_doc, [Db, DocId, Options]) of
- {ok, Doc} ->
- {ok, Doc};
- {not_found, _} ->
- not_found;
- _ ->
- ?MANGO_ERROR({error_loading_doc, DocId})
- end.
-
-
-open_ddocs(Db) ->
- case mango_util:defer(fabric, design_docs, [Db]) of
- {ok, Docs} ->
- {ok, Docs};
- _ ->
- ?MANGO_ERROR(error_loading_ddocs)
- end.
+ fabric2_db:open_doc(Db, DocId, Options).
load_ddoc(Db, DDocId) ->
@@ -111,7 +91,7 @@ load_ddoc(Db, DDocId, DbOpts) ->
case open_doc(Db, DDocId, DbOpts) of
{ok, Doc} ->
{ok, check_lang(Doc)};
- not_found ->
+ {not_found, missing} ->
Body = {[
{<<"language">>, <<"query">>}
]},
@@ -119,40 +99,6 @@ load_ddoc(Db, DDocId, DbOpts) ->
end.
-defer(Mod, Fun, Args) ->
- {Pid, Ref} = erlang:spawn_monitor(?MODULE, do_defer, [Mod, Fun, Args]),
- receive
- {'DOWN', Ref, process, Pid, {mango_defer_ok, Value}} ->
- Value;
- {'DOWN', Ref, process, Pid, {mango_defer_throw, Value}} ->
- erlang:throw(Value);
- {'DOWN', Ref, process, Pid, {mango_defer_error, Value}} ->
- erlang:error(Value);
- {'DOWN', Ref, process, Pid, {mango_defer_exit, Value}} ->
- erlang:exit(Value)
- end.
-
-
-do_defer(Mod, Fun, Args) ->
- try erlang:apply(Mod, Fun, Args) of
- Resp ->
- erlang:exit({mango_defer_ok, Resp})
- catch
- throw:Error ->
- Stack = erlang:get_stacktrace(),
- couch_log:error("Defered error: ~w~n ~p", [{throw, Error}, Stack]),
- erlang:exit({mango_defer_throw, Error});
- error:Error ->
- Stack = erlang:get_stacktrace(),
- couch_log:error("Defered error: ~w~n ~p", [{error, Error}, Stack]),
- erlang:exit({mango_defer_error, Error});
- exit:Error ->
- Stack = erlang:get_stacktrace(),
- couch_log:error("Defered error: ~w~n ~p", [{exit, Error}, Stack]),
- erlang:exit({mango_defer_exit, Error})
- end.
-
-
assert_ejson({Props}) ->
assert_ejson_obj(Props);
assert_ejson(Vals) when is_list(Vals) ->
diff --git a/src/mango/test/01-index-crud-test.py b/src/mango/test/01-index-crud-test.py
index b60239992..13ae300dd 100644
--- a/src/mango/test/01-index-crud-test.py
+++ b/src/mango/test/01-index-crud-test.py
@@ -113,6 +113,21 @@ class IndexCrudTests(mango.DbPerClass):
return
raise AssertionError("index not created")
+ def test_ignore_design_docs(self):
+ fields = ["baz", "foo"]
+ ret = self.db.create_index(fields, name="idx_02")
+ assert ret is True
+ self.db.save_doc({
+ "_id": "_design/ignore",
+ "views": {
+ "view1": {
+ "map": "function (doc) { emit(doc._id, 1)}"
+ }
+ }
+ })
+ indexes = self.db.list_indexes()
+ self.assertEqual(len(indexes), 2)
+
def test_read_idx_doc(self):
self.db.create_index(["foo", "bar"], name="idx_01")
self.db.create_index(["hello", "bar"])
diff --git a/src/mango/test/02-basic-find-test.py b/src/mango/test/02-basic-find-test.py
index afdba03a2..2a03a3a55 100644
--- a/src/mango/test/02-basic-find-test.py
+++ b/src/mango/test/02-basic-find-test.py
@@ -100,16 +100,6 @@ class BasicFindTests(mango.UserDocsTests):
else:
raise AssertionError("bad find")
- def test_bad_r(self):
- bad_rs = ([None, True, False, 1.2, "no limit!", {"foo": "bar"}, [2]],)
- for br in bad_rs:
- try:
- self.db.find({"int": {"$gt": 2}}, r=br)
- except Exception as e:
- assert e.response.status_code == 400
- else:
- raise AssertionError("bad find")
-
def test_bad_conflicts(self):
bad_conflicts = ([None, 1.2, "no limit!", {"foo": "bar"}, [2]],)
for bc in bad_conflicts:
@@ -262,11 +252,6 @@ class BasicFindTests(mango.UserDocsTests):
assert sorted(d.keys()) == ["location", "user_id"]
assert sorted(d["location"].keys()) == ["address"]
- def test_r(self):
- for r in [1, 2, 3]:
- docs = self.db.find({"age": {"$gt": 0}}, r=r)
- assert len(docs) == 15
-
def test_empty(self):
docs = self.db.find({})
# 15 users
diff --git a/src/mango/test/05-index-selection-test.py b/src/mango/test/05-index-selection-test.py
index cb4d32986..bae3d58f1 100644
--- a/src/mango/test/05-index-selection-test.py
+++ b/src/mango/test/05-index-selection-test.py
@@ -14,6 +14,8 @@ import mango
import user_docs
import unittest
+import requests
+
class IndexSelectionTests:
def test_basic(self):
@@ -201,8 +203,11 @@ class IndexSelectionTests:
}
},
}
- with self.assertRaises(KeyError):
+ try:
self.db.save_doc(design_doc)
+ assert False, "Should not get here."
+ except requests.exceptions.HTTPError as e:
+ self.assertEqual(e.response.json()['error'], 'invalid_design_doc')
def test_explain_sort_reverse(self):
selector = {"manager": {"$gt": None}}
diff --git a/src/mango/test/12-use-correct-index-test.py b/src/mango/test/12-use-correct-index-test.py
index c21ad6c5e..a7f07b5e8 100644
--- a/src/mango/test/12-use-correct-index-test.py
+++ b/src/mango/test/12-use-correct-index-test.py
@@ -54,36 +54,41 @@ class ChooseCorrectIndexForDocs(mango.DbPerClass):
self.db.save_docs(copy.deepcopy(DOCS))
def test_choose_index_with_one_field_in_index(self):
- self.db.create_index(["name", "age", "user_id"], ddoc="aaa")
- self.db.create_index(["name"], ddoc="zzz")
+ self.db.create_index(["name", "age", "user_id"], ddoc="aaa", wait_for_built_index=False)
+ self.db.create_index(["name"], ddoc="zzz", wait_for_built_index=False)
+ self.db.wait_for_built_indexes()
explain = self.db.find({"name": "Eddie"}, explain=True)
self.assertEqual(explain["index"]["ddoc"], "_design/zzz")
def test_choose_index_with_two(self):
- self.db.create_index(["name", "age", "user_id"], ddoc="aaa")
- self.db.create_index(["name", "age"], ddoc="bbb")
- self.db.create_index(["name"], ddoc="zzz")
+ self.db.create_index(["name", "age", "user_id"], ddoc="aaa", wait_for_built_index=False)
+ self.db.create_index(["name", "age"], ddoc="bbb", wait_for_built_index=False)
+ self.db.create_index(["name"], ddoc="zzz", wait_for_built_index=False)
+ self.db.wait_for_built_indexes()
explain = self.db.find({"name": "Eddie", "age": {"$gte": 12}}, explain=True)
self.assertEqual(explain["index"]["ddoc"], "_design/bbb")
def test_choose_index_alphabetically(self):
- self.db.create_index(["name"], ddoc="aaa")
- self.db.create_index(["name"], ddoc="bbb")
- self.db.create_index(["name"], ddoc="zzz")
+ self.db.create_index(["name"], ddoc="aaa", wait_for_built_index=False)
+ self.db.create_index(["name"], ddoc="bbb", wait_for_built_index=False)
+ self.db.create_index(["name"], ddoc="zzz", wait_for_built_index=False)
+ self.db.wait_for_built_indexes()
explain = self.db.find({"name": "Eddie", "age": {"$gte": 12}}, explain=True)
self.assertEqual(explain["index"]["ddoc"], "_design/aaa")
def test_choose_index_most_accurate(self):
- self.db.create_index(["name", "age", "user_id"], ddoc="aaa")
- self.db.create_index(["name", "age"], ddoc="bbb")
- self.db.create_index(["name"], ddoc="zzz")
+ self.db.create_index(["name", "age", "user_id"], ddoc="aaa", wait_for_built_index=False)
+ self.db.create_index(["name", "age"], ddoc="bbb", wait_for_built_index=False)
+ self.db.create_index(["name"], ddoc="zzz", wait_for_built_index=False)
+ self.db.wait_for_built_indexes()
explain = self.db.find({"name": "Eddie", "age": {"$gte": 12}}, explain=True)
self.assertEqual(explain["index"]["ddoc"], "_design/bbb")
def test_choose_index_most_accurate_in_memory_selector(self):
- self.db.create_index(["name", "location", "user_id"], ddoc="aaa")
- self.db.create_index(["name", "age", "user_id"], ddoc="bbb")
- self.db.create_index(["name"], ddoc="zzz")
+ self.db.create_index(["name", "location", "user_id"], ddoc="aaa", wait_for_built_index=False)
+ self.db.create_index(["name", "age", "user_id"], ddoc="bbb", wait_for_built_index=False)
+ self.db.create_index(["name"], ddoc="zzz", wait_for_built_index=False)
+ self.db.wait_for_built_indexes()
explain = self.db.find({"name": "Eddie", "number": {"$lte": 12}}, explain=True)
self.assertEqual(explain["index"]["ddoc"], "_design/zzz")
@@ -100,8 +105,9 @@ class ChooseCorrectIndexForDocs(mango.DbPerClass):
def test_chooses_idxA(self):
DOCS2 = [{"a": 1, "b": 1, "c": 1}, {"a": 1000, "d": 1000, "e": 1000}]
self.db.save_docs(copy.deepcopy(DOCS2))
- self.db.create_index(["a", "b", "c"])
- self.db.create_index(["a", "d", "e"])
+ self.db.create_index(["a", "b", "c"], wait_for_built_index=False)
+ self.db.create_index(["a", "d", "e"], wait_for_built_index=False)
+ self.db.wait_for_built_indexes()
explain = self.db.find(
{"a": {"$gt": 0}, "b": {"$gt": 0}, "c": {"$gt": 0}}, explain=True
)
diff --git a/src/mango/test/13-stable-update-test.py b/src/mango/test/13-stable-update-test.py
deleted file mode 100644
index 303f3fab1..000000000
--- a/src/mango/test/13-stable-update-test.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import copy
-import mango
-
-DOCS1 = [
- {
- "_id": "54af50626de419f5109c962f",
- "user_id": 0,
- "age": 10,
- "name": "Jimi",
- "location": "UK",
- "number": 4,
- },
- {
- "_id": "54af50622071121b25402dc3",
- "user_id": 1,
- "age": 12,
- "name": "Eddie",
- "location": "ZAR",
- "number": 2,
- },
-]
-
-
-class SupportStableAndUpdate(mango.DbPerClass):
- def setUp(self):
- self.db.recreate()
- # Hack to prevent auto-indexer from foiling update=False test
- # https://github.com/apache/couchdb/issues/2313
- self.db.save_doc(
- {"_id": "_design/foo", "language": "query", "autoupdate": False}
- )
- self.db.create_index(["name"], ddoc="foo")
- self.db.save_docs(copy.deepcopy(DOCS1))
-
- def test_update_updates_view_when_specified(self):
- docs = self.db.find({"name": "Eddie"}, update=False)
- assert len(docs) == 0
- docs = self.db.find({"name": "Eddie"}, update=True)
- assert len(docs) == 1
diff --git a/src/mango/test/13-users-db-find-test.py b/src/mango/test/13-users-db-find-test.py
index 73d15ea1a..9f9b53a81 100644
--- a/src/mango/test/13-users-db-find-test.py
+++ b/src/mango/test/13-users-db-find-test.py
@@ -12,10 +12,15 @@
# the License.
-import mango, requests
+import mango, requests, unittest
+# Re-enable once the _users db is implemented
class UsersDbFindTests(mango.UsersDbTests):
+ @classmethod
+ def setUpClass(klass):
+ raise unittest.SkipTest("Re-enable once the _users db is implemented")
+
def test_simple_find(self):
docs = self.db.find({"name": {"$eq": "demo02"}})
assert len(docs) == 1
diff --git a/src/mango/test/15-execution-stats-test.py b/src/mango/test/15-execution-stats-test.py
index 537a19add..6ccc04b44 100644
--- a/src/mango/test/15-execution-stats-test.py
+++ b/src/mango/test/15-execution-stats-test.py
@@ -22,7 +22,6 @@ class ExecutionStatsTests(mango.UserDocsTests):
self.assertEqual(len(resp["docs"]), 3)
self.assertEqual(resp["execution_stats"]["total_keys_examined"], 0)
self.assertEqual(resp["execution_stats"]["total_docs_examined"], 3)
- self.assertEqual(resp["execution_stats"]["total_quorum_docs_examined"], 0)
self.assertEqual(resp["execution_stats"]["results_returned"], 3)
# See https://github.com/apache/couchdb/issues/1732
# Erlang os:timestamp() only has ms accuracy on Windows!
@@ -35,12 +34,11 @@ class ExecutionStatsTests(mango.UserDocsTests):
def test_quorum_json_index(self):
resp = self.db.find(
- {"age": {"$lt": 35}}, return_raw=True, r=3, executionStats=True
+ {"age": {"$lt": 35}}, return_raw=True, executionStats=True
)
self.assertEqual(len(resp["docs"]), 3)
self.assertEqual(resp["execution_stats"]["total_keys_examined"], 0)
- self.assertEqual(resp["execution_stats"]["total_docs_examined"], 0)
- self.assertEqual(resp["execution_stats"]["total_quorum_docs_examined"], 3)
+ self.assertEqual(resp["execution_stats"]["total_docs_examined"], 3)
self.assertEqual(resp["execution_stats"]["results_returned"], 3)
# See https://github.com/apache/couchdb/issues/1732
# Erlang os:timestamp() only has ms accuracy on Windows!
@@ -70,7 +68,6 @@ class ExecutionStatsTests_Text(mango.UserDocsTextTests):
self.assertEqual(len(resp["docs"]), 1)
self.assertEqual(resp["execution_stats"]["total_keys_examined"], 0)
self.assertEqual(resp["execution_stats"]["total_docs_examined"], 1)
- self.assertEqual(resp["execution_stats"]["total_quorum_docs_examined"], 0)
self.assertEqual(resp["execution_stats"]["results_returned"], 1)
self.assertGreater(resp["execution_stats"]["execution_time_ms"], 0)
diff --git a/src/mango/test/16-index-selectors-test.py b/src/mango/test/16-index-selectors-test.py
index 4510065f5..cde8438fc 100644
--- a/src/mango/test/16-index-selectors-test.py
+++ b/src/mango/test/16-index-selectors-test.py
@@ -246,6 +246,14 @@ class IndexSelectorJson(mango.DbPerClass):
docs = self.db.find(selector, use_index="oldschooltext")
self.assertEqual(len(docs), 3)
+ def test_text_old_index_not_used(self):
+ selector = {"location": {"$gte": "FRA"}}
+ self.db.save_doc(oldschoolddoctext)
+ resp = self.db.find(selector, explain=True)
+ self.assertEqual(resp["index"]["name"], "_all_docs")
+ docs = self.db.find(selector)
+ self.assertEqual(len(docs), 3)
+
@unittest.skipUnless(mango.has_text_service(), "requires text service")
def test_text_old_selector_still_supported_via_api(self):
selector = {"location": {"$gte": "FRA"}}
diff --git a/src/mango/test/17-multi-type-value-test.py b/src/mango/test/17-multi-type-value-test.py
index 21e7afda4..5a8fcedef 100644
--- a/src/mango/test/17-multi-type-value-test.py
+++ b/src/mango/test/17-multi-type-value-test.py
@@ -53,9 +53,9 @@ class MultiValueFieldTests:
class MultiValueFieldJSONTests(mango.DbPerClass, MultiValueFieldTests):
def setUp(self):
self.db.recreate()
+ self.db.create_index(["name"], wait_for_built_index=False)
+ self.db.create_index(["age", "name"], wait_for_built_index=True)
self.db.save_docs(copy.deepcopy(DOCS))
- self.db.create_index(["name"])
- self.db.create_index(["age", "name"])
# @unittest.skipUnless(mango.has_text_service(), "requires text service")
diff --git a/src/mango/test/19-find-conflicts.py b/src/mango/test/19-find-conflicts.py
index bf865d6ea..3bf3c0693 100644
--- a/src/mango/test/19-find-conflicts.py
+++ b/src/mango/test/19-find-conflicts.py
@@ -12,11 +12,12 @@
import mango
import copy
+import unittest
-DOC = [{"_id": "doc", "a": 2}]
+DOC = [{"_id": "doc", "a": 2}, {"_id": "doc1", "b": 2}]
CONFLICT = [{"_id": "doc", "_rev": "1-23202479633c2b380f79507a776743d5", "a": 1}]
-
+CONFLICT2 = [{"_id": "doc1", "_rev": "1-23202479633c2b380f79507a776743d5", "b": 1}]
class ChooseCorrectIndexForDocs(mango.DbPerClass):
def setUp(self):
@@ -25,7 +26,7 @@ class ChooseCorrectIndexForDocs(mango.DbPerClass):
self.db.save_docs_with_conflicts(copy.deepcopy(CONFLICT))
def test_retrieve_conflicts(self):
- self.db.create_index(["_conflicts"])
+ self.db.create_index(["_conflicts"], wait_for_built_index=False)
result = self.db.find({"_conflicts": {"$exists": True}}, conflicts=True)
self.assertEqual(
result[0]["_conflicts"][0], "1-23202479633c2b380f79507a776743d5"
diff --git a/src/mango/test/20-no-timeout-test.py b/src/mango/test/20-no-timeout-test.py
deleted file mode 100644
index cffdfc335..000000000
--- a/src/mango/test/20-no-timeout-test.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import mango
-import copy
-import unittest
-
-
-class LongRunningMangoTest(mango.DbPerClass):
- def setUp(self):
- self.db.recreate()
- docs = []
- for i in range(100000):
- docs.append({"_id": str(i), "another": "field"})
- if i % 20000 == 0:
- self.db.save_docs(docs)
- docs = []
-
- # This test should run to completion and not timeout
- def test_query_does_not_time_out(self):
- selector = {"_id": {"$gt": 0}, "another": "wrong"}
- docs = self.db.find(selector)
- self.assertEqual(len(docs), 0)
diff --git a/src/mango/test/22-build-wait-selected-index.py b/src/mango/test/22-build-wait-selected-index.py
new file mode 100644
index 000000000..fd856f4d6
--- /dev/null
+++ b/src/mango/test/22-build-wait-selected-index.py
@@ -0,0 +1,50 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import mango
+import copy
+import unittest
+
+
+class BuildAndWaitOnSelectedIndex(mango.DbPerClass):
+ def setUp(self):
+ self.db.recreate()
+ docs = []
+ for i in range(1000):
+ docs.append({"_id": str(i), "val": i})
+ if len(docs) == 250:
+ self.db.save_docs(docs)
+ docs = []
+
+ def test_wait_for_query(self):
+ self.db.create_index(["val"], ddoc="my-ddoc", wait_for_built_index=False)
+
+ explain = self.db.find({'val': {"$gt": 990}}, use_index="my-ddoc", explain=True)
+ self.assertEqual(explain["index"]["ddoc"], "_design/my-ddoc")
+
+ docs = self.db.find({'val': {"$gte": 990}}, limit=10)
+
+ self.assertEqual(len(docs), 10)
+
+ def test_dont_wait(self):
+ self.db.create_index(["val"], ddoc="my-ddoc", wait_for_built_index=False)
+
+ explain = self.db.find({'val': {"$gt": 990}}, explain=True)
+ self.assertEqual(explain["index"]["name"], "_all_docs")
+
+ docs = self.db.find({'val': {"$gte": 990}})
+ self.assertEqual(len(docs), 10)
+
+ def test_update_false(self):
+ self.db.create_index(["val"], ddoc="my-ddoc", wait_for_built_index=False)
+ docs = self.db.find({'val': {"$gte": 990}}, update=False, use_index="my-ddoc")
+ self.assertEqual(docs, [])
diff --git a/src/mango/test/mango.py b/src/mango/test/mango.py
index e78160f57..05c4e65c4 100644
--- a/src/mango/test/mango.py
+++ b/src/mango/test/mango.py
@@ -48,8 +48,8 @@ class Database(object):
dbname,
host="127.0.0.1",
port="15984",
- user="testuser",
- password="testpass",
+ user="adm",
+ password="pass",
):
root_url = get_from_environment("COUCH_HOST", "http://{}:{}".format(host, port))
auth_header = get_from_environment("COUCH_AUTH_HEADER", None)
@@ -139,8 +139,9 @@ class Database(object):
ddoc=None,
partial_filter_selector=None,
selector=None,
+ wait_for_built_index=True,
):
- body = {"index": {"fields": fields}, "type": idx_type, "w": 3}
+ body = {"index": {"fields": fields}, "type": idx_type}
if name is not None:
body["name"] = name
if ddoc is not None:
@@ -156,13 +157,22 @@ class Database(object):
assert r.json()["name"] is not None
created = r.json()["result"] == "created"
- if created:
- # wait until the database reports the index as available
- while len(self.get_index(r.json()["id"], r.json()["name"])) < 1:
- delay(t=0.1)
+ if created and wait_for_built_index:
+            # wait until the database reports the index as available and built
+ while True:
+ idx = self.get_index(r.json()["id"], r.json()["name"])[0]
+ if idx["build_status"] == "ready":
+ break
+ delay(t=0.2)
return created
+ def wait_for_built_indexes(self):
+ while True:
+ if all(idx["build_status"] == "ready" for idx in self.list_indexes()):
+ break
+ delay(t=0.2)
+
def create_text_index(
self,
analyzer=None,
@@ -244,7 +254,6 @@ class Database(object):
skip=0,
sort=None,
fields=None,
- r=1,
conflicts=False,
use_index=None,
explain=False,
@@ -258,7 +267,6 @@ class Database(object):
"use_index": use_index,
"limit": limit,
"skip": skip,
- "r": r,
"conflicts": conflicts,
}
if sort is not None:
diff --git a/src/mango/test/user_docs.py b/src/mango/test/user_docs.py
index 617b430c7..c30198347 100644
--- a/src/mango/test/user_docs.py
+++ b/src/mango/test/user_docs.py
@@ -65,12 +65,11 @@ def teardown_users(db):
def setup(db, index_type="view", **kwargs):
db.recreate()
- db.save_docs(copy.deepcopy(DOCS))
if index_type == "view":
add_view_indexes(db, kwargs)
elif index_type == "text":
add_text_indexes(db, kwargs)
-
+ db.save_docs(copy.deepcopy(DOCS))
def add_view_indexes(db, kwargs):
indexes = [
@@ -94,7 +93,9 @@ def add_view_indexes(db, kwargs):
(["ordered"], "ordered"),
]
for (idx, name) in indexes:
- assert db.create_index(idx, name=name, ddoc=name) is True
+ assert db.create_index(idx, name=name, ddoc=name,
+ wait_for_built_index=False) is True
+ db.wait_for_built_indexes()
def add_text_indexes(db, kwargs):
diff --git a/src/mem3/src/mem3_httpd_handlers.erl b/src/mem3/src/mem3_httpd_handlers.erl
index 7dd6ab052..eeec1edf3 100644
--- a/src/mem3/src/mem3_httpd_handlers.erl
+++ b/src/mem3/src/mem3_httpd_handlers.erl
@@ -12,7 +12,7 @@
-module(mem3_httpd_handlers).
--export([url_handler/1, db_handler/1, design_handler/1]).
+-export([url_handler/1, db_handler/1, design_handler/1, handler_info/3]).
url_handler(<<"_membership">>) -> fun mem3_httpd:handle_membership_req/1;
url_handler(<<"_reshard">>) -> fun mem3_reshard_httpd:handle_reshard_req/1;
@@ -23,3 +23,39 @@ db_handler(<<"_sync_shards">>) -> fun mem3_httpd:handle_sync_req/2;
db_handler(_) -> no_match.
design_handler(_) -> no_match.
+
+handler_info('GET', [<<"_membership">>], _) ->
+ {'cluster.membership.read', #{}};
+
+handler_info('GET', [<<"_reshard">>], _) ->
+ {'reshard.summary.read', #{}};
+
+handler_info('GET', [<<"_reshard">>, <<"state">>], _) ->
+ {'reshard.state.read', #{}};
+
+handler_info('PUT', [<<"_reshard">>, <<"state">>], _) ->
+ {'reshard.state.write', #{}};
+
+handler_info('GET', [<<"_reshard">>, <<"jobs">>], _) ->
+ {'reshard.jobs.read', #{}};
+
+handler_info('POST', [<<"_reshard">>, <<"jobs">>], _) ->
+ {'reshard.jobs.create', #{}};
+
+handler_info('GET', [<<"_reshard">>, <<"jobs">>, JobId], _) ->
+ {'reshard.job.read', #{'job.id' => JobId}};
+
+handler_info('DELETE', [<<"_reshard">>, <<"jobs">>, JobId], _) ->
+ {'reshard.job.delete', #{'job.id' => JobId}};
+
+handler_info('GET', [DbName, <<"_shards">>], _) ->
+ {'db.shards.read', #{'db.name' => DbName}};
+
+handler_info('GET', [DbName, <<"_shards">>, DocId], _) ->
+ {'db.shards.read', #{'db.name' => DbName, 'doc.id' => DocId}};
+
+handler_info('POST', [DbName, <<"_sync_shards">>], _) ->
+ {'db.shards.sync', #{'db.name' => DbName}};
+
+handler_info(_, _, _) ->
+ no_match.
diff --git a/src/mem3/src/mem3_reshard.erl b/src/mem3/src/mem3_reshard.erl
index 620b1bc73..234670c34 100644
--- a/src/mem3/src/mem3_reshard.erl
+++ b/src/mem3/src/mem3_reshard.erl
@@ -213,11 +213,6 @@ reset_state() ->
% Gen server functions
init(_) ->
- % Advertise resharding API feature only if it is not disabled
- case is_disabled() of
- true -> ok;
- false -> config:enable_feature('reshard')
- end,
couch_log:notice("~p start init()", [?MODULE]),
EtsOpts = [named_table, {keypos, #job.id}, {read_concurrency, true}],
?MODULE = ets:new(?MODULE, EtsOpts),
diff --git a/src/setup/src/setup.erl b/src/setup/src/setup.erl
index 4867f6096..b35366f61 100644
--- a/src/setup/src/setup.erl
+++ b/src/setup/src/setup.erl
@@ -165,7 +165,7 @@ enable_cluster_int(Options, false) ->
couch_log:debug("Enable Cluster: ~p~n", [Options]).
set_admin(Username, Password) ->
- config:set("admins", binary_to_list(Username), binary_to_list(Password)).
+ config:set("admins", binary_to_list(Username), binary_to_list(Password), #{sensitive => true}).
setup_node(NewCredentials, NewBindAddress, NodeCount, Port) ->
case NewCredentials of
diff --git a/src/setup/src/setup_httpd.erl b/src/setup/src/setup_httpd.erl
index 949675b6a..48b1b2a5a 100644
--- a/src/setup/src/setup_httpd.erl
+++ b/src/setup/src/setup_httpd.erl
@@ -19,7 +19,7 @@ handle_setup_req(#httpd{method='POST'}=Req) ->
ok = chttpd:verify_is_server_admin(Req),
couch_httpd:validate_ctype(Req, "application/json"),
Setup = get_body(Req),
- couch_log:notice("Setup: ~p~n", [Setup]),
+ couch_log:notice("Setup: ~p~n", [remove_sensitive(Setup)]),
Action = binary_to_list(couch_util:get_value(<<"action">>, Setup, <<"missing">>)),
case handle_action(Action, Setup) of
ok ->
@@ -91,7 +91,7 @@ handle_action("enable_cluster", Setup) ->
handle_action("finish_cluster", Setup) ->
- couch_log:notice("finish_cluster: ~p~n", [Setup]),
+ couch_log:notice("finish_cluster: ~p~n", [remove_sensitive(Setup)]),
Options = get_options([
{ensure_dbs_exist, <<"ensure_dbs_exist">>}
@@ -105,7 +105,7 @@ handle_action("finish_cluster", Setup) ->
end;
handle_action("enable_single_node", Setup) ->
- couch_log:notice("enable_single_node: ~p~n", [Setup]),
+ couch_log:notice("enable_single_node: ~p~n", [remove_sensitive(Setup)]),
Options = get_options([
{ensure_dbs_exist, <<"ensure_dbs_exist">>},
@@ -125,7 +125,7 @@ handle_action("enable_single_node", Setup) ->
handle_action("add_node", Setup) ->
- couch_log:notice("add_node: ~p~n", [Setup]),
+ couch_log:notice("add_node: ~p~n", [remove_sensitive(Setup)]),
Options = get_options([
{username, <<"username">>},
@@ -147,10 +147,10 @@ handle_action("add_node", Setup) ->
end;
handle_action("remove_node", Setup) ->
- couch_log:notice("remove_node: ~p~n", [Setup]);
+ couch_log:notice("remove_node: ~p~n", [remove_sensitive(Setup)]);
handle_action("receive_cookie", Setup) ->
- couch_log:notice("receive_cookie: ~p~n", [Setup]),
+ couch_log:notice("receive_cookie: ~p~n", [remove_sensitive(Setup)]),
Options = get_options([
{cookie, <<"cookie">>}
], Setup),
@@ -173,3 +173,8 @@ get_body(Req) ->
couch_log:notice("Body Fail: ~p~n", [Else]),
couch_httpd:send_error(Req, 400, <<"bad_request">>, <<"Missing JSON body'">>)
end.
+
+remove_sensitive(KVList0) ->
+ KVList1 = lists:keyreplace(<<"username">>, 1, KVList0, {<<"username">>, <<"****">>}),
+ KVList2 = lists:keyreplace(<<"password">>, 1, KVList1, {<<"password">>, <<"****">>}),
+ KVList2. \ No newline at end of file
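remove_sensitive/1 masks only the username and password fields when they are present (lists:keyreplace/4 leaves the list unchanged otherwise), so the setup log lines keep their shape without leaking credentials. An illustrative input/output pair, assuming the helper were called directly with an invented setup body:

Setup = [{<<"action">>, <<"enable_single_node">>},
         {<<"username">>, <<"adm">>},
         {<<"password">>, <<"secret">>}],
%% remove_sensitive(Setup) =>
%%   [{<<"action">>, <<"enable_single_node">>},
%%    {<<"username">>, <<"****">>},
%%    {<<"password">>, <<"****">>}]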
diff --git a/src/setup/src/setup_httpd_handlers.erl b/src/setup/src/setup_httpd_handlers.erl
index 994c217e8..e26fbc3c4 100644
--- a/src/setup/src/setup_httpd_handlers.erl
+++ b/src/setup/src/setup_httpd_handlers.erl
@@ -12,7 +12,7 @@
-module(setup_httpd_handlers).
--export([url_handler/1, db_handler/1, design_handler/1]).
+-export([url_handler/1, db_handler/1, design_handler/1, handler_info/3]).
url_handler(<<"_cluster_setup">>) -> fun setup_httpd:handle_setup_req/1;
url_handler(_) -> no_match.
@@ -20,3 +20,13 @@ url_handler(_) -> no_match.
db_handler(_) -> no_match.
design_handler(_) -> no_match.
+
+
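+%% Label the setup endpoint by operation; anything unrecognized falls
+%% through to no_match.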
+handler_info('GET', [<<"_cluster_setup">>], _) ->
+ {'cluster_setup.read', #{}};
+
+handler_info('POST', [<<"_cluster_setup">>], _) ->
+ {'cluster_setup.write', #{}};
+
+handler_info(_, _, _) ->
+ no_match.
\ No newline at end of file
diff --git a/test/elixir/lib/couch.ex b/test/elixir/lib/couch.ex
index 7819299cc..094f275a8 100644
--- a/test/elixir/lib/couch.ex
+++ b/test/elixir/lib/couch.ex
@@ -3,7 +3,7 @@ defmodule Couch.Session do
CouchDB session helpers.
"""
- defstruct [:cookie, :error]
+ defstruct [:cookie, :error, :base_url]
def new(cookie, error \\ "") do
%Couch.Session{cookie: cookie, error: error}
@@ -42,12 +42,12 @@ defmodule Couch.Session do
# if the need arises.
def go(%Couch.Session{} = sess, method, url, opts) do
- opts = Keyword.merge(opts, cookie: sess.cookie)
+ opts = Keyword.merge(opts, cookie: sess.cookie, base_url: sess.base_url)
Couch.request(method, url, opts)
end
def go!(%Couch.Session{} = sess, method, url, opts) do
- opts = Keyword.merge(opts, cookie: sess.cookie)
+ opts = Keyword.merge(opts, cookie: sess.cookie, base_url: sess.base_url)
Couch.request!(method, url, opts)
end
end
@@ -71,9 +71,10 @@ defmodule Couch do
url
end
- def process_url(url) do
- base_url = System.get_env("EX_COUCH_URL") || "http://127.0.0.1:15984"
- base_url <> url
+ def process_url(url, options) do
+ (Keyword.get(options, :base_url) <> url)
+ |> prepend_protocol
+ |> append_query_string(options)
end
def process_request_headers(headers, _body, options) do
@@ -96,10 +97,13 @@ defmodule Couch do
end
def process_options(options) do
+ base_url = System.get_env("EX_COUCH_URL") || "http://127.0.0.1:15984"
+ options = Keyword.put_new(options, :base_url, base_url)
+
options
- |> set_auth_options()
- |> set_inactivity_timeout()
- |> set_request_timeout()
+ |> set_auth_options()
+ |> set_inactivity_timeout()
+ |> set_request_timeout()
end
def process_request_body(body) do
@@ -125,7 +129,12 @@ defmodule Couch do
end
def set_auth_options(options) do
- if Keyword.get(options, :cookie) == nil do
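+ # When the caller already picked an auth mechanism (cookie or basic_auth)
+ # or opted out with no_auth, leave the options as-is instead of injecting
+ # the default admin credentials.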
+ no_auth? = Keyword.get(options, :no_auth) == true
+ cookie? = Keyword.has_key?(options, :cookie)
+ basic_auth? = Keyword.has_key?(options, :basic_auth)
+ if cookie? or no_auth? or basic_auth? do
+ Keyword.delete(options, :no_auth)
+ else
headers = Keyword.get(options, :headers, [])
if headers[:basic_auth] != nil or headers[:authorization] != nil
or List.keymember?(headers, :"X-Auth-CouchDB-UserName", 0) do
@@ -135,8 +144,6 @@ defmodule Couch do
password = System.get_env("EX_PASSWORD") || "pass"
Keyword.put(options, :basic_auth, {username, password})
end
- else
- options
end
end
@@ -161,17 +168,30 @@ defmodule Couch do
login(user, pass)
end
- def login(user, pass, expect \\ :success) do
- resp = Couch.post("/_session", body: %{:username => user, :password => pass})
+ def login(user, pass, options \\ []) do
+ options = options |> Enum.into(%{})
+
+ base_url =
+ Map.get_lazy(options, :base_url, fn ->
+ System.get_env("EX_COUCH_URL") || "http://127.0.0.1:15984"
+ end)
+
+ resp =
+ Couch.post(
+ "/_session",
+ body: %{:username => user, :password => pass},
+ base_url: base_url,
+ no_auth: true
+ )
- if expect == :success do
+ if Map.get(options, :expect, :success) == :success do
true = resp.body["ok"]
cookie = resp.headers[:"set-cookie"]
[token | _] = String.split(cookie, ";")
- %Couch.Session{cookie: token}
+ %Couch.Session{cookie: token, base_url: base_url}
else
true = Map.has_key?(resp.body, "error")
- %Couch.Session{error: resp.body["error"]}
+ %Couch.Session{error: resp.body["error"], base_url: base_url}
end
end
end
diff --git a/test/elixir/run-only b/test/elixir/run-only
new file mode 100755
index 000000000..7c2a4aeea
--- /dev/null
+++ b/test/elixir/run-only
@@ -0,0 +1,3 @@
+#!/bin/bash -e
+cd "$(dirname "$0")"
+mix test --trace "$@"
diff --git a/test/elixir/test/all_docs_test.exs b/test/elixir/test/all_docs_test.exs
index a091dce55..21c136d39 100644
--- a/test/elixir/test/all_docs_test.exs
+++ b/test/elixir/test/all_docs_test.exs
@@ -41,10 +41,9 @@ defmodule AllDocsTest do
assert resp["total_rows"] == length(rows)
# Check _all_docs offset
- retry_until(fn ->
- resp = Couch.get("/#{db_name}/_all_docs", query: %{:startkey => "\"2\""}).body
- assert resp["offset"] == 2
- end)
+ resp = Couch.get("/#{db_name}/_all_docs", query: %{:startkey => "\"2\""}).body
+ assert resp["offset"] == :null
+ assert Enum.at(resp["rows"], 0)["key"] == "2"
# Confirm that queries may assume raw collation
resp =
@@ -72,11 +71,9 @@ defmodule AllDocsTest do
changes = Couch.get("/#{db_name}/_changes").body["results"]
assert length(changes) == 4
- retry_until(fn ->
- deleted = Enum.filter(changes, fn row -> row["deleted"] end)
- assert length(deleted) == 1
- assert hd(deleted)["id"] == "1"
- end)
+ deleted = Enum.filter(changes, fn row -> row["deleted"] end)
+ assert length(deleted) == 1
+ assert hd(deleted)["id"] == "1"
# (remember old seq)
orig_doc = Enum.find(changes, fn row -> row["id"] == "3" end)
@@ -191,34 +188,36 @@ defmodule AllDocsTest do
test "GET with one key", context do
db_name = context[:db_name]
- {:ok, _} = create_doc(
- db_name,
- %{
- _id: "foo",
- bar: "baz"
- }
- )
+ {:ok, _} =
+ create_doc(
+ db_name,
+ %{
+ _id: "foo",
+ bar: "baz"
+ }
+ )
- {:ok, _} = create_doc(
- db_name,
- %{
- _id: "foo2",
- bar: "baz2"
- }
- )
+ {:ok, _} =
+ create_doc(
+ db_name,
+ %{
+ _id: "foo2",
+ bar: "baz2"
+ }
+ )
- resp = Couch.get(
- "/#{db_name}/_all_docs",
- query: %{
- :key => "\"foo\"",
- }
- )
+ resp =
+ Couch.get(
+ "/#{db_name}/_all_docs",
+ query: %{
+ :key => "\"foo\""
+ }
+ )
assert resp.status_code == 200
assert length(Map.get(resp, :body)["rows"]) == 1
end
-
@tag :with_db
test "POST with empty body", context do
db_name = context[:db_name]
@@ -226,32 +225,123 @@ defmodule AllDocsTest do
resp = Couch.post("/#{db_name}/_bulk_docs", body: %{docs: create_docs(0..2)})
assert resp.status_code in [201, 202]
- resp = Couch.post(
- "/#{db_name}/_all_docs",
- body: %{}
- )
+ resp =
+ Couch.post(
+ "/#{db_name}/_all_docs",
+ body: %{}
+ )
assert resp.status_code == 200
assert length(Map.get(resp, :body)["rows"]) == 3
end
@tag :with_db
+ test "POST with missing keys", context do
+ db_name = context[:db_name]
+
+ resp = Couch.post("/#{db_name}/_bulk_docs", body: %{docs: create_docs(0..3)})
+ assert resp.status_code in [201, 202]
+
+ resp =
+ Couch.post(
+ "/#{db_name}/_all_docs",
+ body: %{
+ :keys => [1]
+ }
+ )
+
+ assert resp.status_code == 200
+ rows = resp.body["rows"]
+ assert length(rows) == 1
+ assert hd(rows) == %{"error" => "not_found", "key" => 1}
+ end
+
+ @tag :with_db
test "POST with keys and limit", context do
db_name = context[:db_name]
resp = Couch.post("/#{db_name}/_bulk_docs", body: %{docs: create_docs(0..3)})
assert resp.status_code in [201, 202]
- resp = Couch.post(
- "/#{db_name}/_all_docs",
- body: %{
- :keys => [1, 2],
- :limit => 1
+ resp =
+ Couch.post(
+ "/#{db_name}/_all_docs",
+ body: %{
+ :keys => ["1", "2"],
+ :limit => 1,
+ :include_docs => true
+ }
+ )
+
+ assert resp.status_code == 200
+ rows = resp.body["rows"]
+ assert length(rows) == 1
+ doc = hd(rows)["doc"]
+ assert doc["string"] == "1"
+ end
+
+ @tag :with_db
+ test "_local_docs POST with keys and limit", context do
+ expected = [
+ %{
+ "doc" => %{"_id" => "_local/one", "_rev" => "0-1", "value" => "one"},
+ "id" => "_local/one",
+ "key" => "_local/one",
+ "value" => %{"rev" => "0-1"}
+ },
+ %{
+ "doc" => %{"_id" => "_local/two", "_rev" => "0-1", "value" => "two"},
+ "id" => "_local/two",
+ "key" => "_local/two",
+ "value" => %{"rev" => "0-1"}
+ },
+ %{
+ "doc" => %{
+ "_id" => "three",
+ "_rev" => "1-878d3724976748bc881841046a276ceb",
+ "value" => "three"
+ },
+ "id" => "three",
+ "key" => "three",
+ "value" => %{"rev" => "1-878d3724976748bc881841046a276ceb"}
+ },
+ %{"error" => "not_found", "key" => "missing"},
+ %{"error" => "not_found", "key" => "_local/missing"}
+ ]
+
+ db_name = context[:db_name]
+
+ docs = [
+ %{
+ _id: "_local/one",
+ value: "one"
+ },
+ %{
+ _id: "_local/two",
+ value: "two"
+ },
+ %{
+ _id: "three",
+ value: "three"
}
- )
+ ]
+
+ resp = Couch.post("/#{db_name}/_bulk_docs", body: %{docs: docs})
+ assert resp.status_code in [201, 202]
+
+ resp =
+ Couch.post(
+ "/#{db_name}/_all_docs",
+ body: %{
+ :keys => ["_local/one", "_local/two", "three", "missing", "_local/missing"],
+ :include_docs => true
+ }
+ )
assert resp.status_code == 200
- assert length(Map.get(resp, :body)["rows"]) == 1
+ rows = resp.body["rows"]
+ assert length(rows) == 5
+ assert rows == expected
end
@tag :with_db
@@ -261,15 +351,16 @@ defmodule AllDocsTest do
resp = Couch.post("/#{db_name}/_bulk_docs", body: %{docs: create_docs(0..3)})
assert resp.status_code in [201, 202]
- resp = Couch.post(
- "/#{db_name}/_all_docs",
- query: %{
- :limit => 1
- },
- body: %{
- :keys => [1, 2]
- }
- )
+ resp =
+ Couch.post(
+ "/#{db_name}/_all_docs",
+ query: %{
+ :limit => 1
+ },
+ body: %{
+ :keys => [1, 2]
+ }
+ )
assert resp.status_code == 200
assert length(Map.get(resp, :body)["rows"]) == 1
@@ -282,18 +373,57 @@ defmodule AllDocsTest do
resp = Couch.post("/#{db_name}/_bulk_docs", body: %{docs: create_docs(0..3)})
assert resp.status_code in [201, 202]
- resp = Couch.post(
- "/#{db_name}/_all_docs",
- query: %{
- :limit => 1
- },
- body: %{
- :keys => [1, 2],
- :limit => 2
- }
- )
+ resp =
+ Couch.post(
+ "/#{db_name}/_all_docs",
+ query: %{
+ :limit => 1
+ },
+ body: %{
+ :keys => [1, 2],
+ :limit => 2
+ }
+ )
assert resp.status_code == 200
assert length(Map.get(resp, :body)["rows"]) == 1
end
+
+ @tag :with_db
+ test "all_docs ordering", context do
+ db_name = context[:db_name]
+
+ docs = [
+ %{:_id => "a"},
+ %{:_id => "m"},
+ %{:_id => "z"}
+ ]
+
+ resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => docs})
+ Enum.each(resp.body, &assert(&1["ok"]))
+
+ resp = Couch.get("/#{db_name}/_all_docs", query: %{:startkey => false}).body
+ rows = resp["rows"]
+ assert length(rows) === 3
+ assert get_ids(resp) == ["a", "m", "z"]
+
+ resp = Couch.get("/#{db_name}/_all_docs", query: %{:startkey => 0}).body
+ rows = resp["rows"]
+ assert length(rows) === 3
+ assert get_ids(resp) == ["a", "m", "z"]
+
+ resp = Couch.get("/#{db_name}/_all_docs", query: %{:startkey => "[1,2]"}).body
+ rows = resp["rows"]
+ assert length(rows) === 3
+ assert get_ids(resp) == ["a", "m", "z"]
+
+ resp = Couch.get("/#{db_name}/_all_docs", query: %{:end_key => 0}).body
+ rows = resp["rows"]
+ assert length(rows) === 0
+ end
+
+ defp get_ids(resp) do
+ %{"rows" => rows} = resp
+ Enum.map(rows, fn row -> row["id"] end)
+ end
end
diff --git a/test/elixir/test/auth_cache_test.exs b/test/elixir/test/auth_cache_test.exs
index 8b7c29c71..26d4c6ebe 100644
--- a/test/elixir/test/auth_cache_test.exs
+++ b/test/elixir/test/auth_cache_test.exs
@@ -56,7 +56,7 @@ defmodule AuthCacheTest do
end
defp login_fail(user, password) do
- resp = Couch.login(user, password, :fail)
+ resp = Couch.login(user, password, expect: :fail)
assert resp.error, "Login error is expected."
end
diff --git a/test/elixir/test/basics_test.exs b/test/elixir/test/basics_test.exs
index a03fa2922..f4f95689a 100644
--- a/test/elixir/test/basics_test.exs
+++ b/test/elixir/test/basics_test.exs
@@ -18,6 +18,12 @@ defmodule BasicsTest do
assert Couch.get("/").body["couchdb"] == "Welcome", "Should say welcome"
end
+ test "Ready endpoint" do
+ resp = Couch.get("/_up")
+ assert resp.status_code == 200
+ assert resp.body["status"] == "ok"
+ end
+
@tag :with_db
test "PUT on existing DB should return 412 instead of 500", context do
db_name = context[:db_name]
@@ -45,6 +51,13 @@ defmodule BasicsTest do
{:ok, _} = delete_db(db_name)
end
+ test "Exceeding configured DB name size limit returns an error" do
+ db_name = String.duplicate("x", 239)
+ resp = Couch.put("/#{db_name}")
+ assert resp.status_code == 400
+ assert resp.body["error"] == "database_name_too_long"
+ end
+
@tag :with_db
test "Created database has appropriate db info name", context do
db_name = context[:db_name]
@@ -100,7 +113,7 @@ defmodule BasicsTest do
db_name = context[:db_name]
{:ok, _} = create_doc(db_name, sample_doc_foo())
resp = Couch.get("/#{db_name}/foo", query: %{:local_seq => true})
- assert resp.body["_local_seq"] == 1, "Local seq value == 1"
+ assert is_binary(resp.body["_local_seq"]), "Local seq value is a binary"
end
@tag :with_db
@@ -178,21 +191,33 @@ defmodule BasicsTest do
assert Couch.get("/#{db_name}").body["doc_count"] == 8
+ # Disabling until we figure out reduce functions
+ # # Test reduce function
+ # resp = Couch.get("/#{db_name}/_design/bar/_view/baz")
+ # assert hd(resp.body["rows"])["value"] == 33
+
# Test reduce function
- resp = Couch.get("/#{db_name}/_design/bar/_view/baz")
- assert hd(resp.body["rows"])["value"] == 33
+ resp = Couch.get("/#{db_name}/_design/bar/_view/baz", query: %{:reduce => false})
+ assert resp.body["total_rows"] == 3
# Delete doc and test for updated view results
doc0 = Couch.get("/#{db_name}/0").body
assert Couch.delete("/#{db_name}/0?rev=#{doc0["_rev"]}").body["ok"]
- retry_until(fn ->
- Couch.get("/#{db_name}/_design/foo/_view/baz").body["total_rows"] == 2
- end)
+ # Disabling until we figure out reduce functions
+ # retry_until(fn ->
+ # Couch.get("/#{db_name}/_design/foo/_view/baz").body["total_rows"] == 2
+ # end)
+
+ resp = Couch.get("/#{db_name}/_design/bar/_view/baz", query: %{:reduce => false})
+ assert resp.body["total_rows"] == 2
assert Couch.get("/#{db_name}").body["doc_count"] == 7
assert Couch.get("/#{db_name}/0").status_code == 404
- refute Couch.get("/#{db_name}/0?rev=#{doc0["_rev"]}").status_code == 404
+
+ # No longer true. Old revisions are not stored after
+ # an update.
+ # refute Couch.get("/#{db_name}/0?rev=#{doc0["_rev"]}").status_code == 404
end
@tag :with_db
@@ -304,4 +329,192 @@ defmodule BasicsTest do
# TODO
assert true
end
+
+ @tag :with_db
+ test "_all_docs/queries works", context do
+ db_name = context[:db_name]
+
+ resp = Couch.post("/#{db_name}/_all_docs/queries", body: %{:queries => []})
+ assert resp.status_code == 200
+ assert resp.body["results"] == []
+
+ assert Couch.put("/#{db_name}/doc1", body: %{:a => 1}).body["ok"]
+
+ body = %{
+ :queries => [
+ %{:limit => 1},
+ %{:limit => 0}
+ ]
+ }
+ resp = Couch.post("/#{db_name}/_all_docs/queries", body: body)
+ assert resp.status_code == 200
+
+ assert Map.has_key?(resp.body, "results")
+ results = Enum.sort(resp.body["results"])
+ assert length(results) == 2
+ [res1, res2] = results
+
+ assert res1 == %{"offset" => :null, "rows" => [], "total_rows" => 1}
+
+ assert res2["offset"] == :null
+ assert res2["total_rows"] == 1
+ rows = res2["rows"]
+
+ assert length(rows) == 1
+ [row] = rows
+ assert row["id"] == "doc1"
+ assert row["key"] == "doc1"
+
+ val = row["value"]
+ assert Map.has_key?(val, "rev")
+ end
+
+ @tag :with_db
+ test "_design_docs works", context do
+ db_name = context[:db_name]
+ body = %{:a => 1}
+
+ resp = Couch.get("/#{db_name}/_design_docs")
+ assert resp.status_code == 200
+ assert resp.body == %{"offset" => :null, "rows" => [], "total_rows" => 0}
+
+ assert Couch.put("/#{db_name}/doc1", body: body).body["ok"]
+
+ # Make sure regular documents didn't get picked up
+ resp = Couch.get("/#{db_name}/_design_docs")
+ assert resp.status_code == 200
+ assert resp.body == %{"offset" => :null, "rows" => [], "total_rows" => 0}
+
+ # Add _design/doc1
+ assert Couch.put("/#{db_name}/_design/doc1", body: body).body["ok"]
+ resp = Couch.get("/#{db_name}/_design_docs")
+ assert resp.status_code == 200
+ assert resp.body["total_rows"] == 1
+ [row] = resp.body["rows"]
+
+ assert row["id"] == "_design/doc1"
+ assert row["key"] == "_design/doc1"
+
+ val = row["value"]
+ assert Map.has_key?(val, "rev")
+
+ # Add _design/doc5
+ assert Couch.put("/#{db_name}/_design/doc5", body: body).body["ok"]
+ resp = Couch.get("/#{db_name}/_design_docs")
+ assert resp.status_code == 200
+ [row1, row2] = resp.body["rows"]
+ assert row1["id"] == "_design/doc1"
+ assert row2["id"] == "_design/doc5"
+
+ # descending=true
+ resp = Couch.get("/#{db_name}/_design_docs?descending=true")
+ assert resp.status_code == 200
+ [row1, row2] = resp.body["rows"]
+ assert row1["id"] == "_design/doc5"
+ assert row2["id"] == "_design/doc1"
+
+ # start_key=doc2
+ resp = Couch.get("/#{db_name}/_design_docs?start_key=\"_design/doc2\"")
+ assert resp.status_code == 200
+ [row] = resp.body["rows"]
+ assert row["id"] == "_design/doc5"
+
+ # end_key=doc2
+ resp = Couch.get("/#{db_name}/_design_docs?end_key=\"_design/doc2\"")
+ assert resp.status_code == 200
+ [row] = resp.body["rows"]
+ assert row["id"] == "_design/doc1"
+
+ # inclusive_end=false
+ qstr = "start_key=\"_design/doc2\"&end_key=\"_design/doc5\"&inclusive_end=false"
+ resp = Couch.get("/#{db_name}/_design_docs?" <> qstr)
+ assert resp.status_code == 200
+ assert resp.body == %{"offset" => :null, "rows" => [], "total_rows" => 2}
+
+ # update_seq=true
+ resp = Couch.get("/#{db_name}/_design_docs?update_seq=true")
+ assert resp.status_code == 200
+ assert Map.has_key?(resp.body, "update_seq")
+ end
+
+ @tag :with_db
+ test "_local_docs works", context do
+ db_name = context[:db_name]
+ body = %{:a => 1}
+
+ resp = Couch.get("/#{db_name}/_local_docs")
+ assert resp.status_code == 200
+ assert resp.body == %{"offset" => :null, "rows" => [], "total_rows" => 0}
+
+ # Add _local/doc1
+ assert Couch.put("/#{db_name}/_local/doc1", body: body).body["ok"]
+ resp = Couch.get("/#{db_name}/_local_docs")
+ assert resp.status_code == 200
+ assert resp.body["total_rows"] == 1
+ [row] = resp.body["rows"]
+
+ assert row["id"] == "_local/doc1"
+ assert row["key"] == "_local/doc1"
+
+ val = row["value"]
+ assert Map.has_key?(val, "rev")
+
+ # Add _local/doc5
+ # Use a body > 100 KB to test the local docs chunkifier
+ body = %{:b => String.duplicate("b", 110_000)}
+ assert Couch.put("/#{db_name}/_local/doc5", body: body).body["ok"]
+ resp = Couch.get("/#{db_name}/_local_docs")
+ assert resp.status_code == 200
+ [row1, row2] = resp.body["rows"]
+ assert row1["id"] == "_local/doc1"
+ assert row2["id"] == "_local/doc5"
+
+ # descending=true
+ resp = Couch.get("/#{db_name}/_local_docs?descending=true")
+ assert resp.status_code == 200
+ [row1, row2] = resp.body["rows"]
+ assert row1["id"] == "_local/doc5"
+ assert row2["id"] == "_local/doc1"
+
+ # start_key=doc2
+ resp = Couch.get("/#{db_name}/_local_docs?start_key=\"_local/doc2\"")
+ assert resp.status_code == 200
+ [row] = resp.body["rows"]
+ assert row["id"] == "_local/doc5"
+
+ # end_key=doc2
+ resp = Couch.get("/#{db_name}/_local_docs?end_key=\"_local/doc2\"")
+ assert resp.status_code == 200
+ [row] = resp.body["rows"]
+ assert row["id"] == "_local/doc1"
+
+ # inclusive_end=false
+ qstr = "start_key=\"_local/doc2\"&end_key=\"_local/doc5\"&inclusive_end=false"
+ resp = Couch.get("/#{db_name}/_local_docs?" <> qstr)
+ assert resp.status_code == 200
+ assert resp.body == %{"offset" => :null, "rows" => [], "total_rows" => 2}
+
+ # update_seq=true
+ resp = Couch.get("/#{db_name}/_local_docs?update_seq=true")
+ assert resp.status_code == 200
+ assert Map.has_key?(resp.body, "update_seq")
+ end
+
+ @tag :with_db
+ test "Check _revs_limit", context do
+ db_name = context[:db_name]
+
+ resp = Couch.get("/#{db_name}/_revs_limit")
+ assert resp.status_code == 200
+ assert resp.body == 1000
+
+ body = "999"
+ resp = Couch.put("/#{db_name}/_revs_limit", body: body)
+ assert resp.status_code == 200
+ assert resp.body["ok"] == true
+
+ resp = Couch.get("/#{db_name}/_revs_limit")
+ assert resp.status_code == 200
+ assert resp.body == 999
+ end
end
diff --git a/test/elixir/test/map_test.exs b/test/elixir/test/map_test.exs
new file mode 100644
index 000000000..9254cc4c3
--- /dev/null
+++ b/test/elixir/test/map_test.exs
@@ -0,0 +1,627 @@
+defmodule ViewMapTest do
+ use CouchTestCase
+
+ @moduledoc """
+ Test Map functionality for views
+ """
+ def get_ids(resp) do
+ %{:body => %{"rows" => rows}} = resp
+ Enum.map(rows, fn row -> row["id"] end)
+ end
+
+ def get_keys(resp) do
+ %{:body => %{"rows" => rows}} = resp
+ Enum.map(rows, fn row -> row["key"] end)
+ end
+
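+ # Ten docs; every third one lands in group "one", the rest in "two".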
+ defp create_map_docs(db_name) do
+ docs =
+ for i <- 1..10 do
+ group =
+ if rem(i, 3) == 0 do
+ "one"
+ else
+ "two"
+ end
+
+ %{
+ :_id => "doc-id-#{i}",
+ :value => i,
+ :some => "field",
+ :group => group
+ }
+ end
+
+ resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => docs, :w => 3})
+ assert resp.status_code == 201
+ end
+
+ setup do
+ db_name = random_db_name()
+ {:ok, _} = create_db(db_name)
+ on_exit(fn -> delete_db(db_name) end)
+
+ create_map_docs(db_name)
+
+ map_fun1 = """
+ function(doc) {
+ if (doc.some) {
+ emit(doc.value , doc.value);
+ }
+
+ if (doc._id.indexOf("_design") > -1) {
+ emit(0, "ddoc")
+ }
+ }
+ """
+
+ map_fun2 = """
+ function(doc) {
+ if (doc.group) {
+ emit([doc.some, doc.group], 1);
+ }
+ }
+ """
+
+ map_fun3 = """
+ function(doc) {
+ if (doc.group) {
+ emit(doc.group, 1);
+ }
+ }
+ """
+
+ body = %{
+ :w => 3,
+ :docs => [
+ %{
+ _id: "_design/map",
+ views: %{
+ some: %{map: map_fun1},
+ map_some: %{map: map_fun2},
+ map_group: %{map: map_fun3}
+ }
+ },
+ %{
+ _id: "_design/include_ddocs",
+ views: %{some: %{map: map_fun1}},
+ options: %{include_design: true}
+ }
+ ]
+ }
+
+ resp = Couch.post("/#{db_name}/_bulk_docs", body: body)
+ Enum.each(resp.body, &assert(&1["ok"]))
+
+ {:ok, [db_name: db_name]}
+ end
+
+ def get_reduce_result(resp) do
+ %{:body => %{"rows" => rows}} = resp
+ rows
+ end
+
+ test "query returns docs", context do
+ db_name = context[:db_name]
+
+ url = "/#{db_name}/_design/map/_view/some"
+ resp = Couch.get(url)
+ assert resp.status_code == 200
+
+ ids = get_ids(resp)
+
+ assert ids == [
+ "doc-id-1",
+ "doc-id-2",
+ "doc-id-3",
+ "doc-id-4",
+ "doc-id-5",
+ "doc-id-6",
+ "doc-id-7",
+ "doc-id-8",
+ "doc-id-9",
+ "doc-id-10"
+ ]
+
+ url = "/#{db_name}/_design/map/_view/map_some"
+ resp = Couch.get(url)
+ assert resp.status_code == 200
+
+ ids = get_ids(resp)
+
+ assert ids == [
+ "doc-id-3",
+ "doc-id-6",
+ "doc-id-9",
+ "doc-id-1",
+ "doc-id-10",
+ "doc-id-2",
+ "doc-id-4",
+ "doc-id-5",
+ "doc-id-7",
+ "doc-id-8"
+ ]
+ end
+
+ test "updated docs rebuilds index", context do
+ db_name = context[:db_name]
+
+ url = "/#{db_name}/_design/map/_view/some"
+ resp = Couch.get(url)
+ assert resp.status_code == 200
+ ids = get_ids(resp)
+
+ assert ids == [
+ "doc-id-1",
+ "doc-id-2",
+ "doc-id-3",
+ "doc-id-4",
+ "doc-id-5",
+ "doc-id-6",
+ "doc-id-7",
+ "doc-id-8",
+ "doc-id-9",
+ "doc-id-10"
+ ]
+
+ update_doc_value(db_name, "doc-id-5", 0)
+ update_doc_value(db_name, "doc-id-6", 100)
+
+ resp = Couch.get("/#{db_name}/doc-id-3")
+ doc3 = convert(resp.body)
+ resp = Couch.delete("/#{db_name}/#{doc3["_id"]}", query: %{rev: doc3["_rev"]})
+ assert resp.status_code == 200
+ #
+ resp = Couch.get("/#{db_name}/doc-id-4")
+ doc4 = convert(resp.body)
+ doc4 = Map.delete(doc4, "some")
+ resp = Couch.put("/#{db_name}/#{doc4["_id"]}", body: doc4)
+ assert resp.status_code == 201
+ #
+ resp = Couch.get("/#{db_name}/doc-id-1")
+ doc1 = convert(resp.body)
+ doc1 = Map.put(doc1, "another", "value")
+ resp = Couch.put("/#{db_name}/#{doc1["_id"]}", body: doc1)
+ assert resp.status_code == 201
+
+ url = "/#{db_name}/_design/map/_view/some"
+ resp = Couch.get(url)
+ assert resp.status_code == 200
+ ids = get_ids(resp)
+
+ assert ids == [
+ "doc-id-5",
+ "doc-id-1",
+ "doc-id-2",
+ "doc-id-7",
+ "doc-id-8",
+ "doc-id-9",
+ "doc-id-10",
+ "doc-id-6"
+ ]
+ end
+
+ test "can index design docs", context do
+ db_name = context[:db_name]
+
+ url = "/#{db_name}/_design/include_ddocs/_view/some"
+ resp = Couch.get(url, query: %{limit: 3})
+ assert resp.status_code == 200
+ ids = get_ids(resp)
+
+ assert ids == ["_design/include_ddocs", "_design/map", "doc-id-1"]
+ end
+
+ test "can use key in query string", context do
+ db_name = context[:db_name]
+
+ url = "/#{db_name}/_design/map/_view/map_group"
+ resp = Couch.get(url, query: %{limit: 3, key: "\"one\""})
+ assert resp.status_code == 200
+ ids = get_ids(resp)
+ assert ids == ["doc-id-3", "doc-id-6", "doc-id-9"]
+
+ resp =
+ Couch.get(url,
+ query: %{
+ limit: 3,
+ key: "\"one\"",
+ descending: true
+ }
+ )
+
+ assert resp.status_code == 200
+ ids = get_ids(resp)
+ assert ids == ["doc-id-9", "doc-id-6", "doc-id-3"]
+ end
+
+ test "can use keys in query string", context do
+ db_name = context[:db_name]
+
+ url = "/#{db_name}/_design/map/_view/some"
+ resp = Couch.post(url, body: %{keys: [6, 3, 9]})
+ assert resp.status_code == 200
+ ids = get_ids(resp)
+ assert ids == ["doc-id-6", "doc-id-3", "doc-id-9"]
+
+ # should ignore descending = true
+ resp = Couch.post(url, body: %{keys: [6, 3, 9], descending: true})
+ assert resp.status_code == 200
+ ids = get_ids(resp)
+ assert ids == ["doc-id-6", "doc-id-3", "doc-id-9"]
+ end
+
+ test "inclusive = false", context do
+ db_name = context[:db_name]
+
+ docs = [
+ %{key: "key1"},
+ %{key: "key2"},
+ %{key: "key3"},
+ %{key: "key4"},
+ %{key: "key4"},
+ %{key: "key5"},
+ %{
+ _id: "_design/inclusive",
+ views: %{
+ by_key: %{
+ map: """
+ function (doc) {
+ if (doc.key) {
+ emit(doc.key, doc);
+ }
+ }
+ """
+ }
+ }
+ }
+ ]
+
+ resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => docs, :w => 3})
+ assert resp.status_code == 201
+ url = "/#{db_name}/_design/inclusive/_view/by_key"
+
+ query = %{
+ endkey: "\"key4\"",
+ inclusive_end: false
+ }
+
+ resp = Couch.get(url, query: query)
+ assert resp.status_code == 200
+ keys = get_keys(resp)
+ assert keys == ["key1", "key2", "key3"]
+
+ query = %{
+ startkey: "\"key3\"",
+ endkey: "\"key4\"",
+ inclusive_end: false
+ }
+
+ resp = Couch.get(url, query: query)
+ assert resp.status_code == 200
+ keys = get_keys(resp)
+ assert keys == ["key3"]
+
+ query = %{
+ startkey: "\"key4\"",
+ endkey: "\"key1\"",
+ inclusive_end: false,
+ descending: true
+ }
+
+ resp = Couch.get(url, query: query)
+ assert resp.status_code == 200
+ keys = get_keys(resp)
+ assert keys == ["key4", "key4", "key3", "key2"]
+ end
+
+ test "supports linked documents", context do
+ db_name = context[:db_name]
+
+ docs = [
+ %{_id: "mydoc", foo: "bar"},
+ %{_id: "join-doc", doc_id: "mydoc"},
+ %{
+ _id: "_design/join",
+ views: %{
+ by_doc_id: %{
+ map: """
+ function (doc) {
+ if (doc.doc_id) {
+ emit(doc._id, {_id: doc.doc_id});
+ }
+ }
+ """
+ }
+ }
+ }
+ ]
+
+ resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => docs, :w => 3})
+ assert resp.status_code == 201
+
+ url = "/#{db_name}/_design/join/_view/by_doc_id"
+ resp = Couch.get(url)
+ assert resp.status_code == 200
+ %{:body => %{"rows" => [row]}} = resp
+
+ assert row == %{
+ "id" => "join-doc",
+ "key" => "join-doc",
+ "value" => %{"_id" => "mydoc"}
+ }
+
+ url = "/#{db_name}/_design/join/_view/by_doc_id"
+ resp = Couch.get(url, query: %{include_docs: true})
+ assert resp.status_code == 200
+ %{:body => %{"rows" => [doc]}} = resp
+
+ assert doc["id"] == "join-doc"
+ assert doc["doc"]["_id"] == "mydoc"
+ end
+
+ test "bad range returns error", context do
+ db_name = context[:db_name]
+
+ url = "/#{db_name}/_design/map/_view/some"
+ resp = Couch.get(url, query: %{startkey: "5", endkey: "4"})
+ assert resp.status_code == 400
+ %{:body => %{"error" => error}} = resp
+ assert error == "query_parse_error"
+ end
+
+ test "multiple emits in correct value order", context do
+ db_name = context[:db_name]
+
+ docs = [
+ %{_id: "doc1", foo: "foo", bar: "bar"},
+ %{_id: "doc2", foo: "foo", bar: "bar"},
+ %{
+ _id: "_design/emit",
+ views: %{
+ multiple_emit: %{
+ map: """
+ function (doc) {
+ if (!doc.foo) {
+ return;
+ }
+ emit(doc.foo);
+ emit(doc.bar);
+ emit(doc.foo);
+ emit(doc.bar, 'multiple values!');
+ emit(doc.bar, 'crayon!');
+ }
+ """
+ }
+ }
+ }
+ ]
+
+ resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => docs, :w => 3})
+ assert resp.status_code == 201
+
+ url = "/#{db_name}/_design/emit/_view/multiple_emit"
+ resp = Couch.post(url, body: %{keys: ["foo", "bar"]})
+ assert resp.status_code == 200
+ %{:body => %{"rows" => rows}} = resp
+
+ assert Enum.at(rows, 0)["key"] == "foo"
+ assert Enum.at(rows, 0)["id"] == "doc1"
+ assert Enum.at(rows, 1)["key"] == "foo"
+ assert Enum.at(rows, 1)["id"] == "doc1"
+
+ assert Enum.at(rows, 2)["key"] == "foo"
+ assert Enum.at(rows, 2)["id"] == "doc2"
+ assert Enum.at(rows, 3)["key"] == "foo"
+ assert Enum.at(rows, 3)["id"] == "doc2"
+
+ assert Enum.at(rows, 4)["key"] == "bar"
+ assert Enum.at(rows, 4)["id"] == "doc1"
+ assert Enum.at(rows, 4)["value"] == :null
+ assert Enum.at(rows, 5)["key"] == "bar"
+ assert Enum.at(rows, 5)["id"] == "doc1"
+ assert Enum.at(rows, 5)["value"] == "crayon!"
+ assert Enum.at(rows, 6)["key"] == "bar"
+ assert Enum.at(rows, 6)["id"] == "doc1"
+ assert Enum.at(rows, 6)["value"] == "multiple values!"
+
+ assert Enum.at(rows, 7)["key"] == "bar"
+ assert Enum.at(rows, 7)["id"] == "doc2"
+ assert Enum.at(rows, 7)["value"] == :null
+ assert Enum.at(rows, 8)["key"] == "bar"
+ assert Enum.at(rows, 8)["id"] == "doc2"
+ assert Enum.at(rows, 8)["value"] == "crayon!"
+ assert Enum.at(rows, 9)["key"] == "bar"
+ assert Enum.at(rows, 9)["id"] == "doc2"
+ assert Enum.at(rows, 9)["value"] == "multiple values!"
+ end
+
+ test "can do design doc swap", context do
+ db_name = context[:db_name]
+
+ docs = [
+ %{_id: "doc1", foo: "foo", bar: "bar"},
+ %{
+ _id: "_design/view1",
+ views: %{
+ view: %{
+ map: """
+ function (doc) {
+ if (!doc.foo) {
+ return;
+ }
+ emit(doc.foo);
+ }
+ """
+ }
+ }
+ },
+ %{
+ _id: "_design/view2",
+ views: %{
+ view: %{
+ map: """
+ function (doc) {
+ if (!doc.bar) {
+ return;
+ }
+ emit(doc.bar);
+ }
+ """
+ }
+ }
+ }
+ ]
+
+ resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => docs})
+ assert resp.status_code == 201
+
+ url1 = "/#{db_name}/_design/view1/_view/view"
+ url2 = "/#{db_name}/_design/view2/_view/view"
+
+ resp = Couch.get(url1)
+ assert resp.status_code == 200
+ keys = get_keys(resp)
+ assert keys == ["foo"]
+
+ resp = Couch.get(url2)
+ assert resp.status_code == 200
+ keys = get_keys(resp)
+ assert keys == ["bar"]
+
+ view1 = Couch.get("/#{db_name}/_design/view1")
+ view2 = Couch.get("/#{db_name}/_design/view2")
+
+ new_view1 = Map.replace!(view1.body, "views", view2.body["views"])
+
+ resp = Couch.put("/#{db_name}/_design/view1", body: new_view1)
+ assert resp.status_code in [201, 202]
+
+ resp = Couch.get(url1, query: %{update: false})
+ assert resp.status_code == 200
+ keys = get_keys(resp)
+ assert keys == ["bar"]
+ end
+
+ test "send error for failed indexing", context do
+ db_name = context[:db_name]
+
+ docs = [
+ %{_id: "doc1", foo: "foo", bar: "bar"},
+ %{
+ _id: "_design/view1",
+ views: %{
+ view: %{
+ map: """
+ function (doc) {
+ for (var i=0; i<10000; i++) {
+ emit({doc: doc._id + 1}, doc._id);
+ }
+ }
+ """
+ }
+ }
+ }
+ ]
+
+ resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => docs})
+ assert resp.status_code == 201
+
+ url = "/#{db_name}/_design/view1/_view/view"
+
+ resp = Couch.get(url, timeout: 500_000)
+ assert resp.status_code == 500
+ %{:body => %{"error" => error}} = resp
+ assert error == "foundationdb_error"
+ end
+
+ test "descending=true query with startkey_docid", context do
+ db_name = context[:db_name]
+
+ url = "/#{db_name}/_design/map/_view/some"
+
+ resp =
+ Couch.get(url,
+ query: %{descending: true, startkey: 8, startkey_docid: "doc-id-8", limit: 3}
+ )
+
+ ids = get_ids(resp)
+
+ assert resp.status_code == 200
+ assert ids == ["doc-id-8", "doc-id-7", "doc-id-6"]
+ end
+
+ test "_conflict is supported", context do
+ db_name = context[:db_name]
+ conflict = %{
+ :_id => "doc-id-1",
+ :value => 10,
+ :some => "field",
+ :group => false,
+ :_rev => "1-7cc2eea421141064893681a1582148d8"
+ }
+ ddoc = %{
+ _id: "_design/conflicts",
+ views: %{
+ view: %{
+ map: """
+ function (doc) {
+ if (!doc._conflicts) {
+ return;
+ }
+ emit(doc._id, doc._conflicts);
+ }
+ """
+ }
+ }
+ }
+
+ resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => [ddoc]})
+ assert resp.status_code == 201
+ resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => [conflict], :new_edits => false})
+ assert resp.status_code == 201
+
+ url = "/#{db_name}/_design/conflicts/_view/view"
+ resp = Couch.get(url)
+ assert get_ids(resp) == ["doc-id-1"]
+ end
+
+ test "_local_seq is supported", context do
+ db_name = context[:db_name]
+ ddoc = %{
+ _id: "_design/local_seq",
+ views: %{
+ view: %{
+ map: """
+ function (doc) {
+ emit(doc._local_seq, doc._id);
+ }
+ """
+ }
+ },
+ options: %{
+ local_seq: true
+ }
+ }
+
+ resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => [ddoc]})
+ assert resp.status_code == 201
+
+ url = "/#{db_name}/_design/local_seq/_view/view"
+ resp = Couch.get(url, query: %{limit: 1})
+ key = Enum.at(resp.body["rows"], 0)["key"]
+ assert key != :null
+ end
+
+ def update_doc_value(db_name, id, value) do
+ resp = Couch.get("/#{db_name}/#{id}")
+ doc = convert(resp.body)
+ doc = Map.put(doc, "value", value)
+ resp = Couch.put("/#{db_name}/#{id}", body: doc)
+ assert resp.status_code == 201
+ end
+
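+ # Normalize to a map with binary keys by round-tripping through jiffy.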
+ def convert(value) do
+ :jiffy.decode(:jiffy.encode(value), [:return_maps])
+ end
+end
diff --git a/test/elixir/test/partition_all_docs_test.exs b/test/elixir/test/partition_all_docs_test.exs
deleted file mode 100644
index 816a8d6ed..000000000
--- a/test/elixir/test/partition_all_docs_test.exs
+++ /dev/null
@@ -1,204 +0,0 @@
-defmodule PartitionAllDocsTest do
- use CouchTestCase
- import PartitionHelpers
-
- @moduledoc """
- Test Partition functionality for for all_docs
- """
-
- setup_all do
- db_name = random_db_name()
- {:ok, _} = create_db(db_name, query: %{partitioned: true, q: 1})
- on_exit(fn -> delete_db(db_name) end)
-
- create_partition_docs(db_name)
-
- {:ok, [db_name: db_name]}
- end
-
- test "all_docs with partitioned:true returns partitioned fields", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_all_docs"
- resp = Couch.get(url)
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert Enum.dedup(partitions) == ["foo"]
-
- url = "/#{db_name}/_partition/bar/_all_docs"
- resp = Couch.get(url)
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert Enum.dedup(partitions) == ["bar"]
- end
-
- test "partition all_docs errors with incorrect partition supplied", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/_bar/_all_docs"
- resp = Couch.get(url)
- assert resp.status_code == 400
-
- url = "/#{db_name}/_partition//_all_docs"
- resp = Couch.get(url)
- assert resp.status_code == 400
- end
-
- test "partitioned _all_docs works with startkey, endkey range", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_all_docs"
- resp = Couch.get(url, query: %{start_key: "\"foo:12\"", end_key: "\"foo:2\""})
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 5
- assert Enum.dedup(partitions) == ["foo"]
- end
-
- test "partitioned _all_docs works with keys", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_all_docs"
- resp = Couch.post(url, body: %{keys: ["foo:2", "foo:4", "foo:6"]})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 3
- assert ids == ["foo:2", "foo:4", "foo:6"]
- end
-
- test "partition _all_docs works with limit", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_all_docs"
- resp = Couch.get(url, query: %{limit: 5})
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 5
- assert Enum.dedup(partitions) == ["foo"]
- end
-
- test "partition _all_docs with descending", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_all_docs"
- resp = Couch.get(url, query: %{descending: true, limit: 5})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 5
- assert ids == ["foo:98", "foo:96", "foo:94", "foo:92", "foo:90"]
-
- resp = Couch.get(url, query: %{descending: false, limit: 5})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 5
- assert ids == ["foo:10", "foo:100", "foo:12", "foo:14", "foo:16"]
- end
-
- test "partition _all_docs with skip", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_all_docs"
- resp = Couch.get(url, query: %{skip: 5, limit: 5})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 5
- assert ids == ["foo:18", "foo:2", "foo:20", "foo:22", "foo:24"]
- end
-
- test "partition _all_docs with key", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_all_docs"
- resp = Couch.get(url, query: %{key: "\"foo:22\""})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 1
- assert ids == ["foo:22"]
- end
-
- test "partition all docs can set query limits", context do
- set_config({"query_server_config", "partition_query_limit", "2000"})
-
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_partition_ddoc(db_name)
-
- url = "/#{db_name}/_partition/foo/_all_docs"
-
- resp =
- Couch.get(
- url,
- query: %{
- limit: 20
- }
- )
-
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 20
-
- resp = Couch.get(url)
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 50
-
- resp =
- Couch.get(
- url,
- query: %{
- limit: 2000
- }
- )
-
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 50
-
- resp =
- Couch.get(
- url,
- query: %{
- limit: 2001
- }
- )
-
- assert resp.status_code == 400
- %{:body => %{"reason" => reason}} = resp
- assert Regex.match?(~r/Limit is too large/, reason)
-
- resp =
- Couch.get(
- url,
- query: %{
- limit: 2000,
- skip: 25
- }
- )
-
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 25
- end
-
- # This test is timing based so it could be a little flaky.
- # If that turns out to be the case we should probably just skip it
- @tag :pending
- test "partition _all_docs with timeout", context do
- set_config({"fabric", "partition_view_timeout", "1"})
-
- db_name = context[:db_name]
- create_partition_docs(db_name)
-
- retry_until(fn ->
- url = "/#{db_name}/_partition/foo/_all_docs"
-
- case Couch.get(url) do
- %{:body => %{"reason" => reason}} ->
- Regex.match?(~r/not be processed in a reasonable amount of time./, reason)
-
- _ ->
- false
- end
- end)
- end
-end
diff --git a/test/elixir/test/partition_crud_test.exs b/test/elixir/test/partition_crud_test.exs
deleted file mode 100644
index 7e32abbdc..000000000
--- a/test/elixir/test/partition_crud_test.exs
+++ /dev/null
@@ -1,369 +0,0 @@
-defmodule PartitionCrudTest do
- use CouchTestCase
-
- @tag :with_partitioned_db
- test "Sets partition in db info", context do
- db_name = context[:db_name]
- resp = Couch.get("/#{db_name}")
- %{body: body} = resp
- assert body["props"] == %{"partitioned" => true}
- end
-
- @tag :with_partitioned_db
- test "PUT and GET document", context do
- db_name = context[:db_name]
- id = "my-partition:doc"
- url = "/#{db_name}/#{id}"
-
- resp = Couch.put(url, body: %{partitioned_doc: true})
- %{body: doc} = resp
- assert resp.status_code in [201, 202]
- assert doc["id"] == id
-
- resp = Couch.get(url)
- assert resp.status_code == 200
-
- %{body: doc} = resp
- assert doc["_id"] == id
- end
-
- @tag :with_partitioned_db
- test "PUT fails if a partition key is not supplied", context do
- db_name = context[:db_name]
- id = "not-partitioned"
- url = "/#{db_name}/#{id}"
-
- resp = Couch.put(url, body: %{partitioned_doc: false})
- assert resp.status_code == 400
-
- error = %{
- "error" => "illegal_docid",
- "reason" => "Doc id must be of form partition:id"
- }
-
- assert Map.get(resp, :body) == error
- end
-
- @tag :with_partitioned_db
- test "PUT fails for partitions with _", context do
- db_name = context[:db_name]
- id = "_bad:partitioned"
- url = "/#{db_name}/#{id}"
-
- resp = Couch.put(url, body: %{partitioned_doc: false})
-
- error = %{
- "error" => "illegal_docid",
- "reason" => "Only reserved document ids may start with underscore."
- }
-
- assert resp.status_code == 400
- assert Map.get(resp, :body) == error
- end
-
- @tag :with_partitioned_db
- test "PUT fails for bad partitions", context do
- db_name = context[:db_name]
- id = "bad:"
- url = "/#{db_name}/#{id}"
-
- resp = Couch.put(url, body: %{partitioned_doc: false})
-
- error = %{
- "error" => "illegal_docid",
- "reason" => "Document id must not be empty"
- }
-
- assert resp.status_code == 400
- assert Map.get(resp, :body) == error
- end
-
- @tag :with_partitioned_db
- test "POST and GET document", context do
- db_name = context[:db_name]
- id = "my-partition-post:doc"
- url = "/#{db_name}"
-
- resp = Couch.post(url, body: %{_id: id, partitioned_doc: true})
- assert resp.status_code in [201, 202]
-
- resp = Couch.get("#{url}/#{id}")
- assert resp.status_code == 200
-
- %{body: doc} = resp
- assert doc["_id"] == id
- end
-
- @tag :with_partitioned_db
- test "GET to partition returns 400", context do
- db_name = context[:db_name]
- url = "/#{db_name}/_partition"
-
- resp = Couch.get("#{url}")
- assert resp.status_code == 400
- end
-
- @tag :with_partitioned_db
- test "POST and _bulk_get document", context do
- db_name = context[:db_name]
- id = "my-partition-post:doc"
- url = "/#{db_name}"
-
- resp = Couch.post(url, body: %{_id: id, partitioned_doc: true})
- assert resp.status_code in [201, 202]
-
- resp = Couch.post("#{url}/_bulk_get", body: %{docs: [%{id: id}]})
- assert resp.status_code == 200
-
- %{body: body} = resp
-
- assert %{
- "results" => [
- %{
- "docs" => [
- %{
- "ok" => %{
- "_id" => "my-partition-post:doc",
- "_rev" => "1-43d86359741cb629c0953a2beb6e9d7a",
- "partitioned_doc" => true
- }
- }
- ],
- "id" => "my-partition-post:doc"
- }
- ]
- } == body
- end
-
- @tag :with_partitioned_db
- test "_bulk_get bad partitioned document", context do
- db_name = context[:db_name]
- id = "my-partition-post"
- url = "/#{db_name}"
-
- resp = Couch.post("#{url}/_bulk_get", body: %{docs: [%{id: id}]})
- assert resp.status_code == 200
- %{:body => body} = resp
-
- assert %{
- "results" => [
- %{
- "docs" => [
- %{
- "error" => %{
- "error" => "illegal_docid",
- "id" => "my-partition-post",
- "reason" => "Doc id must be of form partition:id",
- "rev" => :null
- }
- }
- ],
- "id" => "my-partition-post"
- }
- ]
- } == body
- end
-
- @tag :with_partitioned_db
- test "POST fails if a partition key is not supplied", context do
- db_name = context[:db_name]
- id = "not-partitioned-post"
- url = "/#{db_name}"
-
- resp = Couch.post(url, body: %{_id: id, partitited_doc: false})
- assert resp.status_code == 400
- end
-
- @tag :with_partitioned_db
- test "_bulk_docs saves docs with partition key", context do
- db_name = context[:db_name]
-
- docs = [
- %{_id: "foo:1"},
- %{_id: "bar:1"}
- ]
-
- url = "/#{db_name}"
- resp = Couch.post("#{url}/_bulk_docs", body: %{:docs => docs})
- assert resp.status_code in [201, 202]
-
- resp = Couch.get("#{url}/foo:1")
- assert resp.status_code == 200
-
- resp = Couch.get("#{url}/bar:1")
- assert resp.status_code == 200
- end
-
- @tag :with_partitioned_db
- test "_bulk_docs errors with missing partition key", context do
- db_name = context[:db_name]
-
- docs = [
- %{_id: "foo1"}
- ]
-
- error = %{
- "error" => "illegal_docid",
- "reason" => "Doc id must be of form partition:id"
- }
-
- url = "/#{db_name}"
- resp = Couch.post("#{url}/_bulk_docs", body: %{:docs => docs})
- assert resp.status_code == 400
- assert Map.get(resp, :body) == error
- end
-
- @tag :with_partitioned_db
- test "_bulk_docs errors with bad partition key", context do
- db_name = context[:db_name]
-
- docs = [
- %{_id: "_foo:1"}
- ]
-
- error = %{
- "error" => "illegal_docid",
- "reason" => "Only reserved document ids may start with underscore."
- }
-
- url = "/#{db_name}"
- resp = Couch.post("#{url}/_bulk_docs", body: %{:docs => docs})
- assert resp.status_code == 400
- assert Map.get(resp, :body) == error
- end
-
- @tag :with_partitioned_db
- test "_bulk_docs errors with bad doc key", context do
- db_name = context[:db_name]
-
- docs = [
- %{_id: "foo:"}
- ]
-
- error = %{
- "error" => "illegal_docid",
- "reason" => "Document id must not be empty"
- }
-
- url = "/#{db_name}"
- resp = Couch.post("#{url}/_bulk_docs", body: %{:docs => docs})
- assert resp.status_code == 400
- assert Map.get(resp, :body) == error
- end
-
- @tag :with_partitioned_db
- test "saves attachment with partitioned doc", context do
- db_name = context[:db_name]
- id = "foo:doc-with-attachment"
-
- doc = %{
- _id: id,
- _attachments: %{
- "foo.txt": %{
- content_type: "text/plain",
- data: Base.encode64("This is a text document to save")
- }
- }
- }
-
- resp = Couch.put("/#{db_name}/#{id}", body: doc)
-
- assert resp.status_code in [201, 202]
-
- resp = Couch.get("/#{db_name}/#{id}")
- assert resp.status_code == 200
- body = Map.get(resp, :body)
- rev = Map.get(body, "_rev")
-
- assert body["_attachments"] == %{
- "foo.txt" => %{
- "content_type" => "text/plain",
- # "digest" => "md5-OW2BoZAtMqs1E+fAnLpNBw==",
- # Temp remove the digest part since the digest value
- # seems to be different on travis
- "digest" => body["_attachments"]["foo.txt"]["digest"],
- "length" => 31,
- "revpos" => 1,
- "stub" => true
- }
- }
-
- resp = Couch.get("/#{db_name}/#{id}/foo.txt")
- assert Map.get(resp, :body) == "This is a text document to save"
-
- resp =
- Couch.put(
- "/#{db_name}/#{id}/bar.txt?rev=#{rev}",
- headers: ["Content-Type": "text/plain"],
- body: "This is another document"
- )
-
- assert resp.status_code in [201, 202]
- %{:body => body} = resp
- assert body["ok"] == true
- assert body["id"] == id
- end
-
- @tag :with_partitioned_db
- test "can purge partitioned db docs", context do
- db_name = context[:db_name]
-
- doc = %{
- _id: "foo:bar",
- value: "some value"
- }
-
- resp = Couch.post("/#{db_name}", query: [w: 3], body: doc)
- assert resp.status_code in [201, 202]
- %{body: body} = resp
- rev = body["rev"]
-
- resp = Couch.get("/#{db_name}/foo:bar")
- assert resp.status_code == 200
-
- body = %{"foo:bar" => [rev]}
- resp = Couch.post("/#{db_name}/_purge", query: [w: 3], body: body)
- assert resp.status_code in [201, 202]
-
- resp = Couch.get("/#{db_name}/foo:bar")
- assert resp.status_code == 404
- assert resp.body == %{"error" => "not_found", "reason" => "missing"}
- end
-
- @tag :with_partitioned_db
- test "purge rejects unpartitioned docid", context do
- db_name = context[:db_name]
- body = %{"no_partition" => ["1-967a00dff5e02add41819138abb3284d"]}
- resp = Couch.post("/#{db_name}/_purge", query: [w: 3], body: body)
- assert resp.status_code == 400
- %{body: body} = resp
- assert body["error"] == "illegal_docid"
- end
-
- test "create database with bad `partitioned` value", _context do
- resp = Couch.put("/bad-db?partitioned=tru")
- assert resp.status_code == 400
-
- assert Map.get(resp, :body) == %{
- "error" => "bad_request",
- "reason" => "Invalid `partitioned` parameter"
- }
- end
-
- test "can create unpartitioned system db", _context do
- Couch.delete("/_replicator")
- resp = Couch.put("/_replicator")
- assert resp.status_code in [201, 202]
- assert resp.body == %{"ok" => true}
- end
-
- test "cannot create partitioned system db", _context do
- Couch.delete("/_replicator")
-
- resp = Couch.put("/_replicator?partitioned=true")
- assert resp.status_code == 400
-
- %{:body => %{"reason" => reason}} = resp
- assert Regex.match?(~r/Cannot partition a system database/, reason)
- end
-end
diff --git a/test/elixir/test/partition_ddoc_test.exs b/test/elixir/test/partition_ddoc_test.exs
deleted file mode 100644
index 9fdfb9260..000000000
--- a/test/elixir/test/partition_ddoc_test.exs
+++ /dev/null
@@ -1,179 +0,0 @@
-defmodule PartitionDDocTest do
- use CouchTestCase
-
- @moduledoc """
- Test partition design doc interactions
- """
-
- setup do
- db_name = random_db_name()
- {:ok, _} = create_db(db_name, query: %{partitioned: true, q: 1})
- on_exit(fn -> delete_db(db_name) end)
-
- {:ok, [db_name: db_name]}
- end
-
- test "PUT /dbname/_design/foo", context do
- db_name = context[:db_name]
- resp = Couch.put("/#{db_name}/_design/foo", body: %{stuff: "here"})
- assert resp.status_code in [201, 202]
- end
-
- test "PUT /dbname/_design/foo to update", context do
- db_name = context[:db_name]
- ddoc_id = "_design/foo"
-
- ddoc = %{
- _id: ddoc_id,
- stuff: "here"
- }
-
- resp = Couch.put("/#{db_name}/#{ddoc_id}", body: ddoc)
- assert resp.status_code in [201, 202]
- %{body: body} = resp
-
- ddoc = Map.put(ddoc, :_rev, body["rev"])
- ddoc = Map.put(ddoc, :other, "attribute")
- resp = Couch.put("/#{db_name}/#{ddoc_id}", body: ddoc)
- assert resp.status_code in [201, 202]
- end
-
- test "PUT /dbname/_design/foo/readme.txt", context do
- db_name = context[:db_name]
- ddoc_id = "_design/foo"
-
- ddoc = %{
- _id: ddoc_id,
- stuff: "here"
- }
-
- resp = Couch.put("/#{db_name}/#{ddoc_id}", body: ddoc)
- assert resp.status_code in [201, 202]
- %{body: body} = resp
-
- att = "This is a readme.txt"
-
- opts = [
- headers: [{:"Content-Type", "text/plain"}],
- query: [rev: body["rev"]],
- body: att
- ]
-
- resp = Couch.put("/#{db_name}/#{ddoc_id}/readme.txt", opts)
- assert resp.status_code in [201, 202]
- end
-
- test "DELETE /dbname/_design/foo", context do
- db_name = context[:db_name]
- ddoc_id = "_design/foo"
-
- ddoc = %{
- _id: ddoc_id,
- stuff: "here"
- }
-
- resp = Couch.put("/#{db_name}/#{ddoc_id}", body: ddoc)
- assert resp.status_code in [201, 202]
- %{body: body} = resp
-
- resp = Couch.delete("/#{db_name}/#{ddoc_id}", query: [rev: body["rev"]])
- assert resp.status_code == 200
- end
-
- test "POST /dbname with design doc", context do
- db_name = context[:db_name]
- body = %{_id: "_design/foo", stuff: "here"}
- resp = Couch.post("/#{db_name}", body: body)
- assert resp.status_code in [201, 202]
- end
-
- test "POST /dbname/_bulk_docs with design doc", context do
- db_name = context[:db_name]
- body = %{:docs => [%{_id: "_design/foo", stuff: "here"}]}
- resp = Couch.post("/#{db_name}/_bulk_docs", body: body)
- assert resp.status_code in [201, 202]
- end
-
- test "GET /dbname/_design/foo", context do
- db_name = context[:db_name]
- resp = Couch.put("/#{db_name}/_design/foo", body: %{stuff: "here"})
- assert resp.status_code in [201, 202]
-
- resp = Couch.get("/#{db_name}/_design/foo")
- assert resp.status_code == 200
- end
-
- test "GET /dbname/_design/foo?rev=$rev", context do
- db_name = context[:db_name]
- resp = Couch.put("/#{db_name}/_design/foo", body: %{stuff: "here"})
- assert resp.status_code in [201, 202]
- %{body: body} = resp
-
- resp = Couch.get("/#{db_name}/_design/foo", query: [rev: body["rev"]])
- assert resp.status_code == 200
- end
-
- test "GET /dbname/_bulk_get", context do
- db_name = context[:db_name]
- resp = Couch.put("/#{db_name}/_design/foo", body: %{stuff: "here"})
- assert resp.status_code in [201, 202]
-
- body = %{docs: [%{id: "_design/foo"}]}
- resp = Couch.post("/#{db_name}/_bulk_get", body: body)
- assert resp.status_code == 200
- %{body: body} = resp
-
- assert length(body["results"]) == 1
-
- %{"results" => [%{"id" => "_design/foo", "docs" => [%{"ok" => _}]}]} = body
- end
-
- test "GET /dbname/_bulk_get with rev", context do
- db_name = context[:db_name]
- resp = Couch.put("/#{db_name}/_design/foo", body: %{stuff: "here"})
- assert resp.status_code in [201, 202]
- %{body: body} = resp
-
- body = %{docs: [%{id: "_design/foo", rev: body["rev"]}]}
- resp = Couch.post("/#{db_name}/_bulk_get", body: body)
- assert resp.status_code == 200
- %{body: body} = resp
-
- assert length(body["results"]) == 1
- %{"results" => [%{"id" => "_design/foo", "docs" => [%{"ok" => _}]}]} = body
- end
-
- test "GET /dbname/_all_docs?key=$ddoc_id", context do
- db_name = context[:db_name]
- resp = Couch.put("/#{db_name}/_design/foo", body: %{stuff: "here"}, query: [w: 3])
- assert resp.status_code in [201, 202]
-
- resp = Couch.get("/#{db_name}/_all_docs", query: [key: "\"_design/foo\""])
- assert resp.status_code == 200
- %{body: body} = resp
-
- assert length(body["rows"]) == 1
- assert %{"rows" => [%{"id" => "_design/foo"}]} = body
- end
-
- @tag :skip_on_jenkins
- test "GET /dbname/_design_docs", context do
- db_name = context[:db_name]
-
- retry_until(
- fn ->
- resp = Couch.put("/#{db_name}/_design/foo", body: %{stuff: "here"})
- assert resp.status_code in [201, 202]
-
- resp = Couch.get("/#{db_name}/_design_docs")
- assert resp.status_code == 200
- %{body: body} = resp
-
- assert length(body["rows"]) == 1
- %{"rows" => [%{"id" => "_design/foo"}]} = body
- end,
- 500,
- 10_000
- )
- end
-end
diff --git a/test/elixir/test/partition_design_docs_test.exs b/test/elixir/test/partition_design_docs_test.exs
deleted file mode 100644
index 4ccd63fe0..000000000
--- a/test/elixir/test/partition_design_docs_test.exs
+++ /dev/null
@@ -1,16 +0,0 @@
-defmodule PartitionDesignDocsTest do
- use CouchTestCase
-
- @moduledoc """
- Test Partition functionality for partition design docs
- """
-
- @tag :with_partitioned_db
- test "/_partition/:pk/_design/doc 404", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/fakekey/_design/mrtest/_view/some"
- resp = Couch.get(url)
- assert resp.status_code == 404
- end
-end
diff --git a/test/elixir/test/partition_helpers.exs b/test/elixir/test/partition_helpers.exs
deleted file mode 100644
index 3322ed7f5..000000000
--- a/test/elixir/test/partition_helpers.exs
+++ /dev/null
@@ -1,76 +0,0 @@
-defmodule PartitionHelpers do
- use ExUnit.Case
-
- def create_partition_docs(db_name, pk1 \\ "foo", pk2 \\ "bar") do
- docs =
- for i <- 1..100 do
- id =
- if rem(i, 2) == 0 do
- "#{pk1}:#{i}"
- else
- "#{pk2}:#{i}"
- end
-
- group =
- if rem(i, 3) == 0 do
- "one"
- else
- "two"
- end
-
- %{
- :_id => id,
- :value => i,
- :some => "field",
- :group => group
- }
- end
-
- resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:w => 3, :docs => docs})
- assert resp.status_code in [201, 202]
- end
-
- def create_partition_ddoc(db_name, opts \\ %{}) do
- map_fn = """
- function(doc) {
- if (doc.some) {
- emit(doc.value, doc.some);
- }
- }
- """
-
- default_ddoc = %{
- views: %{
- some: %{
- map: map_fn
- }
- }
- }
-
- ddoc = Enum.into(opts, default_ddoc)
-
- resp = Couch.put("/#{db_name}/_design/mrtest", body: ddoc)
- assert resp.status_code in [201, 202]
- assert Map.has_key?(resp.body, "ok") == true
- end
-
- def get_ids(resp) do
- %{:body => %{"rows" => rows}} = resp
- Enum.map(rows, fn row -> row["id"] end)
- end
-
- def get_partitions(resp) do
- %{:body => %{"rows" => rows}} = resp
-
- Enum.map(rows, fn row ->
- [partition, _] = String.split(row["id"], ":")
- partition
- end)
- end
-
- def assert_correct_partition(partitions, correct_partition) do
- assert Enum.all?(partitions, fn partition ->
- partition == correct_partition
- end)
- end
-end
diff --git a/test/elixir/test/partition_mango_test.exs b/test/elixir/test/partition_mango_test.exs
deleted file mode 100644
index 992999fb9..000000000
--- a/test/elixir/test/partition_mango_test.exs
+++ /dev/null
@@ -1,683 +0,0 @@
-defmodule PartitionMangoTest do
- use CouchTestCase
- import PartitionHelpers, except: [get_partitions: 1]
-
- @moduledoc """
- Test Partition functionality for mango
- """
- def create_index(db_name, fields \\ ["some"], opts \\ %{}) do
- default_index = %{
- index: %{
- fields: fields
- }
- }
-
- index = Enum.into(opts, default_index)
- resp = Couch.post("/#{db_name}/_index", body: index)
-
- assert resp.status_code == 200
- assert resp.body["result"] == "created"
- assert resp.body["id"] != nil
- assert resp.body["name"] != nil
-
- # wait until the database reports the index as available
- retry_until(fn ->
- get_index(db_name, resp.body["id"], resp.body["name"]) != nil
- end)
- end
-
- def list_indexes(db_name) do
- resp = Couch.get("/#{db_name}/_index")
- assert resp.status_code == 200
- resp.body["indexes"]
- end
-
- def get_index(db_name, ddocid, name) do
- indexes = list_indexes(db_name)
- Enum.find(indexes, fn(index) ->
- match?(%{"ddoc" => ^ddocid, "name" => ^name}, index)
- end)
- end
-
- def get_partitions(resp) do
- %{:body => %{"docs" => docs}} = resp
-
- Enum.map(docs, fn doc ->
- [partition, _] = String.split(doc["_id"], ":")
- partition
- end)
- end
-
- @tag :with_partitioned_db
- test "query using _id and partition works", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_index(db_name)
-
- url = "/#{db_name}/_partition/foo/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- _id: %{
- "$gt": "foo:"
- }
- },
- limit: 20
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 20
- assert_correct_partition(partitions, "foo")
-
- url = "/#{db_name}/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- _id: %{
- "$lt": "foo:"
- }
- },
- limit: 20
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 20
- assert_correct_partition(partitions, "bar")
- end
-
- @tag :with_partitioned_db
- test "query using _id works for global and local query", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_index(db_name)
-
- url = "/#{db_name}/_partition/foo/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- _id: %{
- "$gt": 0
- }
- },
- limit: 20
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 20
- assert_correct_partition(partitions, "foo")
-
- url = "/#{db_name}/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- _id: %{
- "$gt": 0
- }
- },
- limit: 20
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 20
- assert_correct_partition(partitions, "bar")
- end
-
- @tag :with_partitioned_db
- test "query with partitioned:true using index and $eq", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_index(db_name)
-
- url = "/#{db_name}/_partition/foo/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- some: "field"
- },
- limit: 20
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 20
- assert_correct_partition(partitions, "foo")
-
- url = "/#{db_name}/_partition/bar/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- some: "field"
- },
- limit: 20
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 20
- assert_correct_partition(partitions, "bar")
- end
-
- @tag :with_partitioned_db
- test "partitioned query using _all_docs with $eq", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
-
- url = "/#{db_name}/_partition/foo/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- some: "field"
- },
- limit: 20
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 20
- assert_correct_partition(partitions, "foo")
-
- url = "/#{db_name}/_partition/bar/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- some: "field"
- },
- limit: 20
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 20
- assert_correct_partition(partitions, "bar")
- end
-
- @tag :with_db
- test "non-partitioned query using _all_docs and $eq", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
-
- url = "/#{db_name}/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- some: "field"
- },
- skip: 40,
- limit: 5
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 5
- assert partitions == ["bar", "bar", "bar", "bar", "bar"]
-
- url = "/#{db_name}/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- some: "field"
- },
- skip: 50,
- limit: 5
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 5
- assert partitions == ["foo", "foo", "foo", "foo", "foo"]
- end
-
- @tag :with_partitioned_db
- test "partitioned query using index and range scan", context do
- db_name = context[:db_name]
- create_partition_docs(db_name, "foo", "bar42")
- create_index(db_name, ["value"])
-
- url = "/#{db_name}/_partition/foo/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- value: %{
- "$gte": 6,
- "$lt": 16
- }
- }
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 5
- assert_correct_partition(partitions, "foo")
-
- url = "/#{db_name}/_partition/bar42/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- value: %{
- "$gte": 6,
- "$lt": 16
- }
- }
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 5
- assert_correct_partition(partitions, "bar42")
- end
-
- @tag :with_partitioned_db
- test "partitioned query using _all_docs and range scan", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
-
- url = "/#{db_name}/_partition/foo/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- value: %{
- "$gte": 6,
- "$lt": 16
- }
- }
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 5
- assert_correct_partition(partitions, "foo")
-
- url = "/#{db_name}/_partition/bar/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- value: %{
- "$gte": 6,
- "$lt": 16
- }
- }
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 5
- assert_correct_partition(partitions, "bar")
- end
-
- @tag :with_partitioned_db
- test "partitioned query using _all_docs", context do
- db_name = context[:db_name]
- create_partition_docs(db_name, "foo", "bar42")
-
- url = "/#{db_name}/_partition/foo/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- value: %{
- "$gte": 6,
- "$lt": 16
- }
- }
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 5
- assert_correct_partition(partitions, "foo")
-
- url = "/#{db_name}/_partition/bar42/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- value: %{
- "$gte": 6,
- "$lt": 16
- }
- }
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 5
- assert_correct_partition(partitions, "bar42")
- end
-
- @tag :with_partitioned_db
- test "explain works with partitions", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_index(db_name, ["some"])
-
- url = "/#{db_name}/_partition/foo/_explain"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- value: %{
- "$gte": 6,
- "$lt": 16
- }
- }
- }
- )
-
- %{:body => body} = resp
-
- assert body["index"]["name"] == "_all_docs"
- assert body["mrargs"]["partition"] == "foo"
-
- url = "/#{db_name}/_partition/bar/_explain"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- some: "field"
- }
- }
- )
-
- %{:body => body} = resp
-
- assert body["index"]["def"] == %{"fields" => [%{"some" => "asc"}]}
- assert body["mrargs"]["partition"] == "bar"
- end
-
- @tag :with_db
- test "explain works with non partitioned db", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_index(db_name, ["some"])
-
- url = "/#{db_name}/_explain"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- value: %{
- "$gte": 6,
- "$lt": 16
- }
- }
- }
- )
-
- %{:body => body} = resp
-
- assert body["index"]["name"] == "_all_docs"
- assert body["mrargs"]["partition"] == :null
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- some: "field"
- }
- }
- )
-
- %{:body => body} = resp
-
- assert body["index"]["def"] == %{"fields" => [%{"some" => "asc"}]}
- assert body["mrargs"]["partition"] == :null
- end
-
- @tag :with_partitioned_db
- test "partitioned query using bookmarks", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_index(db_name, ["value"])
-
- url = "/#{db_name}/_partition/foo/_find"
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- value: %{
- "$gte": 6,
- "$lt": 16
- }
- },
- limit: 3
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 3
- assert_correct_partition(partitions, "foo")
-
- %{:body => %{"bookmark" => bookmark}} = resp
-
- resp =
- Couch.post(
- url,
- body: %{
- selector: %{
- value: %{
- "$gte": 6,
- "$lt": 16
- }
- },
- limit: 3,
- bookmark: bookmark
- }
- )
-
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 2
- assert_correct_partition(partitions, "foo")
- end
-
- @tag :with_partitioned_db
- test "global query uses global index", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_index(db_name, ["some"], %{partitioned: false})
-
- url = "/#{db_name}/_explain"
-
- selector = %{
- selector: %{
- some: "field"
- },
- limit: 100
- }
-
- resp = Couch.post(url, body: selector)
- assert resp.status_code == 200
- %{:body => body} = resp
- assert body["index"]["def"] == %{"fields" => [%{"some" => "asc"}]}
-
- url = "/#{db_name}/_find"
- resp = Couch.post(url, body: selector)
- assert resp.status_code == 200
-
- partitions = get_partitions(resp)
- assert length(partitions) == 100
- end
-
- @tag :with_partitioned_db
- test "global query does not use partition index", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_index(db_name, ["some"])
-
- url = "/#{db_name}/_explain"
-
- selector = %{
- selector: %{
- some: "field"
- },
- limit: 100
- }
-
- resp = Couch.post(url, body: selector)
- %{:body => body} = resp
- assert body["index"]["name"] == "_all_docs"
-
- url = "/#{db_name}/_find"
- resp = Couch.post(url, body: selector)
-
- assert resp.status_code == 200
-
- partitions = get_partitions(resp)
- assert length(partitions) == 100
- end
-
- @tag :with_partitioned_db
- test "partitioned query does not use global index", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_index(db_name, ["some"], %{partitioned: false})
-
- url = "/#{db_name}/_partition/foo/_explain"
-
- selector = %{
- selector: %{
- some: "field"
- },
- limit: 50
- }
-
- resp = Couch.post(url, body: selector)
- assert resp.status_code == 200
- %{:body => body} = resp
- assert body["index"]["name"] == "_all_docs"
-
- url = "/#{db_name}/_partition/foo/_find"
- resp = Couch.post(url, body: selector)
- assert resp.status_code == 200
-
- partitions = get_partitions(resp)
- assert length(partitions) == 50
- assert_correct_partition(partitions, "foo")
- end
-
- @tag :with_partitioned_db
- test "partitioned _find and _explain with missing partition returns 400", context do
- db_name = context[:db_name]
-
- selector = %{
- selector: %{
- some: "field"
- }
- }
-
- resp = Couch.get("/#{db_name}/_partition/_find", body: selector)
- validate_missing_partition(resp)
-
- resp = Couch.get("/#{db_name}/_partition/_explain", body: selector)
- validate_missing_partition(resp)
- end
-
- defp validate_missing_partition(resp) do
- assert resp.status_code == 400
- %{:body => %{"reason" => reason}} = resp
- assert Regex.match?(~r/Partition must not start/, reason)
- end
-
- @tag :with_partitioned_db
- test "partitioned query sends correct errors for sort errors", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
-
- url = "/#{db_name}/_partition/foo/_find"
-
- selector = %{
- selector: %{
- some: "field"
- },
- sort: ["some"],
- limit: 50
- }
-
- resp = Couch.post(url, body: selector)
- assert resp.status_code == 400
- %{:body => %{"reason" => reason}} = resp
- assert Regex.match?(~r/No partitioned index exists for this sort/, reason)
-
- url = "/#{db_name}/_find"
- resp = Couch.post(url, body: selector)
- assert resp.status_code == 400
- %{:body => %{"reason" => reason}} = resp
- assert Regex.match?(~r/No global index exists for this sort/, reason)
- end
-end
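
The Mango tests above turn on whether an index was created as partitioned (the default on a partitioned database) or global. A condensed, illustrative sketch of the three request shapes they exercise; the database name is hypothetical:

db_name = "mango-partition-demo"  # hypothetical database name

# Partitioned index (default): only considered for /_partition/<key>/_find
Couch.post("/#{db_name}/_index", body: %{index: %{fields: ["some"]}})

# Global index: opts out with partitioned: false and is only considered
# for database-wide /_find requests
Couch.post("/#{db_name}/_index",
  body: %{index: %{fields: ["some"]}, partitioned: false}
)

# Partition-scoped query; without a usable partitioned index the planner
# falls back to _all_docs, as the _explain assertions above show
Couch.post("/#{db_name}/_partition/foo/_find",
  body: %{selector: %{some: "field"}, limit: 20}
)
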
diff --git a/test/elixir/test/partition_size_test.exs b/test/elixir/test/partition_size_test.exs
deleted file mode 100644
index 2ba8139fc..000000000
--- a/test/elixir/test/partition_size_test.exs
+++ /dev/null
@@ -1,361 +0,0 @@
-defmodule PartitionSizeTest do
- use CouchTestCase
-
- @moduledoc """
- Test Partition size functionality
- """
-
- setup do
- db_name = random_db_name()
- {:ok, _} = create_db(db_name, query: %{partitioned: true, q: 1})
- on_exit(fn -> delete_db(db_name) end)
-
- {:ok, [db_name: db_name]}
- end
-
- def get_db_info(dbname) do
- resp = Couch.get("/#{dbname}")
- assert resp.status_code == 200
- %{:body => body} = resp
- body
- end
-
- def get_partition_info(dbname, partition) do
- resp = Couch.get("/#{dbname}/_partition/#{partition}")
- assert resp.status_code == 200
- %{:body => body} = resp
- body
- end
-
- def mk_partition(i) do
- i |> rem(10) |> Integer.to_string() |> String.pad_leading(3, "0")
- end
-
- def mk_docid(i) do
- id = i |> Integer.to_string() |> String.pad_leading(4, "0")
- "#{mk_partition(i)}:#{id}"
- end
-
- def mk_docs(db_name) do
- docs =
- for i <- 1..1000 do
- group = Integer.to_string(rem(i, 3))
-
- %{
- :_id => mk_docid(i),
- :value => i,
- :some => "field",
- :group => group
- }
- end
-
- body = %{:w => 3, :docs => docs}
-
- retry_until(fn ->
- resp = Couch.post("/#{db_name}/_bulk_docs", body: body)
- assert resp.status_code in [201, 202]
- end)
- end
-
- def save_doc(db_name, doc) do
- resp = Couch.post("/#{db_name}", query: [w: 3], body: doc)
- assert resp.status_code in [201, 202]
- %{:body => body} = resp
- body["rev"]
- end
-
- test "get empty partition", context do
- db_name = context[:db_name]
- partition = "non_existent_partition"
-
- info = get_partition_info(db_name, partition)
-
- assert info["doc_count"] == 0
- assert info["doc_del_count"] == 0
- assert info["partition"] == partition
- assert info["sizes"]["external"] == 0
- assert info["sizes"]["active"] == 0
- end
-
-  test "unknown partition returns zero", context do
- db_name = context[:db_name]
- mk_docs(db_name)
-
- info = get_partition_info(db_name, "unknown")
- assert info["doc_count"] == 0
- assert info["doc_del_count"] == 0
- assert info["sizes"]["external"] == 0
- assert info["sizes"]["active"] == 0
- end
-
- test "simple partition size", context do
- db_name = context[:db_name]
- save_doc(db_name, %{_id: "foo:bar", val: 42})
-
- info = get_partition_info(db_name, "foo")
- assert info["doc_count"] == 1
- assert info["doc_del_count"] == 0
- assert info["sizes"]["external"] > 0
- assert info["sizes"]["active"] > 0
- end
-
- test "adding docs increases partition sizes", context do
- db_name = context[:db_name]
- save_doc(db_name, %{_id: "foo:bar", val: 42})
- pre_info = get_partition_info(db_name, "foo")
-
- save_doc(db_name, %{_id: "foo:baz", val: 24})
- post_info = get_partition_info(db_name, "foo")
-
- assert post_info["doc_count"] == 2
- assert post_info["doc_del_count"] == 0
- assert post_info["sizes"]["external"] > pre_info["sizes"]["external"]
- assert post_info["sizes"]["active"] > pre_info["sizes"]["active"]
- end
-
- test "updating docs affects partition sizes", context do
- db_name = context[:db_name]
- rev1 = save_doc(db_name, %{_id: "foo:bar", val: ""})
- info1 = get_partition_info(db_name, "foo")
-
- rev2 =
- save_doc(db_name, %{
- _id: "foo:bar",
- _rev: rev1,
- val: "this is a very long string that is so super long its beyond long"
- })
-
- info2 = get_partition_info(db_name, "foo")
-
- save_doc(db_name, %{
- _id: "foo:bar",
- _rev: rev2,
- val: "this string is shorter"
- })
-
- info3 = get_partition_info(db_name, "foo")
-
- assert info3["doc_count"] == 1
- assert info3["doc_del_count"] == 0
-
- assert info3["sizes"]["external"] > info1["sizes"]["external"]
- assert info2["sizes"]["external"] > info3["sizes"]["external"]
- end
-
- test "deleting a doc affects partition sizes", context do
- db_name = context[:db_name]
- rev1 = save_doc(db_name, %{_id: "foo:bar", val: "some stuff here"})
- info1 = get_partition_info(db_name, "foo")
-
- save_doc(db_name, %{_id: "foo:bar", _rev: rev1, _deleted: true})
- info2 = get_partition_info(db_name, "foo")
-
- assert info1["doc_count"] == 1
- assert info1["doc_del_count"] == 0
-
- assert info2["doc_count"] == 0
- assert info2["doc_del_count"] == 1
-
- assert info2["sizes"]["external"] < info1["sizes"]["external"]
- end
-
- test "design docs do not affect partition sizes", context do
- db_name = context[:db_name]
- mk_docs(db_name)
-
- pre_infos =
- 0..9
- |> Enum.map(fn i ->
- get_partition_info(db_name, mk_partition(i))
- end)
-
- 0..5
- |> Enum.map(fn i ->
- base = i |> Integer.to_string() |> String.pad_leading(5, "0")
- docid = "_design/#{base}"
- save_doc(db_name, %{_id: docid, value: "some stuff here"})
- end)
-
- post_infos =
- 0..9
- |> Enum.map(fn i ->
- get_partition_info(db_name, mk_partition(i))
- end)
-
- assert post_infos == pre_infos
- end
-
- @tag :skip_on_jenkins
- test "get all partition sizes", context do
- db_name = context[:db_name]
- mk_docs(db_name)
-
- {esum, asum} =
- 0..9
- |> Enum.reduce({0, 0}, fn i, {esize, asize} ->
- partition = mk_partition(i)
- info = get_partition_info(db_name, partition)
- assert info["doc_count"] == 100
- assert info["doc_del_count"] == 0
- assert info["sizes"]["external"] > 0
- assert info["sizes"]["active"] > 0
- {esize + info["sizes"]["external"], asize + info["sizes"]["active"]}
- end)
-
- db_info = get_db_info(db_name)
- assert db_info["sizes"]["external"] >= esum
- assert db_info["sizes"]["active"] >= asum
- end
-
- test "get partition size with attachment", context do
- db_name = context[:db_name]
-
- doc = %{
- _id: "foo:doc-with-attachment",
- _attachments: %{
- "foo.txt": %{
- content_type: "text/plain",
- data: Base.encode64("This is a text document to save")
- }
- }
- }
-
- save_doc(db_name, doc)
-
- db_info = get_db_info(db_name)
- foo_info = get_partition_info(db_name, "foo")
-
- assert foo_info["doc_count"] == 1
- assert foo_info["doc_del_count"] == 0
- assert foo_info["sizes"]["active"] > 0
- assert foo_info["sizes"]["external"] > 0
-
- assert foo_info["sizes"]["active"] <= db_info["sizes"]["active"]
- assert foo_info["sizes"]["external"] <= db_info["sizes"]["external"]
- end
-
- test "attachments don't affect other partitions", context do
- db_name = context[:db_name]
- mk_docs(db_name)
-
- pre_infos =
- 0..9
- |> Enum.map(fn i ->
- get_partition_info(db_name, mk_partition(i))
- end)
-
- doc = %{
- _id: "foo:doc-with-attachment",
- _attachments: %{
- "foo.txt": %{
- content_type: "text/plain",
- data: Base.encode64("This is a text document to save")
- }
- }
- }
-
- save_doc(db_name, doc)
-
- att_info = get_partition_info(db_name, "foo")
- assert att_info["doc_count"] == 1
- assert att_info["sizes"]["external"] > 0
-
- post_infos =
- 0..9
- |> Enum.map(fn i ->
- get_partition_info(db_name, mk_partition(i))
- end)
-
- assert post_infos == pre_infos
-
- esize =
- ([att_info] ++ post_infos)
- |> Enum.reduce(0, fn info, acc ->
- info["sizes"]["external"] + acc
- end)
-
- db_info = get_db_info(db_name)
- assert esize == db_info["sizes"]["external"]
- end
-
-  test "partition activity does not affect other partition sizes", context do
- db_name = context[:db_name]
- mk_docs(db_name)
-
- partition1 = "000"
- partition2 = "001"
-
- info2 = get_partition_info(db_name, partition2)
-
- doc_id = "#{partition1}:doc-with-attachment"
-
- doc = %{
- _id: doc_id,
- _attachments: %{
- "foo.txt": %{
- content_type: "text/plain",
- data: Base.encode64("This is a text document to save")
- }
- }
- }
-
- doc_rev = save_doc(db_name, doc)
-
- info2_attach = get_partition_info(db_name, partition2)
- assert info2_attach == info2
-
- doc =
- Enum.into(
- %{
- another: "add another field",
- _rev: doc_rev
- },
- doc
- )
-
- doc_rev = save_doc(db_name, doc)
-
- info2_update = get_partition_info(db_name, partition2)
- assert info2_update == info2
-
- resp = Couch.delete("/#{db_name}/#{doc_id}", query: %{rev: doc_rev})
- assert resp.status_code == 200
-
- info2_delete = get_partition_info(db_name, partition2)
- assert info2_delete == info2
- end
-
- test "purging docs decreases partition size", context do
- db_name = context[:db_name]
- mk_docs(db_name)
-
- partition = "000"
-
- query = [
- start_key: "\"#{partition}:0000\"",
- end_key: "\"#{partition}:9999\"",
- limit: 50
- ]
-
- resp = Couch.get("/#{db_name}/_all_docs", query: query)
- assert resp.status_code == 200
- %{body: body} = resp
-
- pre_info = get_partition_info(db_name, partition)
-
- pbody =
- body["rows"]
- |> Enum.reduce(%{}, fn row, acc ->
- Map.put(acc, row["id"], [row["value"]["rev"]])
- end)
-
- resp = Couch.post("/#{db_name}/_purge", query: [w: 3], body: pbody)
- assert resp.status_code in [201, 202]
-
- post_info = get_partition_info(db_name, partition)
- assert post_info["doc_count"] == pre_info["doc_count"] - 50
- assert post_info["doc_del_count"] == 0
- assert post_info["sizes"]["active"] < pre_info["sizes"]["active"]
- assert post_info["sizes"]["external"] < pre_info["sizes"]["external"]
- end
-end
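
All of the size assertions above read the per-partition info endpoint. A small sketch of that lookup and the fields the tests depend on; database and partition names are illustrative:

db_name = "partition-size-demo"  # hypothetical database name
partition = "000"                # hypothetical partition key

resp = Couch.get("/#{db_name}/_partition/#{partition}")
# Fields the tests above assert on:
#   "doc_count"            - live documents in the partition
#   "doc_del_count"        - deleted documents in the partition
#   "sizes" -> "external"  - uncompressed size of the partition's data
#   "sizes" -> "active"    - live data size within the database file
%{:body => info} = resp
{info["doc_count"], info["sizes"]["external"], info["sizes"]["active"]}
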
diff --git a/test/elixir/test/partition_view_test.exs b/test/elixir/test/partition_view_test.exs
deleted file mode 100644
index 0a55c2443..000000000
--- a/test/elixir/test/partition_view_test.exs
+++ /dev/null
@@ -1,374 +0,0 @@
-defmodule ViewPartitionTest do
- use CouchTestCase
- import PartitionHelpers
-
- @moduledoc """
- Test Partition functionality for views
- """
-
- setup_all do
- db_name = random_db_name()
- {:ok, _} = create_db(db_name, query: %{partitioned: true, q: 1})
- on_exit(fn -> delete_db(db_name) end)
-
- create_partition_docs(db_name)
-
- map_fun1 = """
- function(doc) {
- if (doc.some) {
- emit(doc.value, doc.some);
- }
- }
- """
-
- map_fun2 = """
- function(doc) {
- if (doc.group) {
- emit([doc.some, doc.group], 1);
- }
- }
- """
-
- query = %{:w => 3}
-
- body = %{
- :docs => [
- %{
- _id: "_design/map",
- views: %{some: %{map: map_fun1}}
- },
- %{
- _id: "_design/map_some",
- views: %{some: %{map: map_fun2}}
- },
- %{
- _id: "_design/partitioned_true",
- views: %{some: %{map: map_fun1}},
- options: %{partitioned: true}
- },
- %{
- _id: "_design/partitioned_false",
- views: %{some: %{map: map_fun1}},
- options: %{partitioned: false}
- },
- %{
- _id: "_design/reduce",
- views: %{some: %{map: map_fun2, reduce: "_count"}}
- },
- %{
- _id: "_design/include_ddocs",
- views: %{some: %{map: map_fun1}},
- options: %{include_design: true}
- }
- ]
- }
-
- resp = Couch.post("/#{db_name}/_bulk_docs", query: query, body: body)
- Enum.each(resp.body, &assert(&1["ok"]))
-
- {:ok, [db_name: db_name]}
- end
-
- def get_reduce_result(resp) do
- %{:body => %{"rows" => rows}} = resp
- rows
- end
-
- test "query with partitioned:true returns partitioned fields", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/partitioned_true/_view/some"
- resp = Couch.get(url)
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert Enum.dedup(partitions) == ["foo"]
-
- url = "/#{db_name}/_partition/bar/_design/partitioned_true/_view/some"
- resp = Couch.get(url)
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert Enum.dedup(partitions) == ["bar"]
- end
-
- test "default view query returns partitioned fields", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/map/_view/some"
- resp = Couch.get(url)
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert Enum.dedup(partitions) == ["foo"]
-
- url = "/#{db_name}/_partition/bar/_design/map/_view/some"
- resp = Couch.get(url)
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert Enum.dedup(partitions) == ["bar"]
- end
-
-  test "conflicting partitions in path and query string are rejected", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/map/_view/some"
- resp = Couch.get(url, query: %{partition: "bar"})
- assert resp.status_code == 400
- %{:body => %{"reason" => reason}} = resp
- assert Regex.match?(~r/Conflicting value/, reason)
- end
-
- test "query will return zero results for wrong inputs", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/map/_view/some"
- resp = Couch.get(url, query: %{start_key: "\"foo:12\""})
- assert resp.status_code == 200
- assert Map.get(resp, :body)["rows"] == []
- end
-
- test "partitioned ddoc cannot be used in global query", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_design/map/_view/some"
- resp = Couch.get(url)
- %{:body => %{"reason" => reason}} = resp
- assert resp.status_code == 400
- assert Regex.match?(~r/mandatory for queries to this view./, reason)
- end
-
- test "partitioned query cannot be used with global ddoc", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/partitioned_false/_view/some"
- resp = Couch.get(url)
- %{:body => %{"reason" => reason}} = resp
- assert resp.status_code == 400
- assert Regex.match?(~r/is not supported in this design doc/, reason)
- end
-
- test "view query returns all docs for global query", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_design/partitioned_false/_view/some"
- resp = Couch.get(url)
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 100
- end
-
- test "partition query errors with incorrect partition supplied", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/_bar/_design/map/_view/some"
- resp = Couch.get(url)
- assert resp.status_code == 400
-
- url = "/#{db_name}/_partition//_design/map/_view/some"
- resp = Couch.get(url)
- assert resp.status_code == 400
- end
-
- test "partitioned query works with startkey, endkey range", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/map/_view/some"
- resp = Couch.get(url, query: %{start_key: 12, end_key: 20})
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 5
- assert Enum.dedup(partitions) == ["foo"]
- end
-
- test "partitioned query works with keys", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/map/_view/some"
- resp = Couch.post(url, body: %{keys: [2, 4, 6]})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 3
- assert ids == ["foo:2", "foo:4", "foo:6"]
- end
-
- test "global query works with keys", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_design/partitioned_false/_view/some"
- resp = Couch.post(url, body: %{keys: [2, 4, 6]})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 3
- assert ids == ["foo:2", "foo:4", "foo:6"]
- end
-
- test "partition query works with limit", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/map/_view/some"
- resp = Couch.get(url, query: %{limit: 5})
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 5
- assert Enum.dedup(partitions) == ["foo"]
- end
-
- test "partition query with descending", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/map/_view/some"
- resp = Couch.get(url, query: %{descending: true, limit: 5})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 5
- assert ids == ["foo:100", "foo:98", "foo:96", "foo:94", "foo:92"]
-
- resp = Couch.get(url, query: %{descending: false, limit: 5})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 5
- assert ids == ["foo:2", "foo:4", "foo:6", "foo:8", "foo:10"]
- end
-
- test "partition query with skip", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/map/_view/some"
- resp = Couch.get(url, query: %{skip: 5, limit: 5})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 5
- assert ids == ["foo:12", "foo:14", "foo:16", "foo:18", "foo:20"]
- end
-
- test "partition query with key", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/map/_view/some"
- resp = Couch.get(url, query: %{key: 22})
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 1
- assert ids == ["foo:22"]
- end
-
- test "partition query with startkey_docid and endkey_docid", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/map_some/_view/some"
-
- resp =
- Couch.get(
- url,
- query: %{
- startkey: "[\"field\",\"one\"]",
- endkey: "[\"field\",\"one\"]",
- startkey_docid: "foo:12",
- endkey_docid: "foo:30"
- }
- )
-
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert ids == ["foo:12", "foo:18", "foo:24", "foo:30"]
- end
-
- test "query with reduce works", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/reduce/_view/some"
- resp = Couch.get(url, query: %{reduce: true, group_level: 1})
- assert resp.status_code == 200
- results = get_reduce_result(resp)
- assert results == [%{"key" => ["field"], "value" => 50}]
-
- resp = Couch.get(url, query: %{reduce: true, group_level: 2})
- results = get_reduce_result(resp)
-
- assert results == [
- %{"key" => ["field", "one"], "value" => 16},
- %{"key" => ["field", "two"], "value" => 34}
- ]
-
- resp = Couch.get(url, query: %{reduce: true, group: true})
- results = get_reduce_result(resp)
-
- assert results == [
- %{"key" => ["field", "one"], "value" => 16},
- %{"key" => ["field", "two"], "value" => 34}
- ]
- end
-
- test "partition query can set query limits", context do
- set_config({"query_server_config", "partition_query_limit", "2000"})
-
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_partition_ddoc(db_name)
-
- url = "/#{db_name}/_partition/foo/_design/mrtest/_view/some"
-
- resp =
- Couch.get(
- url,
- query: %{
- limit: 20
- }
- )
-
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 20
-
- resp = Couch.get(url)
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 50
-
- resp =
- Couch.get(
- url,
- query: %{
- limit: 2000
- }
- )
-
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 50
-
- resp =
- Couch.get(
- url,
- query: %{
- limit: 2001
- }
- )
-
- assert resp.status_code == 400
- %{:body => %{"reason" => reason}} = resp
- assert Regex.match?(~r/Limit is too large/, reason)
-
- resp =
- Couch.get(
- url,
- query: %{
- limit: 2000,
- skip: 25
- }
- )
-
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert length(ids) == 25
- end
-
- test "include_design works correctly", context do
- db_name = context[:db_name]
-
- url = "/#{db_name}/_partition/foo/_design/include_ddocs/_view/some"
- resp = Couch.get(url)
- assert resp.status_code == 200
- partitions = get_partitions(resp)
- assert length(partitions) == 50
- assert Enum.dedup(partitions) == ["foo"]
- end
-end
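
The view tests above control partitioning per design document through its options map. A minimal sketch of the two shapes, reusing the same map function as the setup block; the ddoc ids are illustrative:

map_fun = """
function(doc) {
  if (doc.some) {
    emit(doc.value, doc.some);
  }
}
"""

# Partitioned ddoc (default on a partitioned db): queried via
# /<db>/_partition/<key>/_design/<ddoc>/_view/<view>
partitioned_ddoc = %{
  _id: "_design/partitioned_example",
  views: %{some: %{map: map_fun}},
  options: %{partitioned: true}
}

# Global ddoc: opts out with partitioned: false and is queried via
# /<db>/_design/<ddoc>/_view/<view>
global_ddoc = %{
  _id: "_design/global_example",
  views: %{some: %{map: map_fun}},
  options: %{partitioned: false}
}

{partitioned_ddoc, global_ddoc}
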
diff --git a/test/elixir/test/partition_view_update_test.exs b/test/elixir/test/partition_view_update_test.exs
deleted file mode 100644
index 5c1cb09f0..000000000
--- a/test/elixir/test/partition_view_update_test.exs
+++ /dev/null
@@ -1,160 +0,0 @@
-defmodule PartitionViewUpdateTest do
- use CouchTestCase
- import PartitionHelpers
-
- @moduledoc """
- Test Partition view update functionality
- """
- @tag :with_partitioned_db
- test "view updates properly remove old keys", context do
- db_name = context[:db_name]
- create_partition_docs(db_name, "foo", "bar")
- create_partition_ddoc(db_name)
-
- check_key = fn key, num_rows ->
- url = "/#{db_name}/_partition/foo/_design/mrtest/_view/some"
- resp = Couch.get(url, query: [key: key])
- assert resp.status_code == 200
- assert length(resp.body["rows"]) == num_rows
- end
-
- check_key.(2, 1)
-
- resp = Couch.get("/#{db_name}/foo:2")
- doc = Map.put(resp.body, "value", 4)
- resp = Couch.put("/#{db_name}/foo:2", query: [w: 3], body: doc)
- assert resp.status_code >= 201 and resp.status_code <= 202
-
- check_key.(4, 2)
- check_key.(2, 0)
- end
-
- @tag :skip_on_jenkins
- @tag :with_partitioned_db
- test "query with update=false works", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_partition_ddoc(db_name)
-
- url = "/#{db_name}/_partition/foo/_design/mrtest/_view/some"
-
- resp =
- Couch.get(
- url,
- query: %{
- update: "true",
- limit: 3
- }
- )
-
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert ids == ["foo:2", "foo:4", "foo:6"]
-
- # Avoid race conditions by attempting to get a full response
- # from every shard before we do our update:false test
- for _ <- 1..12 do
- resp = Couch.get(url)
- assert resp.status_code == 200
- end
-
- Couch.put("/#{db_name}/foo:1", body: %{some: "field"})
-
- retry_until(fn ->
- resp =
- Couch.get(
- url,
- query: %{
- update: "false",
- limit: 3
- }
- )
-
- assert resp.status_code == 200
- ids = get_ids(resp)
- assert ids == ["foo:2", "foo:4", "foo:6"]
- end)
- end
-
- @tag :with_partitioned_db
- test "purge removes view rows", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_partition_ddoc(db_name)
-
- url = "/#{db_name}/_partition/foo/_design/mrtest/_view/some"
-
- resp = Couch.get(url)
- assert resp.status_code == 200
- %{body: body} = resp
- assert length(body["rows"]) == 50
-
- resp = Couch.get("/#{db_name}/foo:2")
- assert resp.status_code == 200
- %{body: body} = resp
- rev = body["_rev"]
-
- body = %{"foo:2" => [rev]}
- resp = Couch.post("/#{db_name}/_purge", query: [w: 3], body: body)
- assert resp.status_code in [201, 202]
-
- resp = Couch.get(url)
- assert resp.status_code == 200
- %{body: body} = resp
- assert length(body["rows"]) == 49
- end
-
- @tag :with_partitioned_db
- test "purged conflict changes view rows", context do
- db_name = context[:db_name]
- create_partition_docs(db_name)
- create_partition_ddoc(db_name)
-
- url = "/#{db_name}/_partition/foo/_design/mrtest/_view/some"
-
- resp = Couch.get(url)
- assert resp.status_code == 200
- %{body: body} = resp
- assert length(body["rows"]) == 50
-
-    # Create a conflict on foo:2. Since the 4096 revision
-    # is deeper than the conflict, we can assert that it is
-    # in the view before the purge and that 8192 is in the
-    # view after the purge.
- resp = Couch.get("/#{db_name}/foo:2")
- assert resp.status_code == 200
- %{body: body} = resp
- rev1 = body["_rev"]
-
- doc = %{_id: "foo:2", _rev: rev1, value: 4096, some: "field"}
- resp = Couch.post("/#{db_name}", query: [w: 3], body: doc)
- assert resp.status_code in [201, 202]
- %{body: body} = resp
- rev2 = body["rev"]
-
- query = [w: 3, new_edits: false]
- conflict_rev = "1-4a75b4efa0804859b3dfd327cbc1c2f9"
- doc = %{_id: "foo:2", _rev: conflict_rev, value: 8192, some: "field"}
- resp = Couch.put("/#{db_name}/foo:2", query: query, body: doc)
- assert resp.status_code in [201, 202]
-
- # Check that our expected row exists
- resp = Couch.get(url, query: [key: 4096])
- assert resp.status_code == 200
- %{body: body} = resp
- [row] = body["rows"]
- assert row["id"] == "foo:2"
-
- # Remove the current row to be replaced with
- # a row from the conflict
- body = %{"foo:2" => [rev2]}
- resp = Couch.post("/#{db_name}/_purge", query: [w: 3], body: body)
- assert resp.status_code in [201, 202]
-
- resp = Couch.get(url, query: [key: 8192])
- assert resp.status_code == 200
- %{body: body} = resp
- [row] = body["rows"]
- assert row["id"] == "foo:2"
- end
-end
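
Both purge tests above post the same body shape to _purge. A short sketch of that request, assuming the Couch client used throughout; the id and revision are illustrative:

db_name = "purge-demo"                      # hypothetical database name
doc_id = "foo:2"
rev = "1-967a00dff5e02add41819138abb3284d"  # illustrative revision

# _purge takes a map of doc id to a list of revisions to remove; purged
# revisions also disappear from views once the indexes catch up
resp = Couch.post("/#{db_name}/_purge", query: [w: 3], body: %{doc_id => [rev]})
# 201/202 signals the purge was accepted, matching the assertions above
resp.status_code in [201, 202]
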
diff --git a/test/elixir/test/replication_test.exs b/test/elixir/test/replication_test.exs
index 075f65bfa..afd4d132e 100644
--- a/test/elixir/test/replication_test.exs
+++ b/test/elixir/test/replication_test.exs
@@ -126,7 +126,7 @@ defmodule ReplicationTest do
task = get_task(repl_id, 3_000)
assert is_map(task)
- assert task["replication_id"] == repl_id
+ assert task["id"] == repl_id
repl_body = %{
"replication_id" => repl_id,
@@ -427,6 +427,76 @@ defmodule ReplicationTest do
assert change["id"] == del_doc["_id"]
assert change["deleted"]
+    # Test that a new deletion is replicated when the document wasn't yet on the target
+ [del_doc] = save_docs(src_db_name, [%{"_id" => "new_del_doc_1"}])
+
+ del_doc = Map.put(del_doc, "_deleted", true)
+ [del_doc] = save_docs(src_db_name, [del_doc])
+
+ result = replicate(src_prefix <> src_db_name, tgt_prefix <> tgt_db_name)
+ assert result["ok"]
+
+ retry_until(fn ->
+ src_info = get_db_info(src_db_name)
+ tgt_info = get_db_info(tgt_db_name)
+
+ assert tgt_info["doc_count"] == src_info["doc_count"]
+ assert tgt_info["doc_del_count"] == src_info["doc_del_count"]
+ assert tgt_info["doc_del_count"] == 2
+ end)
+
+ assert is_list(result["history"])
+ assert length(result["history"]) == 4
+ history = Enum.at(result["history"], 0)
+ assert history["missing_checked"] == 29
+ assert history["missing_found"] == 29
+ assert history["docs_read"] == 29
+ assert history["docs_written"] == 29
+ assert history["doc_write_failures"] == 0
+
+ resp = Couch.get("/#{tgt_db_name}/#{del_doc["_id"]}")
+ assert resp.status_code == 404
+
+ resp = Couch.get!("/#{tgt_db_name}/_changes")
+ [change] = Enum.filter(resp.body["results"], &(&1["id"] == del_doc["_id"]))
+ assert change["id"] == del_doc["_id"]
+ assert change["deleted"]
+
+ # Test an already deleted deletion being replicated
+ [del_doc] = save_docs(src_db_name, [%{"_id" => "new_del_doc_1"}])
+ del_doc = Map.put(del_doc, "_deleted", true)
+ [del_doc] = save_docs(src_db_name, [del_doc])
+
+ result = replicate(src_prefix <> src_db_name, tgt_prefix <> tgt_db_name)
+ assert result["ok"]
+
+ retry_until(fn ->
+ src_info = get_db_info(src_db_name)
+ tgt_info = get_db_info(tgt_db_name)
+
+ assert tgt_info["doc_count"] == src_info["doc_count"]
+ assert tgt_info["doc_del_count"] == src_info["doc_del_count"]
+ assert tgt_info["doc_del_count"] == 2
+ end)
+
+ assert is_list(result["history"])
+ assert length(result["history"]) == 5
+ history = Enum.at(result["history"], 0)
+ assert history["missing_checked"] == 30
+ assert history["missing_found"] == 30
+ assert history["docs_read"] == 30
+ assert history["docs_written"] == 30
+ assert history["doc_write_failures"] == 0
+
+ resp = Couch.get("/#{tgt_db_name}/#{del_doc["_id"]}")
+ assert resp.status_code == 404
+
+ resp = Couch.get!("/#{tgt_db_name}/_changes")
+ [change] = Enum.filter(resp.body["results"], &(&1["id"] == del_doc["_id"]))
+ assert change["id"] == del_doc["_id"]
+ assert change["deleted"]
+
+
# Test replicating a conflict
doc = Couch.get!("/#{src_db_name}/2").body
[doc] = save_docs(src_db_name, [Map.put(doc, :value, "white")])
@@ -443,12 +513,12 @@ defmodule ReplicationTest do
assert tgt_info["doc_count"] == src_info["doc_count"]
assert is_list(result["history"])
- assert length(result["history"]) == 4
+ assert length(result["history"]) == 6
history = Enum.at(result["history"], 0)
- assert history["missing_checked"] == 29
- assert history["missing_found"] == 29
- assert history["docs_read"] == 29
- assert history["docs_written"] == 29
+ assert history["missing_checked"] == 31
+ assert history["missing_found"] == 31
+ assert history["docs_read"] == 31
+ assert history["docs_written"] == 31
assert history["doc_write_failures"] == 0
copy = Couch.get!("/#{tgt_db_name}/2", query: %{:conflicts => true}).body
@@ -470,12 +540,12 @@ defmodule ReplicationTest do
assert tgt_info["doc_count"] == src_info["doc_count"]
assert is_list(result["history"])
- assert length(result["history"]) == 5
+ assert length(result["history"]) == 7
history = Enum.at(result["history"], 0)
- assert history["missing_checked"] == 30
- assert history["missing_found"] == 30
- assert history["docs_read"] == 30
- assert history["docs_written"] == 30
+ assert history["missing_checked"] == 32
+ assert history["missing_found"] == 32
+ assert history["docs_read"] == 32
+ assert history["docs_written"] == 32
assert history["doc_write_failures"] == 0
copy = Couch.get!("/#{tgt_db_name}/2", query: %{:conflicts => true}).body
@@ -499,12 +569,12 @@ defmodule ReplicationTest do
assert tgt_info["doc_count"] == src_info["doc_count"]
assert is_list(result["history"])
- assert length(result["history"]) == 6
+ assert length(result["history"]) == 8
history = Enum.at(result["history"], 0)
- assert history["missing_checked"] == 31
- assert history["missing_found"] == 31
- assert history["docs_read"] == 31
- assert history["docs_written"] == 31
+ assert history["missing_checked"] == 33
+ assert history["missing_found"] == 33
+ assert history["docs_read"] == 33
+ assert history["docs_written"] == 33
assert history["doc_write_failures"] == 0
copy = Couch.get!("/#{tgt_db_name}/2", query: %{:conflicts => true}).body
@@ -531,12 +601,12 @@ defmodule ReplicationTest do
assert tgt_info["doc_count"] == src_info["doc_count"]
assert is_list(result["history"])
- assert length(result["history"]) == 7
+ assert length(result["history"]) == 9
history = Enum.at(result["history"], 0)
- assert history["missing_checked"] == 34
- assert history["missing_found"] == 32
- assert history["docs_read"] == 32
- assert history["docs_written"] == 32
+ assert history["missing_checked"] == 36
+ assert history["missing_found"] == 34
+ assert history["docs_read"] == 34
+ assert history["docs_written"] == 34
assert history["doc_write_failures"] == 0
docs = [
@@ -556,12 +626,12 @@ defmodule ReplicationTest do
assert tgt_info["doc_count"] == src_info["doc_count"]
assert is_list(result["history"])
- assert length(result["history"]) == 8
+ assert length(result["history"]) == 10
history = Enum.at(result["history"], 0)
- assert history["missing_checked"] == 36
- assert history["missing_found"] == 32
- assert history["docs_read"] == 32
- assert history["docs_written"] == 32
+ assert history["missing_checked"] == 38
+ assert history["missing_found"] == 34
+ assert history["docs_read"] == 34
+ assert history["docs_written"] == 34
assert history["doc_write_failures"] == 0
# Test nothing to replicate
@@ -710,9 +780,10 @@ defmodule ReplicationTest do
assert tgt_info["doc_count"] == src_info["doc_count"]
- src_shards = seq_to_shards(src_info["update_seq"])
- tgt_shards = seq_to_shards(tgt_info["update_seq"])
- assert tgt_shards == src_shards
+ # This assertion is no longer valid
+ # src_shards = seq_to_shards(src_info["update_seq"])
+ # tgt_shards = seq_to_shards(tgt_info["update_seq"])
+ # assert tgt_shards == src_shards
end)
end
@@ -1653,8 +1724,13 @@ defmodule ReplicationTest do
def wait_for_repl(src_db_name, repl_id, expect_revs_checked, wait_left) do
task = get_task(repl_id, 0)
- through_seq = task["through_seq"] || "0"
- revs_checked = task["revisions_checked"]
+ info = if task["info"] == :null do
+ %{"through_seq" => "0", "revisions_checked" => "0"}
+ else
+ task["info"]
+ end
+ through_seq = info["through_seq"] || "0"
+ revs_checked = info["revisions_checked"] || "0"
changes = get_db_changes(src_db_name, %{:since => through_seq})
if length(changes["results"]) > 0 or revs_checked < expect_revs_checked do
@@ -1703,13 +1779,14 @@ defmodule ReplicationTest do
end
def try_get_task(repl_id) do
- resp = Couch.get("/_active_tasks")
- assert HTTPotion.Response.success?(resp)
- assert is_list(resp.body)
+ resp = Couch.get("/_scheduler/jobs/#{repl_id}")
- Enum.find(resp.body, nil, fn task ->
- task["replication_id"] == repl_id
- end)
+ if HTTPotion.Response.success?(resp) do
+ assert is_map(resp.body)
+ resp.body
+ else
+ nil
+ end
end
def get_att1_data do
diff --git a/test/elixir/test/reshard_all_docs_test.exs b/test/elixir/test/reshard_all_docs_test.exs
deleted file mode 100644
index ab8c6b75b..000000000
--- a/test/elixir/test/reshard_all_docs_test.exs
+++ /dev/null
@@ -1,79 +0,0 @@
-defmodule ReshardAllDocsTest do
- use CouchTestCase
- import ReshardHelpers
-
- @moduledoc """
- Test _all_docs interaction with resharding
- """
-
- setup do
- db = random_db_name()
- {:ok, _} = create_db(db, query: %{q: 2})
-
- on_exit(fn ->
- reset_reshard_state()
- delete_db(db)
- end)
-
- {:ok, [db: db]}
- end
-
- test "all_docs after splitting all shards on node1", context do
- db = context[:db]
- node1 = get_first_node()
- docs = add_docs(1..100, db)
-
- before_split_all_docs = all_docs(db)
- assert docs == before_split_all_docs
-
- resp = post_job_node(db, node1)
- assert resp.status_code in [201, 202]
- jobid = hd(resp.body)["id"]
- wait_job_completed(jobid)
-
- assert before_split_all_docs == all_docs(db)
-
- assert remove_job(jobid).status_code == 200
- end
-
- test "all_docs after splitting the same range on all nodes", context do
- db = context[:db]
- docs = add_docs(1..100, db)
-
- before_split_all_docs = all_docs(db)
- assert docs == before_split_all_docs
-
- resp = post_job_range(db, "00000000-7fffffff")
- assert resp.status_code in [201, 202]
-
- resp.body
- |> Enum.map(fn j -> j["id"] end)
- |> Enum.each(fn id -> wait_job_completed(id) end)
-
- assert before_split_all_docs == all_docs(db)
-
- get_jobs()
- |> Enum.map(fn j -> j["id"] end)
- |> Enum.each(fn id -> remove_job(id) end)
- end
-
- defp add_docs(range, db) do
- docs = create_docs(range)
- w3 = %{:w => 3}
- resp = Couch.post("/#{db}/_bulk_docs", body: %{docs: docs}, query: w3)
- assert resp.status_code in [201, 202]
- assert length(resp.body) == length(docs)
-
- docs
- |> rev(resp.body)
- |> Enum.into(%{}, fn %{:_id => id, :_rev => rev} -> {id, rev} end)
- end
-
- defp all_docs(db, query \\ %{}) do
- resp = Couch.get("/#{db}/_all_docs", query: query)
- assert resp.status_code == 200
-
- resp.body["rows"]
- |> Enum.into(%{}, fn %{"id" => id, "value" => v} -> {id, v["rev"]} end)
- end
-end
diff --git a/test/elixir/test/reshard_basic_test.exs b/test/elixir/test/reshard_basic_test.exs
deleted file mode 100644
index dcb198c46..000000000
--- a/test/elixir/test/reshard_basic_test.exs
+++ /dev/null
@@ -1,174 +0,0 @@
-defmodule ReshardBasicTest do
- use CouchTestCase
- import ReshardHelpers
-
- @moduledoc """
- Test resharding basic functionality
- """
-
- setup_all do
- db1 = random_db_name()
- {:ok, _} = create_db(db1, query: %{q: 1})
- db2 = random_db_name()
- {:ok, _} = create_db(db2, query: %{q: 2})
-
- on_exit(fn ->
- reset_reshard_state()
- delete_db(db1)
- delete_db(db2)
- end)
-
- {:ok, [db1: db1, db2: db2]}
- end
-
- test "basic api querying, no jobs present" do
- summary = get_summary()
- assert summary["state"] == "running"
- assert summary["state_reason"] == :null
- assert summary["total"] == 0
- assert summary["completed"] == 0
- assert summary["failed"] == 0
- assert summary["stopped"] == 0
- assert get_state() == %{"state" => "running", "reason" => :null}
- assert get_jobs() == []
- end
-
- test "check validation of invalid parameters", context do
- db1 = context[:db1]
- node1 = get_first_node()
-
- resp = post_job_node(db1, "badnode")
- assert resp.status_code == 400
-
- resp = post_job_node("badresharddb", node1)
- assert resp.status_code == 400
-
- resp = post_job_db("badresharddb")
- assert resp.status_code == 400
-
- resp = post_job_range("badresharddb", "randomgarbage")
- assert resp.status_code == 400
-
- resp = get_job("badjobid")
- assert resp.status_code == 404
-
- resp = remove_job("badjobid")
- assert resp.status_code == 404
- end
-
- test "toggle global state" do
- assert get_state() == %{"state" => "running", "reason" => :null}
- put_state_stopped("xyz")
- assert get_state() == %{"state" => "stopped", "reason" => "xyz"}
- put_state_running()
- assert get_state() == %{"state" => "running", "reason" => :null}
- end
-
- test "split q=1 db shards on node1 (1 job)", context do
- db = context[:db1]
- node1 = get_first_node()
-
- resp = post_job_node(db, node1)
- assert resp.status_code in [201, 202]
-
- body = resp.body
- assert is_list(body)
- assert length(body) == 1
-
- [job] = body
- id = job["id"]
- assert is_binary(id)
- node = job["node"]
- assert is_binary(node)
- assert node == node1
- assert job["ok"] == true
- shard = job["shard"]
- assert is_binary(shard)
-
- resp = get_job(id)
- assert resp.status_code == 200
-
- body = resp.body
- assert body["type"] == "split"
- assert body["id"] == id
- assert body["source"] == shard
- assert is_list(body["history"])
- assert body["job_state"] in ["new", "running", "completed"]
- assert is_list(body["target"])
- assert length(body["target"]) == 2
-
- wait_job_completed(id)
-
- resp = get_job(id)
- assert resp.status_code == 200
-
- body = resp.body
- assert body["job_state"] == "completed"
- assert body["split_state"] == "completed"
-
- resp = Couch.get("/#{db}/_shards")
- assert resp.status_code == 200
- shards = resp.body["shards"]
- assert node1 not in Map.get(shards, "00000000-ffffffff", [])
- assert shards["00000000-7fffffff"] == [node1]
- assert shards["80000000-ffffffff"] == [node1]
-
- summary = get_summary()
- assert summary["total"] == 1
- assert summary["completed"] == 1
-
- resp = remove_job(id)
- assert resp.status_code == 200
-
- assert get_jobs() == []
-
- summary = get_summary()
- assert summary["total"] == 0
- assert summary["completed"] == 0
- end
-
- test "split q=2 shards on node1 (2 jobs)", context do
- db = context[:db2]
- node1 = get_first_node()
-
- resp = post_job_node(db, node1)
- assert resp.status_code in [201, 202]
-
- body = resp.body
- assert is_list(body)
- assert length(body) == 2
-
- [job1, job2] = Enum.sort(body)
- {id1, id2} = {job1["id"], job2["id"]}
-
- assert get_job(id1).body["id"] == id1
- assert get_job(id2).body["id"] == id2
-
- summary = get_summary()
- assert summary["total"] == 2
-
- wait_job_completed(id1)
- wait_job_completed(id2)
-
- summary = get_summary()
- assert summary["completed"] == 2
-
- resp = Couch.get("/#{db}/_shards")
- assert resp.status_code == 200
- shards = resp.body["shards"]
- assert node1 not in Map.get(shards, "00000000-7fffffff", [])
- assert node1 not in Map.get(shards, "80000000-ffffffff", [])
- assert shards["00000000-3fffffff"] == [node1]
- assert shards["40000000-7fffffff"] == [node1]
- assert shards["80000000-bfffffff"] == [node1]
- assert shards["c0000000-ffffffff"] == [node1]
-
- # deleting the source db should remove the jobs
- delete_db(db)
- wait_job_removed(id1)
- wait_job_removed(id2)
-
- summary = get_summary()
- assert summary["total"] == 0
- end
-end
diff --git a/test/elixir/test/reshard_changes_feed.exs b/test/elixir/test/reshard_changes_feed.exs
deleted file mode 100644
index 5498ded7b..000000000
--- a/test/elixir/test/reshard_changes_feed.exs
+++ /dev/null
@@ -1,81 +0,0 @@
-defmodule ReshardChangesFeedTest do
- use CouchTestCase
- import ReshardHelpers
-
- @moduledoc """
- Test _changes interaction with resharding
- """
-
- setup do
- db = random_db_name()
- {:ok, _} = create_db(db, query: %{q: 2})
-
- on_exit(fn ->
- reset_reshard_state()
- delete_db(db)
- end)
-
- {:ok, [db: db]}
- end
-
-  test "_changes after splitting shards", context do
- db = context[:db]
- add_docs(1..3, db)
-
- all_before = changes(db)
- first_seq = hd(all_before["results"])["seq"]
- last_seq = all_before["last_seq"]
- since_1_before = docset(changes(db, %{:since => first_seq}))
- since_last_before = docset(changes(db, %{:since => last_seq}))
-
- resp = post_job_range(db, "00000000-7fffffff")
- assert resp.status_code in [201, 202]
-
- resp.body
- |> Enum.map(fn j -> j["id"] end)
- |> Enum.each(fn id -> wait_job_completed(id) end)
-
- all_after = changes(db)
- since_1_after = docset(changes(db, %{:since => first_seq}))
- since_last_after = docset(changes(db, %{:since => last_seq}))
-
- assert docset(all_before) == docset(all_after)
- assert MapSet.subset?(since_1_before, since_1_after)
- assert MapSet.subset?(since_last_before, since_last_after)
-
- get_jobs()
- |> Enum.map(fn j -> j["id"] end)
- |> Enum.each(fn id -> remove_job(id) end)
- end
-
- defp docset(changes) do
- changes["results"]
- |> Enum.map(fn %{"id" => id} -> id end)
- |> MapSet.new()
- end
-
- defp changes(db, query \\ %{}) do
- resp = Couch.get("/#{db}/_changes", query: query)
- assert resp.status_code == 200
- resp.body
- end
-
- defp add_docs(range, db) do
- docs = create_docs(range)
- w3 = %{:w => 3}
- resp = Couch.post("/#{db}/_bulk_docs", body: %{docs: docs}, query: w3)
- assert resp.status_code in [201, 202]
- assert length(resp.body) == length(docs)
-
- docs
- |> rev(resp.body)
- |> Enum.into(%{}, fn %{:_id => id, :_rev => rev} -> {id, rev} end)
- end
-
- # (Keep for debugging)
- # defp unpack_seq(seq) when is_binary(seq) do
- # [_, opaque] = String.split(seq, "-")
- # {:ok, binblob} = Base.url_decode64(opaque, padding: false)
- # :erlang.binary_to_term(binblob)
- # end
-end
diff --git a/test/elixir/test/reshard_helpers.exs b/test/elixir/test/reshard_helpers.exs
deleted file mode 100644
index 282d98c82..000000000
--- a/test/elixir/test/reshard_helpers.exs
+++ /dev/null
@@ -1,114 +0,0 @@
-defmodule ReshardHelpers do
- use CouchTestCase
-
- def get_summary do
- resp = Couch.get("/_reshard")
- assert resp.status_code == 200
- resp.body
- end
-
- def get_state do
- resp = Couch.get("/_reshard/state")
- assert resp.status_code == 200
- resp.body
- end
-
- def put_state_running do
- resp = Couch.put("/_reshard/state", body: %{:state => "running"})
- assert resp.status_code == 200
- resp
- end
-
- def put_state_stopped(reason \\ "") do
- body = %{:state => "stopped", :reason => reason}
- resp = Couch.put("/_reshard/state", body: body)
- assert resp.status_code == 200
- resp
- end
-
- def get_jobs do
- resp = Couch.get("/_reshard/jobs")
- assert resp.status_code == 200
- resp.body["jobs"]
- end
-
- def post_job_db(db) do
- body = %{:type => :split, :db => db}
- Couch.post("/_reshard/jobs", body: body)
- end
-
- def post_job_node(db, node) do
- body = %{:type => :split, :db => db, :node => node}
- Couch.post("/_reshard/jobs", body: body)
- end
-
- def post_job_range(db, range) do
- body = %{:type => :split, :db => db, :range => range}
- Couch.post("/_reshard/jobs", body: body)
- end
-
- def post_job_node_and_range(db, node, range) do
- body = %{:type => :split, :db => db, :node => node, :range => range}
- Couch.post("/_reshard/jobs", body: body)
- end
-
- def get_job(id) when is_binary(id) do
- Couch.get("/_reshard/jobs/#{id}")
- end
-
- def remove_job(id) when is_binary(id) do
- Couch.delete("/_reshard/jobs/#{id}")
- end
-
- def get_job_state(id) when is_binary(id) do
- resp = Couch.get("/_reshard/jobs/#{id}/state")
- assert resp.status_code == 200
- resp.body["state"]
- end
-
- def stop_job(id, reason \\ "") when is_binary(id) do
- body = %{:state => "stopped", :reason => reason}
- Couch.post("/_reshard/jobs/#{id}/state", body: body)
- end
-
- def resume_job(id) when is_binary(id) do
- body = %{:state => "running"}
- Couch.post("/_reshard/jobs/#{id}/state", body: body)
- end
-
- def job_ids(jobs) do
-    Enum.map(jobs, fn job -> job["id"] end)
- end
-
- def get_first_node do
- mresp = Couch.get("/_membership")
- assert mresp.status_code == 200
- all_nodes = mresp.body["all_nodes"]
-
- mresp.body["cluster_nodes"]
- |> Enum.filter(fn n -> n in all_nodes end)
- |> Enum.sort()
- |> hd()
- end
-
- def wait_job_removed(id) do
- retry_until(fn -> get_job(id).status_code == 404 end, 200, 60_000)
- end
-
- def wait_job_completed(id) do
- wait_job_state(id, "completed")
- end
-
- def wait_job_state(id, state) do
- retry_until(fn -> get_job_state(id) == state end, 200, 60_000)
- end
-
- def reset_reshard_state do
- get_jobs()
- |> Enum.map(fn j -> j["id"] end)
- |> Enum.each(fn id -> remove_job(id) end)
-
- assert get_jobs() == []
- put_state_running()
- end
-end
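
For context, the removed ReshardHelpers module drove the _reshard API in the suites above. A minimal sketch of the split-and-clean-up flow built from those helpers; the module name is hypothetical:

defmodule ReshardUsageSketch do
  use CouchTestCase
  import ReshardHelpers

  test "split one shard range and clean up" do
    db = random_db_name()
    {:ok, _} = create_db(db, query: %{q: 2})

    # ask the cluster to split a single shard range of the database
    resp = post_job_range(db, "00000000-7fffffff")
    assert resp.status_code in [201, 202]

    # wait for each created job to finish, then remove its record
    resp.body
    |> Enum.map(fn job -> job["id"] end)
    |> Enum.each(fn id ->
      wait_job_completed(id)
      assert remove_job(id).status_code == 200
    end)

    delete_db(db)
  end
end
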
diff --git a/test/elixir/test/security_validation_test.exs b/test/elixir/test/security_validation_test.exs
index 0df3a780b..e10331477 100644
--- a/test/elixir/test/security_validation_test.exs
+++ b/test/elixir/test/security_validation_test.exs
@@ -53,9 +53,6 @@ defmodule SecurityValidationTest do
on_exit(fn -> delete_db(auth_db_name) end)
configs = [
- {"httpd", "authentication_handlers",
- "{couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}"},
- {"couch_httpd_auth", "authentication_db", auth_db_name},
{"chttpd_auth", "authentication_db", auth_db_name}
]
@@ -72,6 +69,7 @@ defmodule SecurityValidationTest do
Enum.each(users, fn {name, pass} ->
doc = %{
:_id => "org.couchdb.user:#{name}",
+ :type => "user",
:name => name,
:roles => [],
:password => pass
diff --git a/test/elixir/test/test_helper.exs b/test/elixir/test/test_helper.exs
index 4bf65bcf6..6311fca44 100644
--- a/test/elixir/test/test_helper.exs
+++ b/test/elixir/test/test_helper.exs
@@ -14,5 +14,3 @@ ExUnit.configure(
)
ExUnit.start()
-Code.require_file("partition_helpers.exs", __DIR__)
-Code.require_file("reshard_helpers.exs", __DIR__)
diff --git a/test/elixir/test/view_collation_test.exs b/test/elixir/test/view_collation_test.exs
index 7563ba416..bf30031e0 100644
--- a/test/elixir/test/view_collation_test.exs
+++ b/test/elixir/test/view_collation_test.exs
@@ -70,34 +70,28 @@ defmodule ViewCollationTest do
end
test "ascending collation order", context do
- retry_until(fn ->
- resp = Couch.get(url(context))
- pairs = Enum.zip(resp.body["rows"], @values)
+ resp = Couch.get(url(context))
+ pairs = Enum.zip(resp.body["rows"], @values)
- Enum.each(pairs, fn {row, value} ->
- assert row["key"] == convert(value)
- end)
+ Enum.each(pairs, fn {row, value} ->
+ assert row["key"] == convert(value)
end)
end
test "descending collation order", context do
- retry_until(fn ->
- resp = Couch.get(url(context), query: %{"descending" => "true"})
- pairs = Enum.zip(resp.body["rows"], Enum.reverse(@values))
+ resp = Couch.get(url(context), query: %{"descending" => "true"})
+ pairs = Enum.zip(resp.body["rows"], Enum.reverse(@values))
- Enum.each(pairs, fn {row, value} ->
- assert row["key"] == convert(value)
- end)
+ Enum.each(pairs, fn {row, value} ->
+ assert row["key"] == convert(value)
end)
end
test "key query option", context do
Enum.each(@values, fn value ->
- retry_until(fn ->
- resp = Couch.get(url(context), query: %{:key => :jiffy.encode(value)})
- assert length(resp.body["rows"]) == 1
- assert Enum.at(resp.body["rows"], 0)["key"] == convert(value)
- end)
+ resp = Couch.get(url(context), query: %{:key => :jiffy.encode(value)})
+ assert length(resp.body["rows"]) == 1
+ assert Enum.at(resp.body["rows"], 0)["key"] == convert(value)
end)
end