author    Joan Touzet <wohali@users.noreply.github.com>  2019-09-05 18:53:20 +0000
committer GitHub <noreply@github.com>  2019-09-05 18:53:20 +0000
commit    7685c3c5608dbd6e9361bf51e552dccdddf22d6b (patch)
tree      d6898446651f5a7c989e5f3b27183615a27270f1
parent    044e57815f7bfc407f15d5514e6689e3cd0e9dae (diff)
parent    4a15a4c33c85bed023c4c9fa5a608741b4bbf285 (diff)
Merge branch 'master' into 1523-bye-bye-5986
-rw-r--r--  .credo.exs (renamed from test/elixir/.credo.exs) | 11
-rw-r--r--  .formatter.exs | 9
-rw-r--r--  .gitignore | 7
-rw-r--r--  .travis.yml | 5
-rw-r--r--  Jenkinsfile | 297
-rw-r--r--  Makefile | 53
-rw-r--r--  Makefile.win | 26
-rw-r--r--  config/config.exs | 30
-rw-r--r--  config/dev.exs | 1
-rw-r--r--  config/integration.exs | 12
-rw-r--r--  config/prod.exs | 1
-rw-r--r--  config/test.exs | 12
-rwxr-xr-x  dev/run | 41
-rw-r--r--  mix.exs | 61
-rw-r--r--  mix.lock (renamed from test/elixir/mix.lock) | 6
-rw-r--r--  rebar.config.script | 2
-rw-r--r--  src/chttpd/test/eunit/chttpd_cors_test.erl (renamed from src/chttpd/test/chttpd_cors_test.erl) | 0
-rw-r--r--  src/chttpd/test/eunit/chttpd_csp_tests.erl (renamed from src/chttpd/test/chttpd_csp_tests.erl) | 0
-rw-r--r--  src/chttpd/test/eunit/chttpd_db_attachment_size_tests.erl (renamed from src/chttpd/test/chttpd_db_attachment_size_tests.erl) | 0
-rw-r--r--  src/chttpd/test/eunit/chttpd_db_bulk_get_multipart_test.erl (renamed from src/chttpd/test/chttpd_db_bulk_get_multipart_test.erl) | 0
-rw-r--r--  src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl (renamed from src/chttpd/test/chttpd_db_bulk_get_test.erl) | 0
-rw-r--r--  src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl (renamed from src/chttpd/test/chttpd_db_doc_size_tests.erl) | 0
-rw-r--r--  src/chttpd/test/eunit/chttpd_db_test.erl (renamed from src/chttpd/test/chttpd_db_test.erl) | 109
-rw-r--r--  src/chttpd/test/eunit/chttpd_dbs_info_test.erl (renamed from src/chttpd/test/chttpd_dbs_info_test.erl) | 0
-rw-r--r--  src/chttpd/test/eunit/chttpd_error_info_tests.erl (renamed from src/chttpd/test/chttpd_error_info_tests.erl) | 0
-rw-r--r--  src/chttpd/test/eunit/chttpd_handlers_tests.erl (renamed from src/chttpd/test/chttpd_handlers_tests.erl) | 0
-rw-r--r--  src/chttpd/test/eunit/chttpd_open_revs_error_test.erl (renamed from src/chttpd/test/chttpd_open_revs_error_test.erl) | 0
-rw-r--r--  src/chttpd/test/eunit/chttpd_plugin_tests.erl (renamed from src/chttpd/test/chttpd_plugin_tests.erl) | 0
-rw-r--r--  src/chttpd/test/eunit/chttpd_prefer_header_test.erl (renamed from src/chttpd/test/chttpd_prefer_header_test.erl) | 0
-rw-r--r--  src/chttpd/test/eunit/chttpd_purge_tests.erl (renamed from src/chttpd/test/chttpd_purge_tests.erl) | 0
-rw-r--r--  src/chttpd/test/eunit/chttpd_security_tests.erl (renamed from src/chttpd/test/chttpd_security_tests.erl) | 0
-rw-r--r--  src/chttpd/test/eunit/chttpd_socket_buffer_size_test.erl (renamed from src/chttpd/test/chttpd_socket_buffer_size_test.erl) | 0
-rw-r--r--  src/chttpd/test/eunit/chttpd_view_test.erl (renamed from src/chttpd/test/chttpd_view_test.erl) | 13
-rw-r--r--  src/chttpd/test/eunit/chttpd_welcome_test.erl (renamed from src/chttpd/test/chttpd_welcome_test.erl) | 0
-rw-r--r--  src/chttpd/test/eunit/chttpd_xframe_test.erl (renamed from src/chttpd/test/chttpd_xframe_test.erl) | 0
-rw-r--r--  src/couch/include/couch_eunit.hrl | 2
-rw-r--r--  src/couch/test/eunit/chttpd_endpoints_tests.erl (renamed from src/couch/test/chttpd_endpoints_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_auth_cache_tests.erl (renamed from src/couch/test/couch_auth_cache_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_base32_tests.erl (renamed from src/couch/test/couch_base32_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_bt_engine_compactor_tests.erl (renamed from src/couch/test/couch_bt_engine_compactor_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_bt_engine_tests.erl (renamed from src/couch/test/couch_bt_engine_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl (renamed from src/couch/test/couch_bt_engine_upgrade_tests.erl) | 17
-rw-r--r--  src/couch/test/eunit/couch_btree_tests.erl (renamed from src/couch/test/couch_btree_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_changes_tests.erl (renamed from src/couch/test/couch_changes_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_compress_tests.erl (renamed from src/couch/test/couch_compress_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_db_doc_tests.erl (renamed from src/couch/test/couch_db_doc_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_db_mpr_tests.erl (renamed from src/couch/test/couch_db_mpr_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_db_plugin_tests.erl (renamed from src/couch/test/couch_db_plugin_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_db_props_upgrade_tests.erl (renamed from src/couch/test/couch_db_props_upgrade_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_db_split_tests.erl (renamed from src/couch/test/couch_db_split_tests.erl) | 5
-rw-r--r--  src/couch/test/eunit/couch_db_tests.erl (renamed from src/couch/test/couch_db_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_doc_json_tests.erl (renamed from src/couch/test/couch_doc_json_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_doc_tests.erl (renamed from src/couch/test/couch_doc_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_ejson_size_tests.erl (renamed from src/couch/test/couch_ejson_size_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_etag_tests.erl (renamed from src/couch/test/couch_etag_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_file_tests.erl (renamed from src/couch/test/couch_file_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_flags_config_tests.erl (renamed from src/couch/test/couch_flags_config_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_flags_tests.erl (renamed from src/couch/test/couch_flags_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_hotp_tests.erl (renamed from src/couch/test/couch_hotp_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_index_tests.erl (renamed from src/couch/test/couch_index_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_key_tree_prop_tests.erl (renamed from src/couch/test/couch_key_tree_prop_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_key_tree_tests.erl (renamed from src/couch/test/couch_key_tree_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_passwords_tests.erl (renamed from src/couch/test/couch_passwords_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_query_servers_tests.erl (renamed from src/couch/test/couch_query_servers_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_server_tests.erl (renamed from src/couch/test/couch_server_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_stream_tests.erl (renamed from src/couch/test/couch_stream_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_task_status_tests.erl (renamed from src/couch/test/couch_task_status_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_totp_tests.erl (renamed from src/couch/test/couch_totp_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_util_tests.erl (renamed from src/couch/test/couch_util_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_uuids_tests.erl (renamed from src/couch/test/couch_uuids_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couch_work_queue_tests.erl (renamed from src/couch/test/couch_work_queue_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couchdb_attachments_tests.erl (renamed from src/couch/test/couchdb_attachments_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couchdb_auth_tests.erl (renamed from src/couch/test/couchdb_auth_tests.erl) | 0
-rwxr-xr-x  src/couch/test/eunit/couchdb_cookie_domain_tests.erl (renamed from src/couch/test/couchdb_cookie_domain_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couchdb_cors_tests.erl (renamed from src/couch/test/couchdb_cors_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couchdb_db_tests.erl (renamed from src/couch/test/couchdb_db_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couchdb_design_doc_tests.erl (renamed from src/couch/test/couchdb_design_doc_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couchdb_file_compression_tests.erl (renamed from src/couch/test/couchdb_file_compression_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couchdb_location_header_tests.erl (renamed from src/couch/test/couchdb_location_header_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couchdb_mrview_cors_tests.erl (renamed from src/couch/test/couchdb_mrview_cors_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couchdb_mrview_tests.erl (renamed from src/couch/test/couchdb_mrview_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couchdb_os_proc_pool.erl (renamed from src/couch/test/couchdb_os_proc_pool.erl) | 0
-rw-r--r--  src/couch/test/eunit/couchdb_update_conflicts_tests.erl (renamed from src/couch/test/couchdb_update_conflicts_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couchdb_vhosts_tests.erl (renamed from src/couch/test/couchdb_vhosts_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/couchdb_views_tests.erl (renamed from src/couch/test/couchdb_views_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/fixtures/3b835456c235b1827e012e25666152f3.view (renamed from src/couch/test/fixtures/3b835456c235b1827e012e25666152f3.view) | bin 4192 -> 4192 bytes
-rw-r--r--  src/couch/test/eunit/fixtures/couch_stats_aggregates.cfg (renamed from src/couch/test/fixtures/couch_stats_aggregates.cfg) | 0
-rw-r--r--  src/couch/test/eunit/fixtures/couch_stats_aggregates.ini (renamed from src/couch/test/fixtures/couch_stats_aggregates.ini) | 0
-rw-r--r--  src/couch/test/eunit/fixtures/db_non_partitioned.couch (renamed from src/couch/test/fixtures/db_non_partitioned.couch) | bin 12479 -> 12479 bytes
-rw-r--r--  src/couch/test/eunit/fixtures/db_v6_with_1_purge_req.couch (renamed from src/couch/test/fixtures/db_v6_with_1_purge_req.couch) | bin 12470 -> 12470 bytes
-rw-r--r--  src/couch/test/eunit/fixtures/db_v6_with_1_purge_req_for_2_docs.couch (renamed from src/couch/test/fixtures/db_v6_with_1_purge_req_for_2_docs.couch) | bin 16557 -> 16557 bytes
-rw-r--r--  src/couch/test/eunit/fixtures/db_v6_with_2_purge_req.couch (renamed from src/couch/test/fixtures/db_v6_with_2_purge_req.couch) | bin 16566 -> 16566 bytes
-rw-r--r--  src/couch/test/eunit/fixtures/db_v6_without_purge_req.couch (renamed from src/couch/test/fixtures/db_v6_without_purge_req.couch) | bin 61644 -> 61644 bytes
-rw-r--r--  src/couch/test/eunit/fixtures/db_v7_with_1_purge_req.couch (renamed from src/couch/test/fixtures/db_v7_with_1_purge_req.couch) | bin 16617 -> 16617 bytes
-rw-r--r--  src/couch/test/eunit/fixtures/db_v7_with_1_purge_req_for_2_docs.couch (renamed from src/couch/test/fixtures/db_v7_with_1_purge_req_for_2_docs.couch) | bin 20705 -> 20705 bytes
-rw-r--r--  src/couch/test/eunit/fixtures/db_v7_with_2_purge_req.couch (renamed from src/couch/test/fixtures/db_v7_with_2_purge_req.couch) | bin 20713 -> 20713 bytes
-rw-r--r--  src/couch/test/eunit/fixtures/db_v7_without_purge_req.couch (renamed from src/couch/test/fixtures/db_v7_without_purge_req.couch) | bin 65781 -> 65781 bytes
-rw-r--r--  src/couch/test/eunit/fixtures/logo.png (renamed from src/couch/test/fixtures/logo.png) | bin 3010 -> 3010 bytes
-rw-r--r--  src/couch/test/eunit/fixtures/multipart.http (renamed from src/couch/test/fixtures/multipart.http) | 0
-rw-r--r--  src/couch/test/eunit/fixtures/os_daemon_bad_perm.sh (renamed from src/couch/test/fixtures/os_daemon_bad_perm.sh) | 0
-rwxr-xr-x  src/couch/test/eunit/fixtures/os_daemon_can_reboot.sh (renamed from src/couch/test/fixtures/os_daemon_can_reboot.sh) | 0
-rwxr-xr-x  src/couch/test/eunit/fixtures/os_daemon_configer.escript (renamed from src/couch/test/fixtures/os_daemon_configer.escript) | 0
-rwxr-xr-x  src/couch/test/eunit/fixtures/os_daemon_die_on_boot.sh (renamed from src/couch/test/fixtures/os_daemon_die_on_boot.sh) | 0
-rwxr-xr-x  src/couch/test/eunit/fixtures/os_daemon_die_quickly.sh (renamed from src/couch/test/fixtures/os_daemon_die_quickly.sh) | 0
-rwxr-xr-x  src/couch/test/eunit/fixtures/os_daemon_looper.escript (renamed from src/couch/test/fixtures/os_daemon_looper.escript) | 0
-rw-r--r--  src/couch/test/eunit/fixtures/test.couch (renamed from src/couch/test/fixtures/test.couch) | bin 16482 -> 16482 bytes
-rw-r--r--  src/couch/test/eunit/global_changes_tests.erl (renamed from src/couch/test/global_changes_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/json_stream_parse_tests.erl (renamed from src/couch/test/json_stream_parse_tests.erl) | 0
-rw-r--r--  src/couch/test/eunit/test_web.erl (renamed from src/couch/test/test_web.erl) | 0
-rw-r--r--  src/couch/test/exunit/fabric_test.exs | 101
-rw-r--r--  src/couch/test/exunit/test_helper.exs | 2
-rw-r--r--  src/couch_epi/src/couch_epi_sup.erl | 5
-rw-r--r--  src/couch_epi/test/eunit/couch_epi_basic_test.erl (renamed from src/couch_epi/test/couch_epi_basic_test.erl) | 34
-rw-r--r--  src/couch_epi/test/eunit/couch_epi_tests.erl (renamed from src/couch_epi/test/couch_epi_tests.erl) | 4
-rw-r--r--  src/couch_epi/test/eunit/fixtures/app_data1.cfg (renamed from src/couch_epi/test/fixtures/app_data1.cfg) | 0
-rw-r--r--  src/couch_epi/test/eunit/fixtures/app_data2.cfg (renamed from src/couch_epi/test/fixtures/app_data2.cfg) | 0
-rw-r--r--  src/couch_index/test/eunit/couch_index_compaction_tests.erl (renamed from src/couch_index/test/couch_index_compaction_tests.erl) | 0
-rw-r--r--  src/couch_index/test/eunit/couch_index_ddoc_updated_tests.erl (renamed from src/couch_index/test/couch_index_ddoc_updated_tests.erl) | 0
-rw-r--r--  src/couch_log/test/eunit/couch_log_config_listener_test.erl (renamed from src/couch_log/test/couch_log_config_listener_test.erl) | 0
-rw-r--r--  src/couch_log/test/eunit/couch_log_config_test.erl (renamed from src/couch_log/test/couch_log_config_test.erl) | 0
-rw-r--r--  src/couch_log/test/eunit/couch_log_error_logger_h_test.erl (renamed from src/couch_log/test/couch_log_error_logger_h_test.erl) | 0
-rw-r--r--  src/couch_log/test/eunit/couch_log_formatter_test.erl (renamed from src/couch_log/test/couch_log_formatter_test.erl) | 0
-rw-r--r--  src/couch_log/test/eunit/couch_log_monitor_test.erl (renamed from src/couch_log/test/couch_log_monitor_test.erl) | 0
-rw-r--r--  src/couch_log/test/eunit/couch_log_server_test.erl (renamed from src/couch_log/test/couch_log_server_test.erl) | 0
-rw-r--r--  src/couch_log/test/eunit/couch_log_test.erl (renamed from src/couch_log/test/couch_log_test.erl) | 0
-rw-r--r--  src/couch_log/test/eunit/couch_log_test_util.erl (renamed from src/couch_log/test/couch_log_test_util.erl) | 0
-rw-r--r--  src/couch_log/test/eunit/couch_log_trunc_io_fmt_test.erl (renamed from src/couch_log/test/couch_log_trunc_io_fmt_test.erl) | 0
-rw-r--r--  src/couch_log/test/eunit/couch_log_util_test.erl (renamed from src/couch_log/test/couch_log_util_test.erl) | 0
-rw-r--r--  src/couch_log/test/eunit/couch_log_writer_ets.erl (renamed from src/couch_log/test/couch_log_writer_ets.erl) | 0
-rw-r--r--  src/couch_log/test/eunit/couch_log_writer_file_test.erl (renamed from src/couch_log/test/couch_log_writer_file_test.erl) | 0
-rw-r--r--  src/couch_log/test/eunit/couch_log_writer_stderr_test.erl (renamed from src/couch_log/test/couch_log_writer_stderr_test.erl) | 0
-rw-r--r--  src/couch_log/test/eunit/couch_log_writer_syslog_test.erl (renamed from src/couch_log/test/couch_log_writer_syslog_test.erl) | 0
-rw-r--r--  src/couch_log/test/eunit/couch_log_writer_test.erl (renamed from src/couch_log/test/couch_log_writer_test.erl) | 0
-rw-r--r--  src/couch_mrview/test/eunit/couch_mrview_all_docs_tests.erl (renamed from src/couch_mrview/test/couch_mrview_all_docs_tests.erl) | 0
-rw-r--r--  src/couch_mrview/test/eunit/couch_mrview_changes_since_tests.erl (renamed from src/couch_mrview/test/couch_mrview_changes_since_tests.erl) | 0
-rw-r--r--  src/couch_mrview/test/eunit/couch_mrview_collation_tests.erl (renamed from src/couch_mrview/test/couch_mrview_collation_tests.erl) | 0
-rw-r--r--  src/couch_mrview/test/eunit/couch_mrview_compact_tests.erl (renamed from src/couch_mrview/test/couch_mrview_compact_tests.erl) | 0
-rw-r--r--  src/couch_mrview/test/eunit/couch_mrview_ddoc_updated_tests.erl (renamed from src/couch_mrview/test/couch_mrview_ddoc_updated_tests.erl) | 0
-rw-r--r--  src/couch_mrview/test/eunit/couch_mrview_ddoc_validation_tests.erl (renamed from src/couch_mrview/test/couch_mrview_ddoc_validation_tests.erl) | 0
-rw-r--r--  src/couch_mrview/test/eunit/couch_mrview_design_docs_tests.erl (renamed from src/couch_mrview/test/couch_mrview_design_docs_tests.erl) | 0
-rw-r--r--  src/couch_mrview/test/eunit/couch_mrview_http_tests.erl (renamed from src/couch_mrview/test/couch_mrview_http_tests.erl) | 0
-rw-r--r--  src/couch_mrview/test/eunit/couch_mrview_index_changes_tests.erl (renamed from src/couch_mrview/test/couch_mrview_index_changes_tests.erl) | 0
-rw-r--r--  src/couch_mrview/test/eunit/couch_mrview_index_info_tests.erl (renamed from src/couch_mrview/test/couch_mrview_index_info_tests.erl) | 0
-rw-r--r--  src/couch_mrview/test/eunit/couch_mrview_local_docs_tests.erl (renamed from src/couch_mrview/test/couch_mrview_local_docs_tests.erl) | 0
-rw-r--r--  src/couch_mrview/test/eunit/couch_mrview_map_views_tests.erl (renamed from src/couch_mrview/test/couch_mrview_map_views_tests.erl) | 0
-rw-r--r--  src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl (renamed from src/couch_mrview/test/couch_mrview_purge_docs_fabric_tests.erl) | 10
-rw-r--r--  src/couch_mrview/test/eunit/couch_mrview_purge_docs_tests.erl (renamed from src/couch_mrview/test/couch_mrview_purge_docs_tests.erl) | 0
-rw-r--r--  src/couch_mrview/test/eunit/couch_mrview_red_views_tests.erl (renamed from src/couch_mrview/test/couch_mrview_red_views_tests.erl) | 0
-rw-r--r--  src/couch_mrview/test/eunit/couch_mrview_util_tests.erl (renamed from src/couch_mrview/test/couch_mrview_util_tests.erl) | 0
-rw-r--r--  src/couch_peruser/test/eunit/couch_peruser_test.erl (renamed from src/couch_peruser/test/couch_peruser_test.erl) | 0
-rw-r--r--  src/couch_pse_tests/src/cpse_test_purge_replication.erl | 21
-rw-r--r--  src/couch_replicator/src/couch_replicator.erl | 16
-rw-r--r--  src/couch_replicator/src/couch_replicator_api_wrap.erl | 150
-rw-r--r--  src/couch_replicator/src/couch_replicator_doc_processor.erl | 12
-rw-r--r--  src/couch_replicator/src/couch_replicator_doc_processor_worker.erl | 6
-rw-r--r--  src/couch_replicator/src/couch_replicator_docs.erl | 29
-rw-r--r--  src/couch_replicator/src/couch_replicator_filters.erl | 13
-rw-r--r--  src/couch_replicator/src/couch_replicator_ids.erl | 44
-rw-r--r--  src/couch_replicator/src/couch_replicator_scheduler.erl | 82
-rw-r--r--  src/couch_replicator/src/couch_replicator_scheduler_job.erl | 31
-rw-r--r--  src/couch_replicator/src/couch_replicator_utils.erl | 56
-rw-r--r--  src/couch_replicator/src/couch_replicator_worker.erl | 143
-rw-r--r--  src/couch_replicator/test/eunit/couch_replicator_attachments_too_large.erl (renamed from src/couch_replicator/test/couch_replicator_attachments_too_large.erl) | 4
-rw-r--r--  src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl (renamed from src/couch_replicator/test/couch_replicator_compact_tests.erl) | 5
-rw-r--r--  src/couch_replicator/test/eunit/couch_replicator_connection_tests.erl (renamed from src/couch_replicator/test/couch_replicator_connection_tests.erl) | 0
-rw-r--r--  src/couch_replicator/test/eunit/couch_replicator_create_target_with_options_tests.erl (renamed from src/couch_replicator/test/couch_replicator_create_target_with_options_tests.erl) | 0
-rw-r--r--  src/couch_replicator/test/eunit/couch_replicator_filtered_tests.erl (renamed from src/couch_replicator/test/couch_replicator_filtered_tests.erl) | 10
-rw-r--r--  src/couch_replicator/test/eunit/couch_replicator_httpc_pool_tests.erl (renamed from src/couch_replicator/test/couch_replicator_httpc_pool_tests.erl) | 0
-rw-r--r--  src/couch_replicator/test/eunit/couch_replicator_id_too_long_tests.erl (renamed from src/couch_replicator/test/couch_replicator_id_too_long_tests.erl) | 5
-rw-r--r--  src/couch_replicator/test/eunit/couch_replicator_large_atts_tests.erl (renamed from src/couch_replicator/test/couch_replicator_large_atts_tests.erl) | 5
-rw-r--r--  src/couch_replicator/test/eunit/couch_replicator_many_leaves_tests.erl (renamed from src/couch_replicator/test/couch_replicator_many_leaves_tests.erl) | 6
-rw-r--r--  src/couch_replicator/test/eunit/couch_replicator_missing_stubs_tests.erl (renamed from src/couch_replicator/test/couch_replicator_missing_stubs_tests.erl) | 5
-rw-r--r--  src/couch_replicator/test/eunit/couch_replicator_proxy_tests.erl (renamed from src/couch_replicator/test/couch_replicator_proxy_tests.erl) | 0
-rw-r--r--  src/couch_replicator/test/eunit/couch_replicator_rate_limiter_tests.erl (renamed from src/couch_replicator/test/couch_replicator_rate_limiter_tests.erl) | 0
-rw-r--r--  src/couch_replicator/test/eunit/couch_replicator_retain_stats_between_job_runs.erl (renamed from src/couch_replicator/test/couch_replicator_retain_stats_between_job_runs.erl) | 0
-rw-r--r--  src/couch_replicator/test/eunit/couch_replicator_selector_tests.erl (renamed from src/couch_replicator/test/couch_replicator_selector_tests.erl) | 5
-rw-r--r--  src/couch_replicator/test/eunit/couch_replicator_small_max_request_size_target.erl (renamed from src/couch_replicator/test/couch_replicator_small_max_request_size_target.erl) | 5
-rw-r--r--  src/couch_replicator/test/eunit/couch_replicator_test_helper.erl (renamed from src/couch_replicator/test/couch_replicator_test_helper.erl) | 0
-rw-r--r--  src/couch_replicator/test/eunit/couch_replicator_use_checkpoints_tests.erl (renamed from src/couch_replicator/test/couch_replicator_use_checkpoints_tests.erl) | 5
-rw-r--r--  src/ddoc_cache/test/eunit/ddoc_cache_basic_test.erl (renamed from src/ddoc_cache/test/ddoc_cache_basic_test.erl) | 0
-rw-r--r--  src/ddoc_cache/test/eunit/ddoc_cache_coverage_test.erl (renamed from src/ddoc_cache/test/ddoc_cache_coverage_test.erl) | 0
-rw-r--r--  src/ddoc_cache/test/eunit/ddoc_cache_disabled_test.erl (renamed from src/ddoc_cache/test/ddoc_cache_disabled_test.erl) | 0
-rw-r--r--  src/ddoc_cache/test/eunit/ddoc_cache_entry_test.erl (renamed from src/ddoc_cache/test/ddoc_cache_entry_test.erl) | 0
-rw-r--r--  src/ddoc_cache/test/eunit/ddoc_cache_ev.erl (renamed from src/ddoc_cache/test/ddoc_cache_ev.erl) | 0
-rw-r--r--  src/ddoc_cache/test/eunit/ddoc_cache_eviction_test.erl (renamed from src/ddoc_cache/test/ddoc_cache_eviction_test.erl) | 0
-rw-r--r--  src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl (renamed from src/ddoc_cache/test/ddoc_cache_lru_test.erl) | 19
-rw-r--r--  src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl (renamed from src/ddoc_cache/test/ddoc_cache_no_cache_test.erl) | 76
-rw-r--r--  src/ddoc_cache/test/eunit/ddoc_cache_open_error_test.erl (renamed from src/ddoc_cache/test/ddoc_cache_open_error_test.erl) | 0
-rw-r--r--  src/ddoc_cache/test/eunit/ddoc_cache_open_test.erl (renamed from src/ddoc_cache/test/ddoc_cache_open_test.erl) | 0
-rw-r--r--  src/ddoc_cache/test/eunit/ddoc_cache_opener_test.erl (renamed from src/ddoc_cache/test/ddoc_cache_opener_test.erl) | 0
-rw-r--r--  src/ddoc_cache/test/eunit/ddoc_cache_refresh_test.erl (renamed from src/ddoc_cache/test/ddoc_cache_refresh_test.erl) | 0
-rw-r--r--  src/ddoc_cache/test/eunit/ddoc_cache_remove_test.erl (renamed from src/ddoc_cache/test/ddoc_cache_remove_test.erl) | 0
-rw-r--r--  src/ddoc_cache/test/eunit/ddoc_cache_test.hrl (renamed from src/ddoc_cache/test/ddoc_cache_test.hrl) | 0
-rw-r--r--  src/ddoc_cache/test/eunit/ddoc_cache_tutil.erl (renamed from src/ddoc_cache/test/ddoc_cache_tutil.erl) | 0
-rw-r--r--  src/dreyfus/src/dreyfus_fabric_cleanup.erl | 8
-rw-r--r--  src/dreyfus/src/dreyfus_util.erl | 5
-rw-r--r--  src/dreyfus/test/elixir/test/partition_search_test.exs | 4
-rw-r--r--  src/fabric/src/fabric.erl | 25
-rw-r--r--  src/fabric/src/fabric_view_all_docs.erl | 9
-rw-r--r--  src/fabric/test/eunit/fabric_rpc_purge_tests.erl (renamed from src/fabric/test/fabric_rpc_purge_tests.erl) | 0
-rw-r--r--  src/global_changes/test/eunit/global_changes_hooks_tests.erl (renamed from src/global_changes/test/global_changes_hooks_tests.erl) | 0
-rw-r--r--  src/mango/src/mango_error.erl | 2
-rw-r--r--  src/mem3/src/mem3_sync_event_listener.erl | 28
-rw-r--r--  src/mem3/src/mem3_util.erl | 16
-rw-r--r--  src/mem3/test/eunit/mem3_cluster_test.erl (renamed from src/mem3/test/mem3_cluster_test.erl) | 0
-rw-r--r--  src/mem3/test/eunit/mem3_hash_test.erl (renamed from src/mem3/test/mem3_hash_test.erl) | 0
-rw-r--r--  src/mem3/test/eunit/mem3_rep_test.erl (renamed from src/mem3/test/mem3_rep_test.erl) | 0
-rw-r--r--  src/mem3/test/eunit/mem3_reshard_api_test.erl (renamed from src/mem3/test/mem3_reshard_api_test.erl) | 0
-rw-r--r--  src/mem3/test/eunit/mem3_reshard_changes_feed_test.erl (renamed from src/mem3/test/mem3_reshard_changes_feed_test.erl) | 0
-rw-r--r--  src/mem3/test/eunit/mem3_reshard_test.erl (renamed from src/mem3/test/mem3_reshard_test.erl) | 0
-rw-r--r--  src/mem3/test/eunit/mem3_ring_prop_tests.erl (renamed from src/mem3/test/mem3_ring_prop_tests.erl) | 0
-rw-r--r--  src/mem3/test/eunit/mem3_seeds_test.erl (renamed from src/mem3/test/mem3_seeds_test.erl) | 0
-rw-r--r--  src/mem3/test/eunit/mem3_sync_security_test.erl (renamed from src/mem3/test/mem3_sync_security_test.erl) | 0
-rw-r--r--  src/mem3/test/eunit/mem3_util_test.erl (renamed from src/mem3/test/mem3_util_test.erl) | 0
-rw-r--r--  test/elixir/Makefile | 4
-rw-r--r--  test/elixir/README.md | 143
-rw-r--r--  test/elixir/lib/couch.ex | 155
-rw-r--r--  test/elixir/lib/couch/db_test.ex | 2
-rw-r--r--  test/elixir/lib/ex_unit.ex | 48
-rw-r--r--  test/elixir/lib/setup.ex | 97
-rw-r--r--  test/elixir/lib/setup/common.ex | 27
-rw-r--r--  test/elixir/lib/step.ex | 44
-rw-r--r--  test/elixir/lib/step/config.ex | 33
-rw-r--r--  test/elixir/lib/step/create_db.ex | 53
-rw-r--r--  test/elixir/lib/step/start.ex | 85
-rw-r--r--  test/elixir/lib/step/user.ex | 104
-rw-r--r--  test/elixir/lib/utils.ex | 61
-rw-r--r--  test/elixir/mix.exs | 37
-rwxr-xr-x  test/elixir/run | 6
-rw-r--r--  test/elixir/test/compact_test.exs | 3
-rw-r--r--  test/elixir/test/replication_test.exs | 42
-rw-r--r--  test/elixir/test/reshard_helpers.exs | 4
-rw-r--r--  test/elixir/test/test_helper.exs | 8
-rw-r--r--  test/javascript/tests/replication.js | 1920
234 files changed, 1731 insertions(+), 2871 deletions(-)
diff --git a/test/elixir/.credo.exs b/.credo.exs
index e24836c8f..2b84a5064 100644
--- a/test/elixir/.credo.exs
+++ b/.credo.exs
@@ -22,7 +22,16 @@
# In the latter case `**/*.{ex,exs}` will be used.
#
included: ["lib/", "src/", "test/", "web/", "apps/"],
- excluded: [~r"/_build/", ~r"/deps/", ~r"/node_modules/"]
+ excluded: [
+ ~r"/_build/",
+ ~r"/node_modules/",
+ ~r"/src/jason",
+ ~r"/src/httpotion",
+ ~r"/src/credo",
+ ~r"/src/junit_formatter",
+ ~r"/src/bunt",
+ ~r"/test/elixir/deps/"
+ ]
},
#
# If you create your own checks, you must specify the source files for
diff --git a/.formatter.exs b/.formatter.exs
new file mode 100644
index 000000000..28b883d54
--- /dev/null
+++ b/.formatter.exs
@@ -0,0 +1,9 @@
+# Used by "mix format"
+[
+ inputs: [
+ "{mix,.formatter}.exs",
+ "{config,src}/*/test/exunit/*.{ex,exs}"
+ ],
+ line_length: 90,
+ rename_deprecated_at: "1.5.0"
+]
diff --git a/.gitignore b/.gitignore
index 36bc13007..6b9198d42 100644
--- a/.gitignore
+++ b/.gitignore
@@ -104,3 +104,10 @@ src/global_changes/ebin/
src/mango/ebin/
src/mango/test/*.pyc
src/mango/venv/
+
+/_build/
+/src/bunt
+/src/credo/
+/src/httpotion/
+/src/jason/
+/src/junit_formatter/
diff --git a/.travis.yml b/.travis.yml
index 7ef4aeadf..4af915ee9 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -41,10 +41,6 @@ env:
- secure: "UdA/gKIlyuXaW+hUgRx40t1TYjLCGxMqHvM5Uw7UbUH2dqEkgJiLfhZGchS1JVzl8M01VKZUUzS7v2nvRLiHZN1kvaw5kfq31VRoafUah8jfmvqNWZVdLovHl3aw5UX/HRt0RkbWbhdbdknTfh6+YinSZ+Nb54jCErMg9nabXtM="
- COUCHDB_IO_LOG_DIR=/tmp/couchjslogs
-# Change to elixir folder so that travis can run mix deps.get during install
-before_install:
- - cd test/elixir
-
# Enable this block if you want to build docs & fauxton too
#node_js:
# - 6
@@ -53,7 +49,6 @@ before_install:
# Then comment this section out
before_script:
- - cd ../..
- kerl list installations
- rm -rf /tmp/couchjslogs
- mkdir -p /tmp/couchjslogs
diff --git a/Jenkinsfile b/Jenkinsfile
index c57574824..342ac6c83 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -14,35 +14,33 @@
// the License.
// DRYing out the Jenkinsfile...
-build_script = '''
-mkdir -p ${COUCHDB_IO_LOG_DIR}
-echo
-echo "Build CouchDB from tarball & test"
-builddir=$(mktemp -d)
-cd ${builddir}
+build_and_test = '''
+mkdir -p ${COUCHDB_IO_LOG_DIR}
+rm -rf build
+mkdir build
+cd build
tar -xf ${WORKSPACE}/apache-couchdb-*.tar.gz
cd apache-couchdb-*
./configure --with-curl
make check || (build-aux/logfile-uploader.py && false)
+'''
-echo
-echo "Build CouchDB packages"
-cd ${builddir}
+make_packages = '''
git clone https://github.com/apache/couchdb-pkg
+rm -rf couchdb
mkdir couchdb
cp ${WORKSPACE}/apache-couchdb-*.tar.gz couchdb
tar -xf ${WORKSPACE}/apache-couchdb-*.tar.gz -C couchdb
cd couchdb-pkg
make ${platform} PLATFORM=${platform}
+'''
-echo
-echo "Cleanup & save for posterity"
+cleanup_and_save = '''
rm -rf ${WORKSPACE}/pkgs/${platform}
mkdir -p ${WORKSPACE}/pkgs/${platform}
-mv ../rpmbuild/RPMS/$(arch)/*rpm ${WORKSPACE}/pkgs/${platform} || true
-mv ../couchdb/*.deb ${WORKSPACE}/pkgs/${platform} || true
-rm -rf ${builddir} ${COUCHDB_IO_LOG_DIR}
+mv ${WORKSPACE}/rpmbuild/RPMS/$(arch)/*rpm ${WORKSPACE}/pkgs/${platform} || true
+mv ${WORKSPACE}/couchdb/*.deb ${WORKSPACE}/pkgs/${platform} || true
'''
pipeline {
@@ -113,11 +111,7 @@ pipeline {
// https://issues.jenkins-ci.org/browse/JENKINS-47962
// https://issues.jenkins-ci.org/browse/JENKINS-48050
- // The builddir stuff is to prevent all the builds from live syncing
- // their build results to each other during the build, which ACTUALLY
- // HAPPENS. Ugh.
-
- stage('make check') {
+ stage('Test and Package') {
parallel {
@@ -138,18 +132,25 @@ pipeline {
mkdir -p $COUCHDB_IO_LOG_DIR
# Build CouchDB from tarball & test
- builddir=$(mktemp -d)
- cd $builddir
+ mkdir build
+ cd build
tar -xf $WORKSPACE/apache-couchdb-*.tar.gz
cd apache-couchdb-*
./configure --with-curl
gmake check || (build-aux/logfile-uploader.py && false)
# No package build for FreeBSD at this time
- rm -rf $builddir $COUCHDB_IO_LOG_DIR
'''
} // withEnv
} // steps
+ post {
+ always {
+ junit '**/.eunit/*.xml, **/_build/*/lib/couchdbtest/*.xml'
+ }
+ cleanup {
+ sh 'rm -rf $COUCHDB_IO_LOG_DIR'
+ }
+ } // post
} // stage FreeBSD
stage('CentOS 6') {
@@ -158,6 +159,8 @@ pipeline {
image 'couchdbdev/centos-6-erlang-19.3.6:latest'
alwaysPull true
label 'ubuntu'
+ // this keeps builds landing on the same host from clashing with each other
+ customWorkspace pwd() + '/centos6'
}
}
options {
@@ -167,14 +170,33 @@ pipeline {
environment {
platform = 'centos6'
}
- steps {
- sh 'rm -f apache-couchdb-*.tar.gz'
- unstash 'tarball'
- sh( script: build_script )
- } // steps
+ stages {
+ stage('Build from tarball & test') {
+ steps {
+ unstash 'tarball'
+ sh( script: build_and_test )
+ }
+ post {
+ always {
+ junit '**/.eunit/*.xml, **/_build/*/lib/couchdbtest/*.xml'
+ }
+ }
+ }
+ stage('Build CouchDB packages') {
+ steps {
+ sh( script: make_packages )
+ sh( script: cleanup_and_save )
+ }
+ post {
+ success {
+ archiveArtifacts artifacts: 'pkgs/**', fingerprint: true
+ }
+ }
+ }
+ } // stages
post {
- success {
- archiveArtifacts artifacts: 'pkgs/**', fingerprint: true
+ cleanup {
+ sh 'rm -rf ${WORKSPACE}/*'
}
} // post
} // stage
@@ -185,6 +207,7 @@ pipeline {
image 'couchdbdev/centos-7-erlang-19.3.6:latest'
alwaysPull true
label 'ubuntu'
+ customWorkspace pwd() + '/centos7'
}
}
options {
@@ -194,14 +217,34 @@ pipeline {
environment {
platform = 'centos7'
}
- steps {
- sh 'rm -f apache-couchdb-*.tar.gz'
- unstash 'tarball'
- sh( script: build_script )
- } // steps
+ stages {
+ stage('Build from tarball & test') {
+ steps {
+ unstash 'tarball'
+ sh( script: build_and_test )
+ }
+ post {
+ always {
+ junit '**/.eunit/*.xml, **/_build/*/lib/couchdbtest/*.xml'
+ }
+ }
+ }
+ stage('Build CouchDB packages') {
+ steps {
+ unstash 'tarball'
+ sh( script: make_packages )
+ sh( script: cleanup_and_save )
+ }
+ post {
+ success {
+ archiveArtifacts artifacts: 'pkgs/**', fingerprint: true
+ }
+ }
+ }
+ } // stages
post {
- success {
- archiveArtifacts artifacts: 'pkgs/**', fingerprint: true
+ cleanup {
+ sh 'rm -rf ${WORKSPACE}/*'
}
} // post
} // stage
@@ -212,6 +255,7 @@ pipeline {
image 'couchdbdev/ubuntu-xenial-erlang-19.3.6:latest'
alwaysPull true
label 'ubuntu'
+ customWorkspace pwd() + '/xenial'
}
}
options {
@@ -221,14 +265,33 @@ pipeline {
environment {
platform = 'xenial'
}
- steps {
- sh 'rm -f apache-couchdb-*.tar.gz'
- unstash 'tarball'
- sh( script: build_script )
- } // steps
+ stages {
+ stage('Build from tarball & test') {
+ steps {
+ unstash 'tarball'
+ sh( script: build_and_test )
+ }
+ post {
+ always {
+ junit '**/.eunit/*.xml, **/_build/*/lib/couchdbtest/*.xml'
+ }
+ }
+ }
+ stage('Build CouchDB packages') {
+ steps {
+ sh( script: make_packages )
+ sh( script: cleanup_and_save )
+ }
+ post {
+ success {
+ archiveArtifacts artifacts: 'pkgs/**', fingerprint: true
+ }
+ }
+ }
+ } // stages
post {
- success {
- archiveArtifacts artifacts: 'pkgs/**', fingerprint: true
+ cleanup {
+ sh 'rm -rf ${WORKSPACE}/*'
}
} // post
} // stage
@@ -239,6 +302,7 @@ pipeline {
image 'couchdbdev/ubuntu-bionic-erlang-19.3.6:latest'
alwaysPull true
label 'ubuntu'
+ customWorkspace pwd() + '/bionic'
}
}
options {
@@ -246,16 +310,35 @@ pipeline {
timeout(time: 90, unit: "MINUTES")
}
environment {
- platform = 'xenial'
+ platform = 'bionic'
}
- steps {
- sh 'rm -f apache-couchdb-*.tar.gz'
- unstash 'tarball'
- sh( script: build_script )
- } // steps
+ stages {
+ stage('Build from tarball & test') {
+ steps {
+ unstash 'tarball'
+ sh( script: build_and_test )
+ }
+ post {
+ always {
+ junit '**/.eunit/*.xml, **/_build/*/lib/couchdbtest/*.xml'
+ }
+ }
+ }
+ stage('Build CouchDB packages') {
+ steps {
+ sh( script: make_packages )
+ sh( script: cleanup_and_save )
+ }
+ post {
+ success {
+ archiveArtifacts artifacts: 'pkgs/**', fingerprint: true
+ }
+ }
+ }
+ } // stages
post {
- success {
- archiveArtifacts artifacts: 'pkgs/**', fingerprint: true
+ cleanup {
+ sh 'rm -rf ${WORKSPACE}/*'
}
} // post
} // stage
@@ -266,6 +349,7 @@ pipeline {
image 'couchdbdev/debian-jessie-erlang-19.3.6:latest'
alwaysPull true
label 'ubuntu'
+ customWorkspace pwd() + '/jessie'
}
}
options {
@@ -275,14 +359,33 @@ pipeline {
environment {
platform = 'jessie'
}
- steps {
- sh 'rm -f apache-couchdb-*.tar.gz'
- unstash 'tarball'
- sh( script: build_script )
- } // steps
+ stages {
+ stage('Build from tarball & test') {
+ steps {
+ unstash 'tarball'
+ sh( script: build_and_test )
+ }
+ post {
+ always {
+ junit '**/.eunit/*.xml, **/_build/*/lib/couchdbtest/*.xml'
+ }
+ }
+ }
+ stage('Build CouchDB packages') {
+ steps {
+ sh( script: make_packages )
+ sh( script: cleanup_and_save )
+ }
+ post {
+ success {
+ archiveArtifacts artifacts: 'pkgs/**', fingerprint: true
+ }
+ }
+ }
+ } // stages
post {
- success {
- archiveArtifacts artifacts: 'pkgs/**', fingerprint: true
+ cleanup {
+ sh 'rm -rf ${WORKSPACE}/*'
}
} // post
} // stage
@@ -293,6 +396,7 @@ pipeline {
image 'couchdbdev/debian-stretch-erlang-19.3.6:latest'
alwaysPull true
label 'ubuntu'
+ customWorkspace pwd() + '/stretch'
}
}
options {
@@ -300,16 +404,35 @@ pipeline {
timeout(time: 90, unit: "MINUTES")
}
environment {
- platform = 'jessie'
+ platform = 'stretch'
}
- steps {
- sh 'rm -f apache-couchdb-*.tar.gz'
- unstash 'tarball'
- sh( script: build_script )
- } // steps
+ stages {
+ stage('Build from tarball & test') {
+ steps {
+ unstash 'tarball'
+ sh( script: build_and_test )
+ }
+ post {
+ always {
+ junit '**/.eunit/*.xml, **/_build/*/lib/couchdbtest/*.xml'
+ }
+ }
+ }
+ stage('Build CouchDB packages') {
+ steps {
+ sh( script: make_packages )
+ sh( script: cleanup_and_save )
+ }
+ post {
+ success {
+ archiveArtifacts artifacts: 'pkgs/**', fingerprint: true
+ }
+ }
+ }
+ } // stages
post {
- success {
- archiveArtifacts artifacts: 'pkgs/**', fingerprint: true
+ cleanup {
+ sh 'rm -rf ${WORKSPACE}/*'
}
} // post
} // stage
@@ -320,6 +443,7 @@ pipeline {
image 'couchdbdev/aarch64-debian-stretch-erlang-20.3.8.20:latest'
alwaysPull true
label 'arm'
+ customWorkspace pwd() + '/arm'
}
}
options {
@@ -327,16 +451,37 @@ pipeline {
timeout(time: 90, unit: "MINUTES")
}
environment {
- platform = 'jessie'
+ platform = 'aarch64-debian-stretch'
}
- steps {
- sh 'rm -f apache-couchdb-*.tar.gz'
- unstash 'tarball'
- sh( script: build_script )
- } // steps
+ stages {
+ stage('Build from tarball & test') {
+ steps {
+ unstash 'tarball'
+ withEnv(['MIX_HOME='+pwd(), 'HEX_HOME='+pwd()]) {
+ sh( script: build_and_test )
+ }
+ }
+ post {
+ always {
+ junit '**/.eunit/*.xml, **/_build/*/lib/couchdbtest/*.xml'
+ }
+ }
+ }
+ stage('Build CouchDB packages') {
+ steps {
+ sh( script: make_packages )
+ sh( script: cleanup_and_save )
+ }
+ post {
+ success {
+ archiveArtifacts artifacts: 'pkgs/**', fingerprint: true
+ }
+ }
+ }
+ } // stages
post {
- success {
- archiveArtifacts artifacts: 'pkgs/**', fingerprint: true
+ cleanup {
+ sh 'rm -rf ${WORKSPACE}/*'
}
} // post
} // stage
@@ -384,8 +529,6 @@ pipeline {
reprepro -b couchdb-pkg/repo includedeb jessie pkgs/jessie/*.deb
cp js/debian-stretch/*.deb pkgs/stretch
reprepro -b couchdb-pkg/repo includedeb stretch pkgs/stretch/*.deb
- cp js/ubuntu-trusty/*.deb pkgs/trusty
- reprepro -b couchdb-pkg/repo includedeb trusty pkgs/trusty/*.deb
cp js/ubuntu-xenial/*.deb pkgs/xenial
reprepro -b couchdb-pkg/repo includedeb xenial pkgs/xenial/*.deb
cp js/ubuntu-bionic/*.deb pkgs/bionic
@@ -442,7 +585,7 @@ pipeline {
body: "Boo, we failed. ${env.RUN_DISPLAY_URL}"
}
cleanup {
- sh 'rm -rf ${WORKSPACE}/*'
+ sh 'rm -rf ${COUCHDB_IO_LOG_DIR}'
}
}
diff --git a/Makefile b/Makefile
index 0acf8284d..f09ae326a 100644
--- a/Makefile
+++ b/Makefile
@@ -168,10 +168,30 @@ eunit: export BUILDDIR = $(shell pwd)
eunit: export ERL_AFLAGS = -config $(shell pwd)/rel/files/eunit.config
eunit: export COUCHDB_QUERY_SERVER_JAVASCRIPT = $(shell pwd)/bin/couchjs $(shell pwd)/share/server/main.js
eunit: couch
- @$(REBAR) setup_eunit 2> /dev/null
+ @COUCHDB_VERSION=$(COUCHDB_VERSION) COUCHDB_GIT_SHA=$(COUCHDB_GIT_SHA) $(REBAR) setup_eunit 2> /dev/null
@for dir in $(subdirs); do \
- $(REBAR) -r eunit $(EUNIT_OPTS) apps=$$dir || exit 1; \
- done
+ tries=0; \
+ while true; do \
+ COUCHDB_VERSION=$(COUCHDB_VERSION) COUCHDB_GIT_SHA=$(COUCHDB_GIT_SHA) $(REBAR) -r eunit $(EUNIT_OPTS) apps=$$dir ; \
+ if [ $$? -eq 0 ]; then \
+ break; \
+ else \
+ tries=$$((tries+1)); \
+ [ $$tries -gt 2 ] && exit 1; \
+ fi \
+ done \
+ done
+
+
+.PHONY: exunit
+# target: exunit - Run ExUnit tests
+exunit: export BUILDDIR = $(shell pwd)
+exunit: export MIX_ENV=test
+exunit: export ERL_LIBS = $(shell pwd)/src
+exunit: export ERL_AFLAGS = -config $(shell pwd)/rel/files/eunit.config
+exunit: export COUCHDB_QUERY_SERVER_JAVASCRIPT = $(shell pwd)/bin/couchjs $(shell pwd)/share/server/main.js
+exunit: couch elixir-init setup-eunit elixir-check-formatted elixir-credo
+ @mix test --trace $(EXUNIT_OPTS)
setup-eunit: export BUILDDIR = $(shell pwd)
setup-eunit: export ERL_AFLAGS = -config $(shell pwd)/rel/files/eunit.config
@@ -212,34 +232,37 @@ python-black-update: .venv/bin/black
. dev/run rel/overlay/bin/couchup test/javascript/run
.PHONY: elixir
+elixir: export MIX_ENV=integration
elixir: elixir-init elixir-check-formatted elixir-credo devclean
- @dev/run -a adm:pass --no-eval 'test/elixir/run --exclude without_quorum_test --exclude with_quorum_test $(EXUNIT_OPTS)'
+ @dev/run -a adm:pass --no-eval 'mix test --trace --exclude without_quorum_test --exclude with_quorum_test $(EXUNIT_OPTS)'
.PHONY: elixir-init
elixir-init:
- @cd test/elixir && mix local.rebar --force && mix local.hex --force && mix deps.get
+ @mix local.rebar --force && mix local.hex --force && mix deps.get
.PHONY: elixir-cluster-without-quorum
-elixir-cluster-without-quorum: elixir-check-formatted elixir-credo devclean
+elixir-cluster-without-quorum: export MIX_ENV=integration
+elixir-cluster-without-quorum: elixir-init elixir-check-formatted elixir-credo devclean
@dev/run -n 3 -q -a adm:pass \
--degrade-cluster 2 \
- --no-eval 'test/elixir/run --only without_quorum_test $(EXUNIT_OPTS)'
+ --no-eval 'mix test --trace --only without_quorum_test $(EXUNIT_OPTS)'
.PHONY: elixir-cluster-with-quorum
-elixir-cluster-with-quorum: elixir-check-formatted elixir-credo devclean
+elixir-cluster-with-quorum: export MIX_ENV=integration
+elixir-cluster-with-quorum: elixir-init elixir-check-formatted elixir-credo devclean
@dev/run -n 3 -q -a adm:pass \
--degrade-cluster 1 \
- --no-eval 'test/elixir/run --only with_quorum_test $(EXUNIT_OPTS)'
+ --no-eval 'mix test --trace --only with_quorum_test $(EXUNIT_OPTS)'
.PHONY: elixir-check-formatted
-elixir-check-formatted:
- @cd test/elixir/ && mix format --check-formatted
+elixir-check-formatted: elixir-init
+ @mix format --check-formatted
# Credo is a static code analysis tool for Elixir.
# We use it in our tests
.PHONY: elixir-credo
-elixir-credo:
- @cd test/elixir/ && mix credo
+elixir-credo: elixir-init
+ @mix credo
.PHONY: javascript
# target: javascript - Run JavaScript test suites or specific ones defined by suites option
@@ -347,8 +370,8 @@ build-test:
mango-test: devclean all
@cd src/mango && \
python3 -m venv .venv && \
- .venv/bin/pip3 install -r requirements.txt
- @cd src/mango && ../../dev/run -n 1 --admin=testuser:testpass .venv/bin/nosetests
+ .venv/bin/python3 -m pip install -r requirements.txt
+ @cd src/mango && ../../dev/run -n 1 --admin=testuser:testpass '.venv/bin/python3 -m nose'
################################################################################
# Developing
diff --git a/Makefile.win b/Makefile.win
index 99ec71278..eda27a02a 100644
--- a/Makefile.win
+++ b/Makefile.win
@@ -141,8 +141,18 @@ eunit: export ERL_AFLAGS = $(shell echo "-config rel/files/eunit.config")
eunit: export BUILDDIR = $(shell echo %cd%)
eunit: export COUCHDB_QUERY_SERVER_JAVASCRIPT = $(shell echo %cd%)/bin/couchjs $(shell echo %cd%)/share/server/main.js
eunit: couch
- @$(REBAR) setup_eunit 2> nul
- @$(REBAR) -r eunit $(EUNIT_OPTS)
+ @set COUCHDB_VERSION=$(COUCHDB_VERSION) && set COUCHDB_GIT_SHA=$(COUCHDB_GIT_SHA) && $(REBAR) setup_eunit 2> nul
+ @set COUCHDB_VERSION=$(COUCHDB_VERSION) && set COUCHDB_GIT_SHA=$(COUCHDB_GIT_SHA) && $(REBAR) -r eunit $(EUNIT_OPTS)
+
+.PHONY: exunit
+# target: exunit - Run ExUnit tests
+exunit: export BUILDDIR = $(shell echo %cd%)
+exunit: export MIX_ENV=test
+exunit: export ERL_LIBS = $(shell echo %cd%)\src
+exunit: export ERL_AFLAGS = -config $(shell echo %cd%)/rel/files/eunit.config
+exunit: export COUCHDB_QUERY_SERVER_JAVASCRIPT = $(shell echo %cd%)/bin/couchjs $(shell echo %cd%)/share/server/main.js
+exunit: couch elixir-init setup-eunit elixir-check-formatted elixir-credo
+ @mix test --trace $(EXUNIT_OPTS)
setup-eunit: export BUILDDIR = $(shell pwd)
setup-eunit: export ERL_AFLAGS = $(shell echo "-config rel/files/eunit.config")
@@ -177,33 +187,33 @@ python-black-update: .venv/bin/black
.PHONY: elixir
elixir: elixir-init elixir-check-formatted elixir-credo devclean
- @dev\run -a adm:pass --no-eval 'test\elixir\run.cmd --exclude without_quorum_test --exclude with_quorum_test $(EXUNIT_OPTS)'
+ @dev\run -a adm:pass --no-eval 'mix test --trace --exclude without_quorum_test --exclude with_quorum_test $(EXUNIT_OPTS)'
.PHONY: elixir-init
elixir-init:
- @cd test/elixir && mix local.rebar --force && mix local.hex --force && mix deps.get
+ @mix local.rebar --force && mix local.hex --force && mix deps.get
.PHONY: elixir-cluster-without-quorum
elixir-cluster-without-quorum: elixir-check-formatted elixir-credo devclean
@dev\run -n 3 -q -a adm:pass \
--degrade-cluster 2 \
- --no-eval 'test\elixir\run.cmd --only without_quorum_test $(EXUNIT_OPTS)'
+ --no-eval 'mix test --trace --only without_quorum_test $(EXUNIT_OPTS)'
.PHONY: elixir-cluster-with-quorum
elixir-cluster-with-quorum: elixir-check-formatted elixir-credo devclean
@dev\run -n 3 -q -a adm:pass \
--degrade-cluster 1 \
- --no-eval 'test\elixir\run.cmd --only with_quorum_test $(EXUNIT_OPTS)'
+ --no-eval 'mix test --trace --only with_quorum_test $(EXUNIT_OPTS)'
.PHONY: elixir-check-formatted
elixir-check-formatted:
- @cd test\elixir && mix format --check-formatted
+ @mix format --check-formatted
# Credo is a static code analysis tool for Elixir.
# We use it in our tests
.PHONY: elixir-credo
elixir-credo:
- @cd test/elixir/ && mix credo
+ @mix credo
.PHONY: test-cluster-with-quorum
test-cluster-with-quorum: devclean
diff --git a/config/config.exs b/config/config.exs
new file mode 100644
index 000000000..8e52433cc
--- /dev/null
+++ b/config/config.exs
@@ -0,0 +1,30 @@
+# This file is responsible for configuring your application
+# and its dependencies with the aid of the Mix.Config module.
+use Mix.Config
+
+# This configuration is loaded before any dependency and is restricted
+# to this project. If another project depends on this project, this
+# file won't be loaded nor affect the parent project. For this reason,
+# if you want to provide default values for your application for
+# 3rd-party users, it should be done in your "mix.exs" file.
+
+# You can configure your application as:
+#
+# config :couchdbtest, key: :value
+#
+# and access this configuration in your application as:
+#
+# Application.get_env(:couchdbtest, :key)
+#
+# You can also configure a 3rd-party app:
+#
+# config :logger, level: :info
+#
+
+# It is also possible to import configuration files, relative to this
+# directory. For example, you can emulate configuration per environment
+# by uncommenting the line below and defining dev.exs, test.exs and such.
+# Configuration from the imported file will override the ones defined
+# here (which is why it is important to import them last).
+#
+import_config "#{Mix.env}.exs"
\ No newline at end of file
diff --git a/config/dev.exs b/config/dev.exs
new file mode 100644
index 000000000..d2d855e6d
--- /dev/null
+++ b/config/dev.exs
@@ -0,0 +1 @@
+use Mix.Config
diff --git a/config/integration.exs b/config/integration.exs
new file mode 100644
index 000000000..c5a5ed24a
--- /dev/null
+++ b/config/integration.exs
@@ -0,0 +1,12 @@
+use Mix.Config
+
+config :logger,
+ backends: [:console],
+ compile_time_purge_level: :debug,
+ level: :debug
+
+config :kernel,
+ error_logger: false
+
+config :sasl,
+ sasl_error_logger: false
diff --git a/config/prod.exs b/config/prod.exs
new file mode 100644
index 000000000..d2d855e6d
--- /dev/null
+++ b/config/prod.exs
@@ -0,0 +1 @@
+use Mix.Config
diff --git a/config/test.exs b/config/test.exs
new file mode 100644
index 000000000..c5a5ed24a
--- /dev/null
+++ b/config/test.exs
@@ -0,0 +1,12 @@
+use Mix.Config
+
+config :logger,
+ backends: [:console],
+ compile_time_purge_level: :debug,
+ level: :debug
+
+config :kernel,
+ error_logger: false
+
+config :sasl,
+ sasl_error_logger: false
diff --git a/dev/run b/dev/run
index 60e7d5c35..10351eb86 100755
--- a/dev/run
+++ b/dev/run
@@ -690,27 +690,37 @@ def generate_cookie():
def cluster_setup_with_admin_party(ctx):
+ connect_nodes(ctx)
+ host, port = "127.0.0.1", cluster_port(ctx, 1)
+ create_system_databases(host, port)
+
+
+def connect_nodes(ctx):
host, port = "127.0.0.1", backend_port(ctx, 1)
for node in ctx["nodes"]:
- body = "{}"
- conn = httpclient.HTTPConnection(host, port)
- conn.request("PUT", "/_nodes/%s@127.0.0.1" % node, body)
- resp = conn.getresponse()
- if resp.status not in (200, 201, 202, 409):
- print(("Failed to join %s into cluster: %s" % (node, resp.read())))
- sys.exit(1)
- create_system_databases(host, cluster_port(ctx, 1))
+ path = "/_nodes/%s@127.0.0.1" % node
+ try_request(
+ host,
+ port,
+ "PUT",
+ path,
+ (200, 201, 202, 409),
+ body="{}",
+ error="Failed to join %s into cluster:\n" % node,
+ )
-def try_request(host, port, meth, path, success_codes, retries=10, retry_dt=1):
+def try_request(
+ host, port, meth, path, success_codes, body=None, retries=10, retry_dt=1, error=""
+):
while True:
conn = httpclient.HTTPConnection(host, port)
- conn.request(meth, path)
+ conn.request(meth, path, body=body)
resp = conn.getresponse()
if resp.status in success_codes:
return resp.status, resp.read()
elif retries <= 0:
- assert resp.status in success_codes, resp.read()
+ assert resp.status in success_codes, "%s%s" % (error, resp.read())
retries -= 1
time.sleep(retry_dt)
@@ -721,7 +731,14 @@ def create_system_databases(host, port):
conn.request("HEAD", "/" + dbname)
resp = conn.getresponse()
if resp.status == 404:
- try_request(host, port, "PUT", "/" + dbname, (201, 202, 412))
+ try_request(
+ host,
+ port,
+ "PUT",
+ "/" + dbname,
+ (201, 202, 412),
+ error="Failed to create '%s' database:\n" % dbname,
+ )
@log(
diff --git a/mix.exs b/mix.exs
new file mode 100644
index 000000000..2859da3d1
--- /dev/null
+++ b/mix.exs
@@ -0,0 +1,61 @@
+defmodule CouchDBTest.Mixfile do
+ use Mix.Project
+
+ def project do
+ [
+ app: :couchdbtest,
+ version: "0.1.0",
+ elixir: "~> 1.5",
+ lockfile: Path.expand("mix.lock", __DIR__),
+ deps_path: Path.expand("src", __DIR__),
+ build_path: Path.expand("_build", __DIR__),
+ compilers: [:elixir, :app],
+ start_permanent: Mix.env() == :prod,
+ build_embedded: Mix.env() == :prod,
+ deps: deps(),
+ consolidate_protocols: Mix.env() not in [:test, :dev, :integration],
+ test_paths: get_test_paths(Mix.env()),
+ elixirc_paths: elixirc_paths(Mix.env())
+ ]
+ end
+
+ # Run "mix help compile.app" to learn about applications.
+ def application do
+ [
+ extra_applications: [:logger],
+ applications: [:httpotion]
+ ]
+ end
+
+ # Specifies which paths to compile per environment.
+ defp elixirc_paths(:test), do: ["test/elixir/lib", "test/elixir/test/support"]
+ defp elixirc_paths(:integration), do: ["test/elixir/lib", "test/elixir/test/support"]
+ defp elixirc_paths(_), do: ["test/elixir/lib"]
+
+ # Run "mix help deps" to learn about dependencies.
+ defp deps() do
+ [
+ {:junit_formatter, "~> 3.0", only: [:dev, :test, :integration]},
+ {:httpotion, ">= 3.1.3", only: [:dev, :test, :integration], runtime: false},
+ {:jiffy, path: Path.expand("src/jiffy", __DIR__)},
+ {:ibrowse,
+ path: Path.expand("src/ibrowse", __DIR__), override: true, compile: false},
+ {:credo, "~> 1.0.0", only: [:dev, :test, :integration], runtime: false}
+ ]
+ end
+
+ def get_test_paths(:test) do
+ Path.wildcard("src/*/test/exunit") |> Enum.filter(&File.dir?/1)
+ end
+
+ def get_test_paths(:integration) do
+ integration_tests =
+ Path.wildcard("src/*/test/integration") |> Enum.filter(&File.dir?/1)
+
+ ["test/elixir/test" | integration_tests]
+ end
+
+ def get_test_paths(_) do
+ []
+ end
+end
diff --git a/test/elixir/mix.lock b/mix.lock
index 0fc391a92..343215fdc 100644
--- a/test/elixir/mix.lock
+++ b/mix.lock
@@ -1,8 +1,8 @@
%{
"bunt": {:hex, :bunt, "0.2.0", "951c6e801e8b1d2cbe58ebbd3e616a869061ddadcc4863d0a2182541acae9a38", [:mix], [], "hexpm"},
- "credo": {:hex, :credo, "1.0.0", "aaa40fdd0543a0cf8080e8c5949d8c25f0a24e4fc8c1d83d06c388f5e5e0ea42", [:mix], [{:bunt, "~> 0.2.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm"},
- "httpotion": {:hex, :httpotion, "3.1.0", "14d20d9b0ce4e86e253eb91e4af79e469ad949f57a5d23c0a51b2f86559f6589", [:mix], [{:ibrowse, "~> 4.4", [hex: :ibrowse, repo: "hexpm", optional: false]}], "hexpm"},
- "ibrowse": {:hex, :ibrowse, "4.4.1", "2b7d0637b0f8b9b4182de4bd0f2e826a4da2c9b04898b6e15659ba921a8d6ec2", [:rebar3], [], "hexpm"},
+ "credo": {:hex, :credo, "1.0.5", "fdea745579f8845315fe6a3b43e2f9f8866839cfbc8562bb72778e9fdaa94214", [:mix], [{:bunt, "~> 0.2.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm"},
+ "httpotion": {:hex, :httpotion, "3.1.3", "fdaf1e16b9318dcb722de57e75ac368c93d4c6e3c9125f93e960f953a750fb77", [:mix], [{:ibrowse, "== 4.4.0", [hex: :ibrowse, repo: "hexpm", optional: false]}], "hexpm"},
+ "ibrowse": {:hex, :ibrowse, "4.4.0", "2d923325efe0d2cb09b9c6a047b2835a5eda69d8a47ed6ff8bc03628b764e991", [:rebar3], [], "hexpm"},
"jason": {:hex, :jason, "1.1.2", "b03dedea67a99223a2eaf9f1264ce37154564de899fd3d8b9a21b1a6fd64afe7", [:mix], [{:decimal, "~> 1.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm"},
"jiffy": {:hex, :jiffy, "0.15.2", "de266c390111fd4ea28b9302f0bc3d7472468f3b8e0aceabfbefa26d08cd73b7", [:rebar3], [], "hexpm"},
"junit_formatter": {:hex, :junit_formatter, "3.0.0", "13950d944dbd295da7d8cc4798b8faee808a8bb9b637c88069954eac078ac9da", [:mix], [], "hexpm"},
diff --git a/rebar.config.script b/rebar.config.script
index 6445057e7..c38b6e235 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -156,7 +156,7 @@ AddConfig = [
{sub_dirs, SubDirs},
{lib_dirs, ["src"]},
{erl_opts, [{i, "../"} | ErlOpts]},
- {eunit_opts, [verbose]},
+ {eunit_opts, [verbose, {report,{eunit_surefire,[{dir,"."}]}}]},
{plugins, [eunit_plugin]},
{dialyzer, [
{plt_location, local},
diff --git a/src/chttpd/test/chttpd_cors_test.erl b/src/chttpd/test/eunit/chttpd_cors_test.erl
index 19e851561..19e851561 100644
--- a/src/chttpd/test/chttpd_cors_test.erl
+++ b/src/chttpd/test/eunit/chttpd_cors_test.erl
diff --git a/src/chttpd/test/chttpd_csp_tests.erl b/src/chttpd/test/eunit/chttpd_csp_tests.erl
index e86436254..e86436254 100644
--- a/src/chttpd/test/chttpd_csp_tests.erl
+++ b/src/chttpd/test/eunit/chttpd_csp_tests.erl
diff --git a/src/chttpd/test/chttpd_db_attachment_size_tests.erl b/src/chttpd/test/eunit/chttpd_db_attachment_size_tests.erl
index 0ab08dd80..0ab08dd80 100644
--- a/src/chttpd/test/chttpd_db_attachment_size_tests.erl
+++ b/src/chttpd/test/eunit/chttpd_db_attachment_size_tests.erl
diff --git a/src/chttpd/test/chttpd_db_bulk_get_multipart_test.erl b/src/chttpd/test/eunit/chttpd_db_bulk_get_multipart_test.erl
index 8a95c92ac..8a95c92ac 100644
--- a/src/chttpd/test/chttpd_db_bulk_get_multipart_test.erl
+++ b/src/chttpd/test/eunit/chttpd_db_bulk_get_multipart_test.erl
diff --git a/src/chttpd/test/chttpd_db_bulk_get_test.erl b/src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl
index 864e7079a..864e7079a 100644
--- a/src/chttpd/test/chttpd_db_bulk_get_test.erl
+++ b/src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl
diff --git a/src/chttpd/test/chttpd_db_doc_size_tests.erl b/src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl
index 88e2797a3..88e2797a3 100644
--- a/src/chttpd/test/chttpd_db_doc_size_tests.erl
+++ b/src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl
diff --git a/src/chttpd/test/chttpd_db_test.erl b/src/chttpd/test/eunit/chttpd_db_test.erl
index 2708aa033..c819bdf6e 100644
--- a/src/chttpd/test/chttpd_db_test.erl
+++ b/src/chttpd/test/eunit/chttpd_db_test.erl
@@ -23,6 +23,7 @@
-define(DESTHEADER2, {"Destination", "foo%2Fbar%23baz%3Fpow%3Afiz"}).
-define(FIXTURE_TXT, ?ABS_PATH(?FILE)).
-define(i2l(I), integer_to_list(I)).
+-define(TIMEOUT, 60). % seconds
setup() ->
Hashed = couch_passwords:hash_admin_password(?PASS),
@@ -72,6 +73,7 @@ all_test_() ->
fun should_not_return_update_seq_when_unset_on_all_docs/1,
fun should_return_correct_id_on_doc_copy/1,
fun should_return_400_for_bad_engine/1,
+ fun should_not_change_db_proper_after_rewriting_shardmap/1,
fun should_succeed_on_all_docs_with_queries_keys/1,
fun should_succeed_on_all_docs_with_queries_limit_skip/1,
fun should_succeed_on_all_docs_with_multiple_queries/1,
@@ -88,7 +90,7 @@ all_test_() ->
should_return_ok_true_on_bulk_update(Url) ->
- ?_assertEqual(true,
+ {timeout, ?TIMEOUT, ?_assertEqual(true,
begin
{ok, _, _, Body} = create_doc(Url, "testdoc"),
{Json} = ?JSON_DECODE(Body),
@@ -99,27 +101,27 @@ should_return_ok_true_on_bulk_update(Url) ->
ResultJson = ?JSON_DECODE(ResultBody),
{InnerJson} = lists:nth(1, ResultJson),
couch_util:get_value(<<"ok">>, InnerJson, undefined)
- end).
+ end)}.
should_return_ok_true_on_ensure_full_commit(Url0) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
Url = Url0 ++ "/_ensure_full_commit",
{ok, RC, _, Body} = test_request:post(Url, [?CONTENT_JSON, ?AUTH], []),
{Json} = ?JSON_DECODE(Body),
?assertEqual(201, RC),
?assert(couch_util:get_value(<<"ok">>, Json))
- end).
+ end)}.
should_return_404_for_ensure_full_commit_on_no_db(Url0) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
Url = Url0 ++ "-missing-db" ++ "/_ensure_full_commit",
{ok, RC, _, Body} = test_request:post(Url, [?CONTENT_JSON, ?AUTH], []),
{Json} = ?JSON_DECODE(Body),
?assertEqual(404, RC),
?assertEqual(<<"not_found">>, couch_util:get_value(<<"error">>, Json))
- end).
+ end)}.
should_accept_live_as_an_alias_for_continuous(Url) ->
@@ -135,7 +137,7 @@ should_accept_live_as_an_alias_for_continuous(Url) ->
end,
couch_util:get_value(<<"last_seq">>, Result, undefined)
end,
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
{ok, _, _, ResultBody1} =
test_request:get(Url ++ "/_changes?feed=live&timeout=1", [?AUTH]),
LastSeq1 = GetLastSeq(ResultBody1),
@@ -146,11 +148,11 @@ should_accept_live_as_an_alias_for_continuous(Url) ->
LastSeq2 = GetLastSeq(ResultBody2),
?assertNotEqual(LastSeq1, LastSeq2)
- end).
+ end)}.
should_return_404_for_delete_att_on_notadoc(Url) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
{ok, RC, _, RespBody} = test_request:delete(
Url ++ "/notadoc/att.pdf",
[?CONTENT_JSON, ?AUTH],
@@ -168,11 +170,11 @@ should_return_404_for_delete_att_on_notadoc(Url) ->
[]
),
?assertEqual(404, RC1)
- end).
+ end)}.
should_return_409_for_del_att_without_rev(Url) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
{ok, RC, _, _} = test_request:put(
Url ++ "/testdoc3",
[?CONTENT_JSON, ?AUTH],
@@ -186,11 +188,11 @@ should_return_409_for_del_att_without_rev(Url) ->
[]
),
?assertEqual(409, RC1)
- end).
+ end)}.
should_return_200_for_del_att_with_rev(Url) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
{ok, RC, _Headers, RespBody} = test_request:put(
Url ++ "/testdoc4",
[?CONTENT_JSON, ?AUTH],
@@ -207,11 +209,11 @@ should_return_200_for_del_att_with_rev(Url) ->
[]
),
?assertEqual(200, RC1)
- end).
+ end)}.
should_return_409_for_put_att_nonexistent_rev(Url) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
{ok, RC, _Headers, RespBody} = test_request:put(
Url ++ "/should_return_404/file.erl?rev=1-000",
[?CONTENT_JSON, ?AUTH],
@@ -222,11 +224,11 @@ should_return_409_for_put_att_nonexistent_rev(Url) ->
{<<"error">>,<<"not_found">>},
{<<"reason">>,<<"missing_rev">>}]},
?JSON_DECODE(RespBody))
- end).
+ end)}.
should_return_update_seq_when_set_on_all_docs(Url) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
[create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 3)],
{ok, RC, _, RespBody} = test_request:get(Url ++ "/_all_docs/"
++ "?update_seq=true&keys=[\"testdoc1\"]",[?CONTENT_JSON, ?AUTH]),
@@ -236,11 +238,11 @@ should_return_update_seq_when_set_on_all_docs(Url) ->
couch_util:get_value(<<"update_seq">>, ResultJson)),
?assertNotEqual(undefined,
couch_util:get_value(<<"offset">>, ResultJson))
- end).
+ end)}.
should_not_return_update_seq_when_unset_on_all_docs(Url) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
[create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 3)],
{ok, RC, _, RespBody} = test_request:get(Url ++ "/_all_docs/"
++ "?update_seq=false&keys=[\"testdoc1\"]",[?CONTENT_JSON, ?AUTH]),
@@ -250,11 +252,11 @@ should_not_return_update_seq_when_unset_on_all_docs(Url) ->
couch_util:get_value(<<"update_seq">>, ResultJson)),
?assertNotEqual(undefined,
couch_util:get_value(<<"offset">>, ResultJson))
- end).
+ end)}.
should_return_correct_id_on_doc_copy(Url) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
{ok, _, _, _} = create_doc(Url, "testdoc"),
{_, _, _, ResultBody1} = test_request:copy(Url ++ "/testdoc/",
[?CONTENT_JSON, ?AUTH, ?DESTHEADER1]),
@@ -269,7 +271,7 @@ should_return_correct_id_on_doc_copy(Url) ->
?assertEqual(<<102,111,111,229,149,138,98,97,114>>, Id1),
?assertEqual(<<"foo/bar#baz?pow:fiz">>, Id2)
]
- end).
+ end)}.
attachment_doc() ->
@@ -285,7 +287,7 @@ attachment_doc() ->
should_return_400_for_bad_engine(_) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
TmpDb = ?tempdb(),
Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(chttpd, port),
@@ -293,11 +295,34 @@ should_return_400_for_bad_engine(_) ->
Url = BaseUrl ++ "?engine=cowabunga",
{ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
?assertEqual(400, Status)
- end).
+ end)}.
+
+
+should_not_change_db_proper_after_rewriting_shardmap(_) ->
+ {timeout, ?TIMEOUT, ?_test(begin
+ TmpDb = ?tempdb(),
+ Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
+ Port = mochiweb_socket_server:get(chttpd, port),
+ AdmPort = mochiweb_socket_server:get(couch_httpd, port),
+
+ BaseUrl = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
+ Url = BaseUrl ++ "?partitioned=true&q=1",
+ {ok, 201, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
+
+ ShardDbName = ?l2b(config:get("mem3", "shards_db", "_dbs")),
+ {ok, ShardDb} = mem3_util:ensure_exists(ShardDbName),
+ {ok, #doc{body = {Props}}} = couch_db:open_doc(
+ ShardDb, TmpDb, [ejson_body]),
+ Shards = mem3_util:build_shards(TmpDb, Props),
+
+ {Prop2} = ?JSON_DECODE(?JSON_ENCODE({Props})),
+ Shards2 = mem3_util:build_shards(TmpDb, Prop2),
+ ?assertEqual(Shards2, Shards)
+ end)}.
should_succeed_on_all_docs_with_queries_keys(Url) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
[create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\", \"testdoc8\"]}]}",
{ok, RC, _, RespBody} = test_request:post(Url ++ "/_all_docs/queries/",
@@ -307,11 +332,11 @@ should_succeed_on_all_docs_with_queries_keys(Url) ->
ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
{InnerJson} = lists:nth(1, ResultJsonBody),
?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson)))
- end).
+ end)}.
should_succeed_on_all_docs_with_queries_limit_skip(Url) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
[create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}",
{ok, RC, _, RespBody} = test_request:post(Url ++ "/_all_docs/queries/",
@@ -322,11 +347,11 @@ should_succeed_on_all_docs_with_queries_limit_skip(Url) ->
{InnerJson} = lists:nth(1, ResultJsonBody),
?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)),
?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson)))
- end).
+ end)}.
should_succeed_on_all_docs_with_multiple_queries(Url) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
[create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\", \"testdoc8\"]},
{\"limit\": 5, \"skip\": 2}]}",
@@ -340,11 +365,11 @@ should_succeed_on_all_docs_with_multiple_queries(Url) ->
{InnerJson2} = lists:nth(2, ResultJsonBody),
?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)),
?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2)))
- end).
+ end)}.
should_succeed_on_design_docs_with_queries_keys(Url) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
[create_doc(Url, "_design/ddoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
QueryDoc = "{\"queries\": [{\"keys\": [ \"_design/ddoc3\",
\"_design/ddoc8\"]}]}",
@@ -355,11 +380,11 @@ should_succeed_on_design_docs_with_queries_keys(Url) ->
ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
{InnerJson} = lists:nth(1, ResultJsonBody),
?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson)))
- end).
+ end)}.
should_succeed_on_design_docs_with_queries_limit_skip(Url) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
[create_doc(Url, "_design/ddoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}",
{ok, RC, _, RespBody} = test_request:post(Url ++
@@ -370,11 +395,11 @@ should_succeed_on_design_docs_with_queries_limit_skip(Url) ->
{InnerJson} = lists:nth(1, ResultJsonBody),
?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)),
?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson)))
- end).
+ end)}.
should_succeed_on_design_docs_with_multiple_queries(Url) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
[create_doc(Url, "_design/ddoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
QueryDoc = "{\"queries\": [{\"keys\": [ \"_design/ddoc3\",
\"_design/ddoc8\"]}, {\"limit\": 5, \"skip\": 2}]}",
@@ -388,11 +413,11 @@ should_succeed_on_design_docs_with_multiple_queries(Url) ->
{InnerJson2} = lists:nth(2, ResultJsonBody),
?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)),
?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2)))
- end).
+ end)}.
should_succeed_on_local_docs_with_queries_keys(Url) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
[create_doc(Url, "_local/doc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
QueryDoc = "{\"queries\": [{\"keys\":
[ \"_local/doc3\", \"_local/doc8\"]}]}",
@@ -403,11 +428,11 @@ should_succeed_on_local_docs_with_queries_keys(Url) ->
ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
{InnerJson} = lists:nth(1, ResultJsonBody),
?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson)))
- end).
+ end)}.
should_succeed_on_local_docs_with_queries_limit_skip(Url) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
[create_doc(Url, "_local/doc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}",
{ok, RC, _, RespBody} = test_request:post(Url ++
@@ -417,11 +442,11 @@ should_succeed_on_local_docs_with_queries_limit_skip(Url) ->
ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
{InnerJson} = lists:nth(1, ResultJsonBody),
?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson)))
- end).
+ end)}.
should_succeed_on_local_docs_with_multiple_queries(Url) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
[create_doc(Url, "_local/doc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
QueryDoc = "{\"queries\": [{\"keys\": [ \"_local/doc3\",
\"_local/doc8\"]}, {\"limit\": 5, \"skip\": 2}]}",
@@ -434,4 +459,4 @@ should_succeed_on_local_docs_with_multiple_queries(Url) ->
?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))),
{InnerJson2} = lists:nth(2, ResultJsonBody),
?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2)))
- end).
+ end)}.
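
Note on the pattern applied throughout chttpd_db_test.erl above: each ?_test(...) instantiator is wrapped in a {timeout, ?TIMEOUT, ...} control tuple. EUnit aborts any single test after 5 seconds by default, and the middle element of the tuple is expressed in seconds, which is why the new define carries an explicit "% seconds" comment. A minimal self-contained sketch of the same pattern (module name and sleep are hypothetical, not part of the commit):

    -module(timeout_sketch_tests).
    -include_lib("eunit/include/eunit.hrl").

    -define(TIMEOUT, 60). % seconds, matching the define above

    %% Without the wrapper EUnit would kill this test after 5 seconds;
    %% {timeout, Seconds, Test} raises the budget for one test object.
    slow_http_test_() ->
        {timeout, ?TIMEOUT, ?_test(begin
            timer:sleep(6000), % stand-in for a slow HTTP round trip
            ?assert(true)
        end)}.
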
diff --git a/src/chttpd/test/chttpd_dbs_info_test.erl b/src/chttpd/test/eunit/chttpd_dbs_info_test.erl
index 5b61d8831..5b61d8831 100644
--- a/src/chttpd/test/chttpd_dbs_info_test.erl
+++ b/src/chttpd/test/eunit/chttpd_dbs_info_test.erl
diff --git a/src/chttpd/test/chttpd_error_info_tests.erl b/src/chttpd/test/eunit/chttpd_error_info_tests.erl
index fdb015c08..fdb015c08 100644
--- a/src/chttpd/test/chttpd_error_info_tests.erl
+++ b/src/chttpd/test/eunit/chttpd_error_info_tests.erl
diff --git a/src/chttpd/test/chttpd_handlers_tests.erl b/src/chttpd/test/eunit/chttpd_handlers_tests.erl
index f3e8f5dcd..f3e8f5dcd 100644
--- a/src/chttpd/test/chttpd_handlers_tests.erl
+++ b/src/chttpd/test/eunit/chttpd_handlers_tests.erl
diff --git a/src/chttpd/test/chttpd_open_revs_error_test.erl b/src/chttpd/test/eunit/chttpd_open_revs_error_test.erl
index d53d370f8..d53d370f8 100644
--- a/src/chttpd/test/chttpd_open_revs_error_test.erl
+++ b/src/chttpd/test/eunit/chttpd_open_revs_error_test.erl
diff --git a/src/chttpd/test/chttpd_plugin_tests.erl b/src/chttpd/test/eunit/chttpd_plugin_tests.erl
index 36572a419..36572a419 100644
--- a/src/chttpd/test/chttpd_plugin_tests.erl
+++ b/src/chttpd/test/eunit/chttpd_plugin_tests.erl
diff --git a/src/chttpd/test/chttpd_prefer_header_test.erl b/src/chttpd/test/eunit/chttpd_prefer_header_test.erl
index 0f43ba437..0f43ba437 100644
--- a/src/chttpd/test/chttpd_prefer_header_test.erl
+++ b/src/chttpd/test/eunit/chttpd_prefer_header_test.erl
diff --git a/src/chttpd/test/chttpd_purge_tests.erl b/src/chttpd/test/eunit/chttpd_purge_tests.erl
index dbd73de1f..dbd73de1f 100644
--- a/src/chttpd/test/chttpd_purge_tests.erl
+++ b/src/chttpd/test/eunit/chttpd_purge_tests.erl
diff --git a/src/chttpd/test/chttpd_security_tests.erl b/src/chttpd/test/eunit/chttpd_security_tests.erl
index 955b4ff01..955b4ff01 100644
--- a/src/chttpd/test/chttpd_security_tests.erl
+++ b/src/chttpd/test/eunit/chttpd_security_tests.erl
diff --git a/src/chttpd/test/chttpd_socket_buffer_size_test.erl b/src/chttpd/test/eunit/chttpd_socket_buffer_size_test.erl
index 937880621..937880621 100644
--- a/src/chttpd/test/chttpd_socket_buffer_size_test.erl
+++ b/src/chttpd/test/eunit/chttpd_socket_buffer_size_test.erl
diff --git a/src/chttpd/test/chttpd_view_test.erl b/src/chttpd/test/eunit/chttpd_view_test.erl
index 3457c6f30..4c224bb4e 100644
--- a/src/chttpd/test/chttpd_view_test.erl
+++ b/src/chttpd/test/eunit/chttpd_view_test.erl
@@ -24,6 +24,7 @@
-define(FIXTURE_TXT, ?ABS_PATH(?FILE)).
-define(i2l(I), integer_to_list(I)).
+-define(TIMEOUT, 60). % seconds
setup() ->
Hashed = couch_passwords:hash_admin_password(?PASS),
@@ -71,7 +72,7 @@ all_view_test_() ->
should_succeed_on_view_with_queries_keys(Url) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
[create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
{ok, _, _, _} = test_request:put(Url ++ "/_design/bar",
[?CONTENT_JSON, ?AUTH], ?DDOC),
@@ -83,11 +84,11 @@ should_succeed_on_view_with_queries_keys(Url) ->
ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
{InnerJson} = lists:nth(1, ResultJsonBody),
?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson)))
- end).
+ end)}.
should_succeed_on_view_with_queries_limit_skip(Url) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
[create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
{ok, _, _, _} = test_request:put(Url ++ "/_design/bar",
[?CONTENT_JSON, ?AUTH], ?DDOC),
@@ -100,11 +101,11 @@ should_succeed_on_view_with_queries_limit_skip(Url) ->
{InnerJson} = lists:nth(1, ResultJsonBody),
?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)),
?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson)))
- end).
+ end)}.
should_succeed_on_view_with_multiple_queries(Url) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
[create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
{ok, _, _, _} = test_request:put(Url ++ "/_design/bar",
[?CONTENT_JSON, ?AUTH], ?DDOC),
@@ -120,4 +121,4 @@ should_succeed_on_view_with_multiple_queries(Url) ->
{InnerJson2} = lists:nth(2, ResultJsonBody),
?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)),
?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2)))
- end).
+ end)}.
diff --git a/src/chttpd/test/chttpd_welcome_test.erl b/src/chttpd/test/eunit/chttpd_welcome_test.erl
index e427f4dff..e427f4dff 100644
--- a/src/chttpd/test/chttpd_welcome_test.erl
+++ b/src/chttpd/test/eunit/chttpd_welcome_test.erl
diff --git a/src/chttpd/test/chttpd_xframe_test.erl b/src/chttpd/test/eunit/chttpd_xframe_test.erl
index 1272c198c..1272c198c 100644
--- a/src/chttpd/test/chttpd_xframe_test.erl
+++ b/src/chttpd/test/eunit/chttpd_xframe_test.erl
diff --git a/src/couch/include/couch_eunit.hrl b/src/couch/include/couch_eunit.hrl
index f4617e1d3..d3611c88b 100644
--- a/src/couch/include/couch_eunit.hrl
+++ b/src/couch/include/couch_eunit.hrl
@@ -28,7 +28,7 @@
filename:join([?BUILDDIR(), "tmp", "etc", "local_eunit.ini"]),
filename:join([?BUILDDIR(), "tmp", "etc", "eunit.ini"])]).
-define(FIXTURESDIR,
- filename:join([?BUILDDIR(), "src", "couch", "test", "fixtures"])).
+ filename:join([?BUILDDIR(), "src", "couch", "test", "eunit", "fixtures"])).
-define(TEMPDIR,
filename:join([?BUILDDIR(), "tmp", "tmp_data"])).
diff --git a/src/couch/test/chttpd_endpoints_tests.erl b/src/couch/test/eunit/chttpd_endpoints_tests.erl
index 9b7430823..9b7430823 100644
--- a/src/couch/test/chttpd_endpoints_tests.erl
+++ b/src/couch/test/eunit/chttpd_endpoints_tests.erl
diff --git a/src/couch/test/couch_auth_cache_tests.erl b/src/couch/test/eunit/couch_auth_cache_tests.erl
index 706c0cee9..706c0cee9 100644
--- a/src/couch/test/couch_auth_cache_tests.erl
+++ b/src/couch/test/eunit/couch_auth_cache_tests.erl
diff --git a/src/couch/test/couch_base32_tests.erl b/src/couch/test/eunit/couch_base32_tests.erl
index 7e4d59a09..7e4d59a09 100644
--- a/src/couch/test/couch_base32_tests.erl
+++ b/src/couch/test/eunit/couch_base32_tests.erl
diff --git a/src/couch/test/couch_bt_engine_compactor_tests.erl b/src/couch/test/eunit/couch_bt_engine_compactor_tests.erl
index 6c99ceb73..6c99ceb73 100644
--- a/src/couch/test/couch_bt_engine_compactor_tests.erl
+++ b/src/couch/test/eunit/couch_bt_engine_compactor_tests.erl
diff --git a/src/couch/test/couch_bt_engine_tests.erl b/src/couch/test/eunit/couch_bt_engine_tests.erl
index 3e3ecbf25..3e3ecbf25 100644
--- a/src/couch/test/couch_bt_engine_tests.erl
+++ b/src/couch/test/eunit/couch_bt_engine_tests.erl
diff --git a/src/couch/test/couch_bt_engine_upgrade_tests.erl b/src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl
index 3a516f8f7..a2a972caf 100644
--- a/src/couch/test/couch_bt_engine_upgrade_tests.erl
+++ b/src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl
@@ -15,6 +15,7 @@
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
+-define(TIMEOUT, 60). % seconds
setup(_) ->
Ctx = test_util:start_couch(),
@@ -63,7 +64,7 @@ upgrade_test_() ->
t_upgrade_without_purge_req(VersionFrom, {_Ctx, _NewPaths}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
% There are three documents in the fixture
% db with zero purge entries
DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom)
@@ -99,11 +100,11 @@ t_upgrade_without_purge_req(VersionFrom, {_Ctx, _NewPaths}) ->
?assertEqual({ok, 4}, couch_db:get_doc_count(Db)),
?assertEqual(1, couch_db:get_purge_seq(Db))
end)
- end).
+ end)}.
t_upgrade_with_1_purge_req(VersionFrom, {_Ctx, _NewPaths}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
% There are two documents in the fixture database
% with a single purge entry
DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom)
@@ -140,11 +141,11 @@ t_upgrade_with_1_purge_req(VersionFrom, {_Ctx, _NewPaths}) ->
?assertEqual({ok, 3}, couch_db:get_doc_count(Db)),
?assertEqual(2, couch_db:get_purge_seq(Db))
end)
- end).
+ end)}.
t_upgrade_with_N_purge_req(VersionFrom, {_Ctx, _NewPaths}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
% There is one document in the fixture database
% with two docs that have been purged
DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom)
@@ -179,11 +180,11 @@ t_upgrade_with_N_purge_req(VersionFrom, {_Ctx, _NewPaths}) ->
?assertEqual({ok, 2}, couch_db:get_doc_count(Db)),
?assertEqual(3, couch_db:get_purge_seq(Db))
end)
- end).
+ end)}.
t_upgrade_with_1_purge_req_for_2_docs(VersionFrom, {_Ctx, _NewPaths}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
% There are two documents (Doc4 and Doc5) in the fixture database
% with three docs (Doc1, Doc2 and Doc3) that have been purged, and
% with one purge req for Doc1 and another purge req for Doc2 and Doc3
@@ -219,7 +220,7 @@ t_upgrade_with_1_purge_req_for_2_docs(VersionFrom, {_Ctx, _NewPaths}) ->
?assertEqual({ok, 3}, couch_db:get_doc_count(Db)),
?assertEqual(4, couch_db:get_purge_seq(Db))
end)
- end).
+ end)}.
save_doc(DbName, Json) ->
diff --git a/src/couch/test/couch_btree_tests.erl b/src/couch/test/eunit/couch_btree_tests.erl
index c9b791d2c..c9b791d2c 100644
--- a/src/couch/test/couch_btree_tests.erl
+++ b/src/couch/test/eunit/couch_btree_tests.erl
diff --git a/src/couch/test/couch_changes_tests.erl b/src/couch/test/eunit/couch_changes_tests.erl
index 0c2f5f91f..0c2f5f91f 100644
--- a/src/couch/test/couch_changes_tests.erl
+++ b/src/couch/test/eunit/couch_changes_tests.erl
diff --git a/src/couch/test/couch_compress_tests.erl b/src/couch/test/eunit/couch_compress_tests.erl
index addb9a0e2..addb9a0e2 100644
--- a/src/couch/test/couch_compress_tests.erl
+++ b/src/couch/test/eunit/couch_compress_tests.erl
diff --git a/src/couch/test/couch_db_doc_tests.erl b/src/couch/test/eunit/couch_db_doc_tests.erl
index cdcf81d15..cdcf81d15 100644
--- a/src/couch/test/couch_db_doc_tests.erl
+++ b/src/couch/test/eunit/couch_db_doc_tests.erl
diff --git a/src/couch/test/couch_db_mpr_tests.erl b/src/couch/test/eunit/couch_db_mpr_tests.erl
index bb97c66d7..bb97c66d7 100644
--- a/src/couch/test/couch_db_mpr_tests.erl
+++ b/src/couch/test/eunit/couch_db_mpr_tests.erl
diff --git a/src/couch/test/couch_db_plugin_tests.erl b/src/couch/test/eunit/couch_db_plugin_tests.erl
index 93551adbc..93551adbc 100644
--- a/src/couch/test/couch_db_plugin_tests.erl
+++ b/src/couch/test/eunit/couch_db_plugin_tests.erl
diff --git a/src/couch/test/couch_db_props_upgrade_tests.erl b/src/couch/test/eunit/couch_db_props_upgrade_tests.erl
index 40ad283cf..40ad283cf 100644
--- a/src/couch/test/couch_db_props_upgrade_tests.erl
+++ b/src/couch/test/eunit/couch_db_props_upgrade_tests.erl
diff --git a/src/couch/test/couch_db_split_tests.erl b/src/couch/test/eunit/couch_db_split_tests.erl
index 7d2bb4006..312b5f84d 100644
--- a/src/couch/test/couch_db_split_tests.erl
+++ b/src/couch/test/eunit/couch_db_split_tests.erl
@@ -16,6 +16,7 @@
-include_lib("couch/include/couch_db.hrl").
-define(RINGTOP, 2 bsl 31).
+-define(TIMEOUT, 60). % seconds
setup() ->
@@ -68,7 +69,7 @@ should_split_shard({Desc, TotalDocs, Q}, DbName) ->
TMap = make_targets(Ranges),
DocsPerRange = TotalDocs div Q,
PickFun = make_pickfun(DocsPerRange),
- {Desc, ?_test(begin
+ {Desc, timeout, ?TIMEOUT, ?_test(begin
{ok, UpdateSeq} = couch_db_split:split(DbName, TMap, PickFun),
?assertEqual(ExpectSeq, UpdateSeq),
maps:map(fun(Range, Name) ->
@@ -154,7 +155,7 @@ should_copy_local_docs({Desc, TotalDocs, Q}, DbName) ->
TMap = make_targets(Ranges),
DocsPerRange = TotalDocs div Q,
PickFun = make_pickfun(DocsPerRange),
- {Desc, ?_test(begin
+ {Desc, timeout, ?TIMEOUT, ?_test(begin
{ok, UpdateSeq} = couch_db_split:split(DbName, TMap, PickFun),
?assertEqual(ExpectSeq, UpdateSeq),
Response = couch_db_split:copy_local_docs(DbName, TMap, PickFun),
diff --git a/src/couch/test/couch_db_tests.erl b/src/couch/test/eunit/couch_db_tests.erl
index d64f7c640..d64f7c640 100644
--- a/src/couch/test/couch_db_tests.erl
+++ b/src/couch/test/eunit/couch_db_tests.erl
diff --git a/src/couch/test/couch_doc_json_tests.erl b/src/couch/test/eunit/couch_doc_json_tests.erl
index 51f228900..51f228900 100644
--- a/src/couch/test/couch_doc_json_tests.erl
+++ b/src/couch/test/eunit/couch_doc_json_tests.erl
diff --git a/src/couch/test/couch_doc_tests.erl b/src/couch/test/eunit/couch_doc_tests.erl
index cf41df61d..cf41df61d 100644
--- a/src/couch/test/couch_doc_tests.erl
+++ b/src/couch/test/eunit/couch_doc_tests.erl
diff --git a/src/couch/test/couch_ejson_size_tests.erl b/src/couch/test/eunit/couch_ejson_size_tests.erl
index df9168ed1..df9168ed1 100644
--- a/src/couch/test/couch_ejson_size_tests.erl
+++ b/src/couch/test/eunit/couch_ejson_size_tests.erl
diff --git a/src/couch/test/couch_etag_tests.erl b/src/couch/test/eunit/couch_etag_tests.erl
index 9d15e483f..9d15e483f 100644
--- a/src/couch/test/couch_etag_tests.erl
+++ b/src/couch/test/eunit/couch_etag_tests.erl
diff --git a/src/couch/test/couch_file_tests.erl b/src/couch/test/eunit/couch_file_tests.erl
index e9806c09a..e9806c09a 100644
--- a/src/couch/test/couch_file_tests.erl
+++ b/src/couch/test/eunit/couch_file_tests.erl
diff --git a/src/couch/test/couch_flags_config_tests.erl b/src/couch/test/eunit/couch_flags_config_tests.erl
index 1a66cdcff..1a66cdcff 100644
--- a/src/couch/test/couch_flags_config_tests.erl
+++ b/src/couch/test/eunit/couch_flags_config_tests.erl
diff --git a/src/couch/test/couch_flags_tests.erl b/src/couch/test/eunit/couch_flags_tests.erl
index 32ec57b77..32ec57b77 100644
--- a/src/couch/test/couch_flags_tests.erl
+++ b/src/couch/test/eunit/couch_flags_tests.erl
diff --git a/src/couch/test/couch_hotp_tests.erl b/src/couch/test/eunit/couch_hotp_tests.erl
index fee10ff5e..fee10ff5e 100644
--- a/src/couch/test/couch_hotp_tests.erl
+++ b/src/couch/test/eunit/couch_hotp_tests.erl
diff --git a/src/couch/test/couch_index_tests.erl b/src/couch/test/eunit/couch_index_tests.erl
index fab3806d0..fab3806d0 100644
--- a/src/couch/test/couch_index_tests.erl
+++ b/src/couch/test/eunit/couch_index_tests.erl
diff --git a/src/couch/test/couch_key_tree_prop_tests.erl b/src/couch/test/eunit/couch_key_tree_prop_tests.erl
index f8146926a..f8146926a 100644
--- a/src/couch/test/couch_key_tree_prop_tests.erl
+++ b/src/couch/test/eunit/couch_key_tree_prop_tests.erl
diff --git a/src/couch/test/couch_key_tree_tests.erl b/src/couch/test/eunit/couch_key_tree_tests.erl
index 5d9cc8372..5d9cc8372 100644
--- a/src/couch/test/couch_key_tree_tests.erl
+++ b/src/couch/test/eunit/couch_key_tree_tests.erl
diff --git a/src/couch/test/couch_passwords_tests.erl b/src/couch/test/eunit/couch_passwords_tests.erl
index 88de8530f..88de8530f 100644
--- a/src/couch/test/couch_passwords_tests.erl
+++ b/src/couch/test/eunit/couch_passwords_tests.erl
diff --git a/src/couch/test/couch_query_servers_tests.erl b/src/couch/test/eunit/couch_query_servers_tests.erl
index f8df896c4..f8df896c4 100644
--- a/src/couch/test/couch_query_servers_tests.erl
+++ b/src/couch/test/eunit/couch_query_servers_tests.erl
diff --git a/src/couch/test/couch_server_tests.erl b/src/couch/test/eunit/couch_server_tests.erl
index 530b7efd0..530b7efd0 100644
--- a/src/couch/test/couch_server_tests.erl
+++ b/src/couch/test/eunit/couch_server_tests.erl
diff --git a/src/couch/test/couch_stream_tests.erl b/src/couch/test/eunit/couch_stream_tests.erl
index a7fedf0af..a7fedf0af 100644
--- a/src/couch/test/couch_stream_tests.erl
+++ b/src/couch/test/eunit/couch_stream_tests.erl
diff --git a/src/couch/test/couch_task_status_tests.erl b/src/couch/test/eunit/couch_task_status_tests.erl
index 0ec03563b..0ec03563b 100644
--- a/src/couch/test/couch_task_status_tests.erl
+++ b/src/couch/test/eunit/couch_task_status_tests.erl
diff --git a/src/couch/test/couch_totp_tests.erl b/src/couch/test/eunit/couch_totp_tests.erl
index 6817a092a..6817a092a 100644
--- a/src/couch/test/couch_totp_tests.erl
+++ b/src/couch/test/eunit/couch_totp_tests.erl
diff --git a/src/couch/test/couch_util_tests.erl b/src/couch/test/eunit/couch_util_tests.erl
index 3e145c4f6..3e145c4f6 100644
--- a/src/couch/test/couch_util_tests.erl
+++ b/src/couch/test/eunit/couch_util_tests.erl
diff --git a/src/couch/test/couch_uuids_tests.erl b/src/couch/test/eunit/couch_uuids_tests.erl
index a836eccc6..a836eccc6 100644
--- a/src/couch/test/couch_uuids_tests.erl
+++ b/src/couch/test/eunit/couch_uuids_tests.erl
diff --git a/src/couch/test/couch_work_queue_tests.erl b/src/couch/test/eunit/couch_work_queue_tests.erl
index a192230ef..a192230ef 100644
--- a/src/couch/test/couch_work_queue_tests.erl
+++ b/src/couch/test/eunit/couch_work_queue_tests.erl
diff --git a/src/couch/test/couchdb_attachments_tests.erl b/src/couch/test/eunit/couchdb_attachments_tests.erl
index 04859dbc9..04859dbc9 100644
--- a/src/couch/test/couchdb_attachments_tests.erl
+++ b/src/couch/test/eunit/couchdb_attachments_tests.erl
diff --git a/src/couch/test/couchdb_auth_tests.erl b/src/couch/test/eunit/couchdb_auth_tests.erl
index ed2c064de..ed2c064de 100644
--- a/src/couch/test/couchdb_auth_tests.erl
+++ b/src/couch/test/eunit/couchdb_auth_tests.erl
diff --git a/src/couch/test/couchdb_cookie_domain_tests.erl b/src/couch/test/eunit/couchdb_cookie_domain_tests.erl
index e66ab31e6..e66ab31e6 100755
--- a/src/couch/test/couchdb_cookie_domain_tests.erl
+++ b/src/couch/test/eunit/couchdb_cookie_domain_tests.erl
diff --git a/src/couch/test/couchdb_cors_tests.erl b/src/couch/test/eunit/couchdb_cors_tests.erl
index 82630bba7..82630bba7 100644
--- a/src/couch/test/couchdb_cors_tests.erl
+++ b/src/couch/test/eunit/couchdb_cors_tests.erl
diff --git a/src/couch/test/couchdb_db_tests.erl b/src/couch/test/eunit/couchdb_db_tests.erl
index 734bafb9f..734bafb9f 100644
--- a/src/couch/test/couchdb_db_tests.erl
+++ b/src/couch/test/eunit/couchdb_db_tests.erl
diff --git a/src/couch/test/couchdb_design_doc_tests.erl b/src/couch/test/eunit/couchdb_design_doc_tests.erl
index eef12e039..eef12e039 100644
--- a/src/couch/test/couchdb_design_doc_tests.erl
+++ b/src/couch/test/eunit/couchdb_design_doc_tests.erl
diff --git a/src/couch/test/couchdb_file_compression_tests.erl b/src/couch/test/eunit/couchdb_file_compression_tests.erl
index 8f0fe5bf1..8f0fe5bf1 100644
--- a/src/couch/test/couchdb_file_compression_tests.erl
+++ b/src/couch/test/eunit/couchdb_file_compression_tests.erl
diff --git a/src/couch/test/couchdb_location_header_tests.erl b/src/couch/test/eunit/couchdb_location_header_tests.erl
index c6c039eb0..c6c039eb0 100644
--- a/src/couch/test/couchdb_location_header_tests.erl
+++ b/src/couch/test/eunit/couchdb_location_header_tests.erl
diff --git a/src/couch/test/couchdb_mrview_cors_tests.erl b/src/couch/test/eunit/couchdb_mrview_cors_tests.erl
index 0f69048a0..0f69048a0 100644
--- a/src/couch/test/couchdb_mrview_cors_tests.erl
+++ b/src/couch/test/eunit/couchdb_mrview_cors_tests.erl
diff --git a/src/couch/test/couchdb_mrview_tests.erl b/src/couch/test/eunit/couchdb_mrview_tests.erl
index 1c96a0ae0..1c96a0ae0 100644
--- a/src/couch/test/couchdb_mrview_tests.erl
+++ b/src/couch/test/eunit/couchdb_mrview_tests.erl
diff --git a/src/couch/test/couchdb_os_proc_pool.erl b/src/couch/test/eunit/couchdb_os_proc_pool.erl
index 69f8051ad..69f8051ad 100644
--- a/src/couch/test/couchdb_os_proc_pool.erl
+++ b/src/couch/test/eunit/couchdb_os_proc_pool.erl
diff --git a/src/couch/test/couchdb_update_conflicts_tests.erl b/src/couch/test/eunit/couchdb_update_conflicts_tests.erl
index e92c73856..e92c73856 100644
--- a/src/couch/test/couchdb_update_conflicts_tests.erl
+++ b/src/couch/test/eunit/couchdb_update_conflicts_tests.erl
diff --git a/src/couch/test/couchdb_vhosts_tests.erl b/src/couch/test/eunit/couchdb_vhosts_tests.erl
index 1c4117215..1c4117215 100644
--- a/src/couch/test/couchdb_vhosts_tests.erl
+++ b/src/couch/test/eunit/couchdb_vhosts_tests.erl
diff --git a/src/couch/test/couchdb_views_tests.erl b/src/couch/test/eunit/couchdb_views_tests.erl
index 60bb5c975..60bb5c975 100644
--- a/src/couch/test/couchdb_views_tests.erl
+++ b/src/couch/test/eunit/couchdb_views_tests.erl
diff --git a/src/couch/test/fixtures/3b835456c235b1827e012e25666152f3.view b/src/couch/test/eunit/fixtures/3b835456c235b1827e012e25666152f3.view
index 9c67648be..9c67648be 100644
--- a/src/couch/test/fixtures/3b835456c235b1827e012e25666152f3.view
+++ b/src/couch/test/eunit/fixtures/3b835456c235b1827e012e25666152f3.view
Binary files differ
diff --git a/src/couch/test/fixtures/couch_stats_aggregates.cfg b/src/couch/test/eunit/fixtures/couch_stats_aggregates.cfg
index 30e475da8..30e475da8 100644
--- a/src/couch/test/fixtures/couch_stats_aggregates.cfg
+++ b/src/couch/test/eunit/fixtures/couch_stats_aggregates.cfg
diff --git a/src/couch/test/fixtures/couch_stats_aggregates.ini b/src/couch/test/eunit/fixtures/couch_stats_aggregates.ini
index cc5cd2187..cc5cd2187 100644
--- a/src/couch/test/fixtures/couch_stats_aggregates.ini
+++ b/src/couch/test/eunit/fixtures/couch_stats_aggregates.ini
diff --git a/src/couch/test/fixtures/db_non_partitioned.couch b/src/couch/test/eunit/fixtures/db_non_partitioned.couch
index 327d9bb5d..327d9bb5d 100644
--- a/src/couch/test/fixtures/db_non_partitioned.couch
+++ b/src/couch/test/eunit/fixtures/db_non_partitioned.couch
Binary files differ
diff --git a/src/couch/test/fixtures/db_v6_with_1_purge_req.couch b/src/couch/test/eunit/fixtures/db_v6_with_1_purge_req.couch
index b0d39c9ec..b0d39c9ec 100644
--- a/src/couch/test/fixtures/db_v6_with_1_purge_req.couch
+++ b/src/couch/test/eunit/fixtures/db_v6_with_1_purge_req.couch
Binary files differ
diff --git a/src/couch/test/fixtures/db_v6_with_1_purge_req_for_2_docs.couch b/src/couch/test/eunit/fixtures/db_v6_with_1_purge_req_for_2_docs.couch
index b584fce31..b584fce31 100644
--- a/src/couch/test/fixtures/db_v6_with_1_purge_req_for_2_docs.couch
+++ b/src/couch/test/eunit/fixtures/db_v6_with_1_purge_req_for_2_docs.couch
Binary files differ
diff --git a/src/couch/test/fixtures/db_v6_with_2_purge_req.couch b/src/couch/test/eunit/fixtures/db_v6_with_2_purge_req.couch
index ee4e11b7f..ee4e11b7f 100644
--- a/src/couch/test/fixtures/db_v6_with_2_purge_req.couch
+++ b/src/couch/test/eunit/fixtures/db_v6_with_2_purge_req.couch
Binary files differ
diff --git a/src/couch/test/fixtures/db_v6_without_purge_req.couch b/src/couch/test/eunit/fixtures/db_v6_without_purge_req.couch
index 814feb8e1..814feb8e1 100644
--- a/src/couch/test/fixtures/db_v6_without_purge_req.couch
+++ b/src/couch/test/eunit/fixtures/db_v6_without_purge_req.couch
Binary files differ
diff --git a/src/couch/test/fixtures/db_v7_with_1_purge_req.couch b/src/couch/test/eunit/fixtures/db_v7_with_1_purge_req.couch
index cab8331db..cab8331db 100644
--- a/src/couch/test/fixtures/db_v7_with_1_purge_req.couch
+++ b/src/couch/test/eunit/fixtures/db_v7_with_1_purge_req.couch
Binary files differ
diff --git a/src/couch/test/fixtures/db_v7_with_1_purge_req_for_2_docs.couch b/src/couch/test/eunit/fixtures/db_v7_with_1_purge_req_for_2_docs.couch
index b613646b1..b613646b1 100644
--- a/src/couch/test/fixtures/db_v7_with_1_purge_req_for_2_docs.couch
+++ b/src/couch/test/eunit/fixtures/db_v7_with_1_purge_req_for_2_docs.couch
Binary files differ
diff --git a/src/couch/test/fixtures/db_v7_with_2_purge_req.couch b/src/couch/test/eunit/fixtures/db_v7_with_2_purge_req.couch
index 126fc919e..126fc919e 100644
--- a/src/couch/test/fixtures/db_v7_with_2_purge_req.couch
+++ b/src/couch/test/eunit/fixtures/db_v7_with_2_purge_req.couch
Binary files differ
diff --git a/src/couch/test/fixtures/db_v7_without_purge_req.couch b/src/couch/test/eunit/fixtures/db_v7_without_purge_req.couch
index 762dc8dad..762dc8dad 100644
--- a/src/couch/test/fixtures/db_v7_without_purge_req.couch
+++ b/src/couch/test/eunit/fixtures/db_v7_without_purge_req.couch
Binary files differ
diff --git a/src/couch/test/fixtures/logo.png b/src/couch/test/eunit/fixtures/logo.png
index d21ac025b..d21ac025b 100644
--- a/src/couch/test/fixtures/logo.png
+++ b/src/couch/test/eunit/fixtures/logo.png
Binary files differ
diff --git a/src/couch/test/fixtures/multipart.http b/src/couch/test/eunit/fixtures/multipart.http
index fe9f271cc..fe9f271cc 100644
--- a/src/couch/test/fixtures/multipart.http
+++ b/src/couch/test/eunit/fixtures/multipart.http
diff --git a/src/couch/test/fixtures/os_daemon_bad_perm.sh b/src/couch/test/eunit/fixtures/os_daemon_bad_perm.sh
index 345c8b40b..345c8b40b 100644
--- a/src/couch/test/fixtures/os_daemon_bad_perm.sh
+++ b/src/couch/test/eunit/fixtures/os_daemon_bad_perm.sh
diff --git a/src/couch/test/fixtures/os_daemon_can_reboot.sh b/src/couch/test/eunit/fixtures/os_daemon_can_reboot.sh
index 5bc10e83f..5bc10e83f 100755
--- a/src/couch/test/fixtures/os_daemon_can_reboot.sh
+++ b/src/couch/test/eunit/fixtures/os_daemon_can_reboot.sh
diff --git a/src/couch/test/fixtures/os_daemon_configer.escript b/src/couch/test/eunit/fixtures/os_daemon_configer.escript
index f146b8314..f146b8314 100755
--- a/src/couch/test/fixtures/os_daemon_configer.escript
+++ b/src/couch/test/eunit/fixtures/os_daemon_configer.escript
diff --git a/src/couch/test/fixtures/os_daemon_die_on_boot.sh b/src/couch/test/eunit/fixtures/os_daemon_die_on_boot.sh
index 256ee7935..256ee7935 100755
--- a/src/couch/test/fixtures/os_daemon_die_on_boot.sh
+++ b/src/couch/test/eunit/fixtures/os_daemon_die_on_boot.sh
diff --git a/src/couch/test/fixtures/os_daemon_die_quickly.sh b/src/couch/test/eunit/fixtures/os_daemon_die_quickly.sh
index f5a13684e..f5a13684e 100755
--- a/src/couch/test/fixtures/os_daemon_die_quickly.sh
+++ b/src/couch/test/eunit/fixtures/os_daemon_die_quickly.sh
diff --git a/src/couch/test/fixtures/os_daemon_looper.escript b/src/couch/test/eunit/fixtures/os_daemon_looper.escript
index 73974e905..73974e905 100755
--- a/src/couch/test/fixtures/os_daemon_looper.escript
+++ b/src/couch/test/eunit/fixtures/os_daemon_looper.escript
diff --git a/src/couch/test/fixtures/test.couch b/src/couch/test/eunit/fixtures/test.couch
index 32c79af32..32c79af32 100644
--- a/src/couch/test/fixtures/test.couch
+++ b/src/couch/test/eunit/fixtures/test.couch
Binary files differ
diff --git a/src/couch/test/global_changes_tests.erl b/src/couch/test/eunit/global_changes_tests.erl
index 4392aafac..4392aafac 100644
--- a/src/couch/test/global_changes_tests.erl
+++ b/src/couch/test/eunit/global_changes_tests.erl
diff --git a/src/couch/test/json_stream_parse_tests.erl b/src/couch/test/eunit/json_stream_parse_tests.erl
index e690d7728..e690d7728 100644
--- a/src/couch/test/json_stream_parse_tests.erl
+++ b/src/couch/test/eunit/json_stream_parse_tests.erl
diff --git a/src/couch/test/test_web.erl b/src/couch/test/eunit/test_web.erl
index b1b3e65c9..b1b3e65c9 100644
--- a/src/couch/test/test_web.erl
+++ b/src/couch/test/eunit/test_web.erl
diff --git a/src/couch/test/exunit/fabric_test.exs b/src/couch/test/exunit/fabric_test.exs
new file mode 100644
index 000000000..bdb84e9a2
--- /dev/null
+++ b/src/couch/test/exunit/fabric_test.exs
@@ -0,0 +1,101 @@
+defmodule Couch.Test.Fabric do
+ use Couch.Test.ExUnit.Case
+ alias Couch.Test.Utils
+
+ alias Couch.Test.Setup
+
+ alias Couch.Test.Setup.Step
+
+ import Couch.DBTest
+
+ import Utils
+
+ @admin {:user_ctx, user_ctx(roles: ["_admin"])}
+
+ def with_db(context, setup) do
+ setup =
+ setup
+ |> Setup.Common.with_db()
+ |> Setup.run()
+
+ context =
+ Map.merge(context, %{
+ db_name: setup |> Setup.get(:db) |> Step.Create.DB.name()
+ })
+
+ {context, setup}
+ end
+
+ describe "Fabric miscellaneous API" do
+ @describetag setup: &__MODULE__.with_db/2
+ test "Get inactive_index_files", ctx do
+ {:ok, _rev} = update_doc(ctx.db_name, %{"_id" => "doc1"})
+
+ design_doc = %{
+ "_id" => "_design/test",
+ "language" => "javascript",
+ "views" => %{
+ "view" => %{
+ "map" => "function(doc){emit(doc._id, doc._rev)}"
+ }
+ }
+ }
+
+ {:ok, rev1} = update_doc(ctx.db_name, design_doc)
+ wait_sig_update(ctx.db_name, "test", "")
+ prev_active = get_active_sig(ctx.db_name, "test")
+
+ updated_design_doc =
+ put_in(design_doc, ["views", "view", "map"], "function(doc){emit(doc._id, null)}")
+
+ {:ok, rev2} =
+ update_doc(
+ ctx.db_name,
+ Map.put(updated_design_doc, "_rev", rev1)
+ )
+
+ assert rev1 != rev2
+ wait_sig_update(ctx.db_name, "test", prev_active)
+
+ {:ok, info} = :fabric.get_view_group_info(ctx.db_name, "_design/test")
+ active = info[:signature]
+
+ files = Enum.map(:fabric.inactive_index_files(ctx.db_name), &List.to_string/1)
+
+      assert [] != files, "We should have some inactive files"
+
+ assert not Enum.any?(files, fn
+ file_path -> String.contains?(file_path, active)
+ end),
+           "We are not supposed to return active views"
+
+ assert Enum.all?(files, fn
+ file_path -> String.contains?(file_path, prev_active)
+ end),
+           "We expect all files to contain the previous active signature"
+ end
+ end
+
+ defp update_doc(db_name, body) do
+ json_body = :jiffy.decode(:jiffy.encode(body))
+
+ case :fabric.update_doc(db_name, json_body, [@admin]) do
+ {:ok, rev} ->
+ {:ok, :couch_doc.rev_to_str(rev)}
+
+ error ->
+ error
+ end
+ end
+
+ defp get_active_sig(db_name, ddoc_id) do
+ {:ok, info} = :fabric.get_view_group_info(db_name, "_design/#{ddoc_id}")
+ info[:signature]
+ end
+
+ defp wait_sig_update(db_name, ddoc_id, prev_active) do
+ retry_until(fn ->
+ get_active_sig(db_name, ddoc_id) != prev_active
+ end)
+ end
+end
diff --git a/src/couch/test/exunit/test_helper.exs b/src/couch/test/exunit/test_helper.exs
new file mode 100644
index 000000000..314050085
--- /dev/null
+++ b/src/couch/test/exunit/test_helper.exs
@@ -0,0 +1,2 @@
+ExUnit.configure(formatters: [JUnitFormatter, ExUnit.CLIFormatter])
+ExUnit.start()
diff --git a/src/couch_epi/src/couch_epi_sup.erl b/src/couch_epi/src/couch_epi_sup.erl
index 218db5445..477cbe79e 100644
--- a/src/couch_epi/src/couch_epi_sup.erl
+++ b/src/couch_epi/src/couch_epi_sup.erl
@@ -136,4 +136,7 @@ modules(#couch_epi_spec{kind = data_subscriptions, behaviour = Module}) ->
merge([], Children) ->
Children;
merge([{Id, _, _, _, _, _} = Spec | Rest], Children) ->
- merge(Rest, lists:keystore(Id, 1, Children, Spec)).
+ merge(Rest, lists:keystore(Id, 1, Children, Spec));
+merge([#{id := Id} = Spec | Rest], Children) ->
+ Replace = fun(#{id := I}) when I == Id -> Spec; (E) -> E end,
+ merge(Rest, lists:map(Replace, Children)).
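
The new merge/2 clause extends couch_epi_sup to map-based child specs alongside the classic six-element tuples: tuple specs are upserted by id via lists:keystore/4, while a map spec replaces an existing child whose #{id := Id} matches and is silently ignored when no such child exists (lists:map/2 never appends). A standalone sketch of that behaviour (module and values hypothetical):

    -module(childspec_merge_sketch).
    -export([example/0]).

    merge([], Children) ->
        Children;
    merge([{Id, _, _, _, _, _} = Spec | Rest], Children) ->
        merge(Rest, lists:keystore(Id, 1, Children, Spec));
    merge([#{id := Id} = Spec | Rest], Children) ->
        Replace = fun(#{id := I}) when I == Id -> Spec; (E) -> E end,
        merge(Rest, lists:map(Replace, Children)).

    example() ->
        Old = #{id => a, start => {old_mod, start_link, []}},
        New = #{id => a, start => {new_mod, start_link, []}},
        [New] = merge([New], [Old]), % map spec replaced in place
        ok.
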
diff --git a/src/couch_epi/test/couch_epi_basic_test.erl b/src/couch_epi/test/eunit/couch_epi_basic_test.erl
index 587d1564e..5ba6c9f87 100644
--- a/src/couch_epi/test/couch_epi_basic_test.erl
+++ b/src/couch_epi/test/eunit/couch_epi_basic_test.erl
@@ -67,7 +67,9 @@ processes() ->
[
{?MODULE, [?CHILD(extra_process, worker)]},
{?MODULE, [{to_replace, {new, start_link, [bar]},
- permanent, 5000, worker, [bar]}]}
+ permanent, 5000, worker, [bar]}]},
+ {?MODULE, [#{id => to_replace_map,
+ start => {new, start_link, [bar]}, modules => [bar]}]}
].
@@ -95,9 +97,10 @@ parse_child_id(Id) ->
-include_lib("eunit/include/eunit.hrl").
basic_test() ->
- Expected = lists:sort([
+ Expected = [
{extra_process, [], [extra_process]},
{to_replace, [bar], [bar]},
+ {to_replace_map, [bar], [bar]},
{{my_service, providers},
[couch_epi_functions_gen_my_service],
[couch_epi_codechange_monitor, couch_epi_functions_gen_my_service,
@@ -114,18 +117,23 @@ basic_test() ->
[couch_epi_data_gen_test_app_descriptions],
lists:sort([couch_epi_codechange_monitor,
couch_epi_data_gen_test_app_descriptions, ?MODULE])}
- ]),
+ ],
- ToReplace = {to_replace,
- {old, start_link, [foo]}, permanent, 5000, worker, [foo]},
- Children = lists:sort(couch_epi_sup:plugin_childspecs(
- ?MODULE, [?MODULE], [ToReplace])),
- Results = [
- {parse_child_id(Id), Args, lists:sort(Modules)}
- || {Id, {_M, _F, Args}, _, _, _, Modules} <- Children
+ ToReplace = [
+ {to_replace, {old, start_link, [foo]}, permanent, 5000, worker, [foo]},
+ #{id => to_replace_map, start => {old, start_link, [foo]}}
],
+ Children = lists:sort(couch_epi_sup:plugin_childspecs(
+ ?MODULE, [?MODULE], ToReplace)),
+
+ Results = lists:map(fun
+ ({Id, {_M, _F, Args}, _, _, _, Modules}) ->
+ {parse_child_id(Id), Args, lists:sort(Modules)};
+ (#{id := Id, start := {_M, _F, Args}, modules := Modules}) ->
+ {parse_child_id(Id), Args, lists:sort(Modules)}
+ end, Children),
- Tests = lists:zip(Expected, Results),
+ Tests = lists:zip(lists:sort(Expected), lists:sort(Results)),
[?assertEqual(Expect, Result) || {Expect, Result} <- Tests],
ExpectedChild = {to_replace, {new, start_link, [bar]},
@@ -134,4 +142,8 @@ basic_test() ->
ExpectedChild,
lists:keyfind(to_replace, 1, Children)),
+ ExpectedMapChildSpec = #{id => to_replace_map,
+ start => {new, start_link, [bar]}, modules => [bar]},
+ [MapChildSpec] = [E || #{id := to_replace_map} = E <- Children],
+ ?assertEqual(ExpectedMapChildSpec, MapChildSpec),
ok.
diff --git a/src/couch_epi/test/couch_epi_tests.erl b/src/couch_epi/test/eunit/couch_epi_tests.erl
index 042753215..12d8610c1 100644
--- a/src/couch_epi/test/couch_epi_tests.erl
+++ b/src/couch_epi/test/eunit/couch_epi_tests.erl
@@ -14,8 +14,8 @@
-include_lib("couch/include/couch_eunit.hrl").
--define(DATA_FILE1, ?ABS_PATH("test/fixtures/app_data1.cfg")).
--define(DATA_FILE2, ?ABS_PATH("test/fixtures/app_data2.cfg")).
+-define(DATA_FILE1, ?ABS_PATH("test/eunit/fixtures/app_data1.cfg")).
+-define(DATA_FILE2, ?ABS_PATH("test/eunit/fixtures/app_data2.cfg")).
-export([notify_cb/4, save/3, get/2]).
diff --git a/src/couch_epi/test/fixtures/app_data1.cfg b/src/couch_epi/test/eunit/fixtures/app_data1.cfg
index 4c9f3fe2d..4c9f3fe2d 100644
--- a/src/couch_epi/test/fixtures/app_data1.cfg
+++ b/src/couch_epi/test/eunit/fixtures/app_data1.cfg
diff --git a/src/couch_epi/test/fixtures/app_data2.cfg b/src/couch_epi/test/eunit/fixtures/app_data2.cfg
index e5a5ffb8c..e5a5ffb8c 100644
--- a/src/couch_epi/test/fixtures/app_data2.cfg
+++ b/src/couch_epi/test/eunit/fixtures/app_data2.cfg
diff --git a/src/couch_index/test/couch_index_compaction_tests.erl b/src/couch_index/test/eunit/couch_index_compaction_tests.erl
index 53316d944..53316d944 100644
--- a/src/couch_index/test/couch_index_compaction_tests.erl
+++ b/src/couch_index/test/eunit/couch_index_compaction_tests.erl
diff --git a/src/couch_index/test/couch_index_ddoc_updated_tests.erl b/src/couch_index/test/eunit/couch_index_ddoc_updated_tests.erl
index 0e23adf91..0e23adf91 100644
--- a/src/couch_index/test/couch_index_ddoc_updated_tests.erl
+++ b/src/couch_index/test/eunit/couch_index_ddoc_updated_tests.erl
diff --git a/src/couch_log/test/couch_log_config_listener_test.erl b/src/couch_log/test/eunit/couch_log_config_listener_test.erl
index 07abae1ff..07abae1ff 100644
--- a/src/couch_log/test/couch_log_config_listener_test.erl
+++ b/src/couch_log/test/eunit/couch_log_config_listener_test.erl
diff --git a/src/couch_log/test/couch_log_config_test.erl b/src/couch_log/test/eunit/couch_log_config_test.erl
index c4677f37f..c4677f37f 100644
--- a/src/couch_log/test/couch_log_config_test.erl
+++ b/src/couch_log/test/eunit/couch_log_config_test.erl
diff --git a/src/couch_log/test/couch_log_error_logger_h_test.erl b/src/couch_log/test/eunit/couch_log_error_logger_h_test.erl
index b78598fa4..b78598fa4 100644
--- a/src/couch_log/test/couch_log_error_logger_h_test.erl
+++ b/src/couch_log/test/eunit/couch_log_error_logger_h_test.erl
diff --git a/src/couch_log/test/couch_log_formatter_test.erl b/src/couch_log/test/eunit/couch_log_formatter_test.erl
index 795efcf29..795efcf29 100644
--- a/src/couch_log/test/couch_log_formatter_test.erl
+++ b/src/couch_log/test/eunit/couch_log_formatter_test.erl
diff --git a/src/couch_log/test/couch_log_monitor_test.erl b/src/couch_log/test/eunit/couch_log_monitor_test.erl
index eec008522..eec008522 100644
--- a/src/couch_log/test/couch_log_monitor_test.erl
+++ b/src/couch_log/test/eunit/couch_log_monitor_test.erl
diff --git a/src/couch_log/test/couch_log_server_test.erl b/src/couch_log/test/eunit/couch_log_server_test.erl
index 7af570e90..7af570e90 100644
--- a/src/couch_log/test/couch_log_server_test.erl
+++ b/src/couch_log/test/eunit/couch_log_server_test.erl
diff --git a/src/couch_log/test/couch_log_test.erl b/src/couch_log/test/eunit/couch_log_test.erl
index c7195f65f..c7195f65f 100644
--- a/src/couch_log/test/couch_log_test.erl
+++ b/src/couch_log/test/eunit/couch_log_test.erl
diff --git a/src/couch_log/test/couch_log_test_util.erl b/src/couch_log/test/eunit/couch_log_test_util.erl
index 00f3981fc..00f3981fc 100644
--- a/src/couch_log/test/couch_log_test_util.erl
+++ b/src/couch_log/test/eunit/couch_log_test_util.erl
diff --git a/src/couch_log/test/couch_log_trunc_io_fmt_test.erl b/src/couch_log/test/eunit/couch_log_trunc_io_fmt_test.erl
index 77d555440..77d555440 100644
--- a/src/couch_log/test/couch_log_trunc_io_fmt_test.erl
+++ b/src/couch_log/test/eunit/couch_log_trunc_io_fmt_test.erl
diff --git a/src/couch_log/test/couch_log_util_test.erl b/src/couch_log/test/eunit/couch_log_util_test.erl
index e97911aa9..e97911aa9 100644
--- a/src/couch_log/test/couch_log_util_test.erl
+++ b/src/couch_log/test/eunit/couch_log_util_test.erl
diff --git a/src/couch_log/test/couch_log_writer_ets.erl b/src/couch_log/test/eunit/couch_log_writer_ets.erl
index d5fd327ac..d5fd327ac 100644
--- a/src/couch_log/test/couch_log_writer_ets.erl
+++ b/src/couch_log/test/eunit/couch_log_writer_ets.erl
diff --git a/src/couch_log/test/couch_log_writer_file_test.erl b/src/couch_log/test/eunit/couch_log_writer_file_test.erl
index ba042610a..ba042610a 100644
--- a/src/couch_log/test/couch_log_writer_file_test.erl
+++ b/src/couch_log/test/eunit/couch_log_writer_file_test.erl
diff --git a/src/couch_log/test/couch_log_writer_stderr_test.erl b/src/couch_log/test/eunit/couch_log_writer_stderr_test.erl
index 1e99263dd..1e99263dd 100644
--- a/src/couch_log/test/couch_log_writer_stderr_test.erl
+++ b/src/couch_log/test/eunit/couch_log_writer_stderr_test.erl
diff --git a/src/couch_log/test/couch_log_writer_syslog_test.erl b/src/couch_log/test/eunit/couch_log_writer_syslog_test.erl
index c32b5c6bf..c32b5c6bf 100644
--- a/src/couch_log/test/couch_log_writer_syslog_test.erl
+++ b/src/couch_log/test/eunit/couch_log_writer_syslog_test.erl
diff --git a/src/couch_log/test/couch_log_writer_test.erl b/src/couch_log/test/eunit/couch_log_writer_test.erl
index d0bb347fe..d0bb347fe 100644
--- a/src/couch_log/test/couch_log_writer_test.erl
+++ b/src/couch_log/test/eunit/couch_log_writer_test.erl
diff --git a/src/couch_mrview/test/couch_mrview_all_docs_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_all_docs_tests.erl
index bf8eb7e5b..bf8eb7e5b 100644
--- a/src/couch_mrview/test/couch_mrview_all_docs_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_all_docs_tests.erl
diff --git a/src/couch_mrview/test/couch_mrview_changes_since_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_changes_since_tests.erl
index d670e109b..d670e109b 100644
--- a/src/couch_mrview/test/couch_mrview_changes_since_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_changes_since_tests.erl
diff --git a/src/couch_mrview/test/couch_mrview_collation_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_collation_tests.erl
index 5c8cb54b1..5c8cb54b1 100644
--- a/src/couch_mrview/test/couch_mrview_collation_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_collation_tests.erl
diff --git a/src/couch_mrview/test/couch_mrview_compact_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_compact_tests.erl
index 7664becdc..7664becdc 100644
--- a/src/couch_mrview/test/couch_mrview_compact_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_compact_tests.erl
diff --git a/src/couch_mrview/test/couch_mrview_ddoc_updated_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_ddoc_updated_tests.erl
index 4310157eb..4310157eb 100644
--- a/src/couch_mrview/test/couch_mrview_ddoc_updated_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_ddoc_updated_tests.erl
diff --git a/src/couch_mrview/test/couch_mrview_ddoc_validation_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_ddoc_validation_tests.erl
index ce2be8904..ce2be8904 100644
--- a/src/couch_mrview/test/couch_mrview_ddoc_validation_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_ddoc_validation_tests.erl
diff --git a/src/couch_mrview/test/couch_mrview_design_docs_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_design_docs_tests.erl
index aedd42865..aedd42865 100644
--- a/src/couch_mrview/test/couch_mrview_design_docs_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_design_docs_tests.erl
diff --git a/src/couch_mrview/test/couch_mrview_http_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_http_tests.erl
index bd11c7ad8..bd11c7ad8 100644
--- a/src/couch_mrview/test/couch_mrview_http_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_http_tests.erl
diff --git a/src/couch_mrview/test/couch_mrview_index_changes_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_index_changes_tests.erl
index f0be1b9b1..f0be1b9b1 100644
--- a/src/couch_mrview/test/couch_mrview_index_changes_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_index_changes_tests.erl
diff --git a/src/couch_mrview/test/couch_mrview_index_info_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_index_info_tests.erl
index efa03e7c0..efa03e7c0 100644
--- a/src/couch_mrview/test/couch_mrview_index_info_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_index_info_tests.erl
diff --git a/src/couch_mrview/test/couch_mrview_local_docs_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_local_docs_tests.erl
index b0d25469a..b0d25469a 100644
--- a/src/couch_mrview/test/couch_mrview_local_docs_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_local_docs_tests.erl
diff --git a/src/couch_mrview/test/couch_mrview_map_views_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_map_views_tests.erl
index 805dc6c74..805dc6c74 100644
--- a/src/couch_mrview/test/couch_mrview_map_views_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_map_views_tests.erl
diff --git a/src/couch_mrview/test/couch_mrview_purge_docs_fabric_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl
index 213acac0b..a593f54e3 100644
--- a/src/couch_mrview/test/couch_mrview_purge_docs_fabric_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl
@@ -17,7 +17,7 @@
-include_lib("mem3/include/mem3.hrl").
-include_lib("couch_mrview/include/couch_mrview.hrl").
--define(TIMEOUT, 1000).
+-define(TIMEOUT, 60). % seconds
setup() ->
@@ -56,7 +56,7 @@ view_purge_fabric_test_() ->
test_purge_verify_index(DbName) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
Docs1 = couch_mrview_test_util:make_docs(normal, 5),
{ok, _} = fabric:update_docs(DbName, Docs1, [?ADMIN_CTX]),
{ok, _} = fabric:update_doc(
@@ -99,11 +99,11 @@ test_purge_verify_index(DbName) ->
?assertEqual(1, couch_util:get_value(<<"purge_seq">>, Props2)),
?assertEqual(true, couch_mrview_index:verify_index_exists(
ShardDbName, Props2))
- end).
+ end)}.
test_purge_hook_before_compaction(DbName) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
Docs1 = couch_mrview_test_util:make_docs(normal, 5),
{ok, _} = fabric:update_docs(DbName, Docs1, [?ADMIN_CTX]),
{ok, _} = fabric:update_doc(
@@ -198,7 +198,7 @@ test_purge_hook_before_compaction(DbName) ->
{ok, #doc{body = {Props4}}} = get_local_purge_doc(DbName),
?assertEqual(2, couch_util:get_value(<<"purge_seq">>, Props4))
- end).
+ end)}.
get_local_purge_doc(DbName) ->
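
The unit matters here: the middle element of an EUnit {timeout, ...} tuple is in seconds, so now that ?TIMEOUT feeds the wrappers below, the define drops from 1000 to 60; left at 1000 it would have granted each test well over sixteen minutes. For reference (hypothetical one-liner):

    %% 60 means sixty seconds, not milliseconds, in this position.
    purge_sketch_test_() -> {timeout, 60, ?_assert(true)}.
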
diff --git a/src/couch_mrview/test/couch_mrview_purge_docs_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_purge_docs_tests.erl
index 1020607a4..1020607a4 100644
--- a/src/couch_mrview/test/couch_mrview_purge_docs_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_purge_docs_tests.erl
diff --git a/src/couch_mrview/test/couch_mrview_red_views_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_red_views_tests.erl
index b83686113..b83686113 100644
--- a/src/couch_mrview/test/couch_mrview_red_views_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_red_views_tests.erl
diff --git a/src/couch_mrview/test/couch_mrview_util_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_util_tests.erl
index 7046c9bb2..7046c9bb2 100644
--- a/src/couch_mrview/test/couch_mrview_util_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_util_tests.erl
diff --git a/src/couch_peruser/test/couch_peruser_test.erl b/src/couch_peruser/test/eunit/couch_peruser_test.erl
index 8501cc36f..8501cc36f 100644
--- a/src/couch_peruser/test/couch_peruser_test.erl
+++ b/src/couch_peruser/test/eunit/couch_peruser_test.erl
diff --git a/src/couch_pse_tests/src/cpse_test_purge_replication.erl b/src/couch_pse_tests/src/cpse_test_purge_replication.erl
index fb09eeba6..20dcc2f81 100644
--- a/src/couch_pse_tests/src/cpse_test_purge_replication.erl
+++ b/src/couch_pse_tests/src/cpse_test_purge_replication.erl
@@ -48,8 +48,8 @@ cpse_purge_http_replication({Source, Target}) ->
]),
RepObject = {[
- {<<"source">>, Source},
- {<<"target">>, Target}
+ {<<"source">>, db_url(Source)},
+ {<<"target">>, db_url(Target)}
]},
{ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
@@ -100,8 +100,8 @@ cpse_purge_http_replication({Source, Target}) ->
% Show that replicating from the target
% back to the source reintroduces the doc
RepObject2 = {[
- {<<"source">>, Target},
- {<<"target">>, Source}
+ {<<"source">>, db_url(Target)},
+ {<<"target">>, db_url(Source)}
]},
{ok, _} = couch_replicator:replicate(RepObject2, ?ADMIN_USER),
@@ -200,3 +200,16 @@ make_shard(DbName) ->
dbname = DbName,
range = [0, 16#FFFFFFFF]
}.
+
+
+db_url(DbName) ->
+ Addr = config:get("httpd", "bind_address", "127.0.0.1"),
+ Port = mochiweb_socket_server:get(couch_httpd, port),
+ Url = ?l2b(io_lib:format("http://~s:~b/~s", [Addr, Port, DbName])),
+ test_util:wait(fun() ->
+ case test_request:get(?b2l(Url)) of
+ {ok, 200, _, _} -> ok;
+ _ -> wait
+ end
+ end),
+ Url.
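
db_url/1 above resolves the node-local HTTP address and then polls until the database answers 200 before handing the URL to the replicator, so the test cannot race ahead of database creation. As used here, the contract of test_util:wait/1 is that the fun returns the atom wait to be retried and any other value to stop. The polling shape in isolation (wrapper name hypothetical):

    %% Poll until Url answers HTTP 200; test_util:wait/1 re-invokes the
    %% fun while it returns 'wait', giving up after its default budget.
    wait_until_up(Url) ->
        test_util:wait(fun() ->
            case test_request:get(Url) of
                {ok, 200, _, _} -> ok; % endpoint reachable
                _ -> wait              % not yet; retry
            end
        end).
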
diff --git a/src/couch_replicator/src/couch_replicator.erl b/src/couch_replicator/src/couch_replicator.erl
index 39141c301..9c7e318b6 100644
--- a/src/couch_replicator/src/couch_replicator.erl
+++ b/src/couch_replicator/src/couch_replicator.erl
@@ -144,11 +144,13 @@ replication_states() ->
-spec strip_url_creds(binary() | {[_]}) -> binary().
strip_url_creds(Endpoint) ->
- case couch_replicator_docs:parse_rep_db(Endpoint, [], []) of
- #httpdb{url=Url} ->
- iolist_to_binary(couch_util:url_strip_password(Url));
- LocalDb when is_binary(LocalDb) ->
- LocalDb
+ try
+ couch_replicator_docs:parse_rep_db(Endpoint, [], []) of
+ #httpdb{url = Url} ->
+ iolist_to_binary(couch_util:url_strip_password(Url))
+ catch
+ throw:{error, local_endpoints_not_supported} ->
+ Endpoint
end.
@@ -358,9 +360,9 @@ strip_url_creds_test_() ->
end,
fun (_) -> meck:unload() end,
[
- t_strip_local_db_creds(),
t_strip_http_basic_creds(),
- t_strip_http_props_creds()
+ t_strip_http_props_creds(),
+ t_strip_local_db_creds()
]
}.
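
strip_url_creds/1 switches from a plain case to try ... of ... catch: the body runs under exception protection, its successful result is matched by the of-clauses, and a local endpoint, which parse_rep_db now rejects by throwing, is returned unchanged. Only exceptions raised in the body are caught; exceptions from the of-clauses propagate. A self-contained sketch of the construct (module and parse/1 hypothetical):

    -module(try_of_sketch).
    -export([strip_or_passthrough/1]).

    parse(<<"http://", _/binary>> = Url) -> {ok, Url};
    parse(_Local) -> throw({error, local_endpoints_not_supported}).

    strip_or_passthrough(Endpoint) ->
        try parse(Endpoint) of
            {ok, Url} -> Url % of-clauses see the body's return value
        catch
            throw:{error, local_endpoints_not_supported} ->
                Endpoint % throws from the body land here
        end.
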
diff --git a/src/couch_replicator/src/couch_replicator_api_wrap.erl b/src/couch_replicator/src/couch_replicator_api_wrap.erl
index 44c290d33..ab1de7df9 100644
--- a/src/couch_replicator/src/couch_replicator_api_wrap.erl
+++ b/src/couch_replicator/src/couch_replicator_api_wrap.erl
@@ -23,8 +23,8 @@
-include("couch_replicator_api_wrap.hrl").
-export([
- db_open/2,
- db_open/4,
+ db_open/1,
+ db_open/3,
db_close/1,
get_db_info/1,
get_pending_count/2,
@@ -67,10 +67,10 @@ db_uri(Db) ->
db_uri(couch_db:name(Db)).
-db_open(Db, Options) ->
- db_open(Db, Options, false, []).
+db_open(Db) ->
+ db_open(Db, false, []).
-db_open(#httpdb{} = Db1, _Options, Create, CreateParams) ->
+db_open(#httpdb{} = Db1, Create, CreateParams) ->
{ok, Db} = couch_replicator_httpc:setup(Db1),
try
case Create of
@@ -118,51 +118,19 @@ db_open(#httpdb{} = Db1, _Options, Create, CreateParams) ->
exit:Error ->
db_close(Db),
erlang:exit(Error)
- end;
-db_open(DbName, Options, Create, _CreateParams) ->
- try
- case Create of
- false ->
- ok;
- true ->
- ok = couch_httpd:verify_is_server_admin(
- get_value(user_ctx, Options)),
- couch_db:create(DbName, Options)
- end,
- case couch_db:open(DbName, Options) of
- {error, {illegal_database_name, _}} ->
- throw({db_not_found, DbName});
- {not_found, _Reason} ->
- throw({db_not_found, DbName});
- {ok, _Db} = Success ->
- Success
- end
- catch
- throw:{unauthorized, _} ->
- throw({unauthorized, DbName})
end.
db_close(#httpdb{httpc_pool = Pool} = HttpDb) ->
couch_replicator_auth:cleanup(HttpDb),
unlink(Pool),
- ok = couch_replicator_httpc_pool:stop(Pool);
-db_close(DbName) ->
- catch couch_db:close(DbName).
+ ok = couch_replicator_httpc_pool:stop(Pool).
get_db_info(#httpdb{} = Db) ->
send_req(Db, [],
fun(200, _, {Props}) ->
{ok, Props}
- end);
-get_db_info(Db) ->
- DbName = couch_db:name(Db),
- UserCtx = couch_db:get_user_ctx(Db),
- {ok, InfoDb} = couch_db:open(DbName, [{user_ctx, UserCtx}]),
- {ok, Info} = couch_db:get_db_info(InfoDb),
- couch_db:close(InfoDb),
- {ok, [{couch_util:to_binary(K), V} || {K, V} <- Info]}.
-
+ end).
get_pending_count(#httpdb{} = Db, Seq) when is_number(Seq) ->
% Source looks like Apache CouchDB and not Cloudant so we fall
@@ -179,14 +147,7 @@ get_pending_count(#httpdb{} = Db, Seq) ->
Options = [{path, "_changes"}, {qs, [{"since", ?JSON_ENCODE(Seq)}, {"limit", "0"}]}],
send_req(Db, Options, fun(200, _, {Props}) ->
{ok, couch_util:get_value(<<"pending">>, Props, null)}
- end);
-get_pending_count(Db, Seq) when is_number(Seq) ->
- DbName = couch_db:name(Db),
- UserCtx = couch_db:get_user_ctx(Db),
- {ok, CountDb} = couch_db:open(DbName, [{user_ctx, UserCtx}]),
- Pending = couch_db:count_changes_since(CountDb, Seq),
- couch_db:close(CountDb),
- {ok, Pending}.
+ end).
get_view_info(#httpdb{} = Db, DDocId, ViewName) ->
Path = io_lib:format("~s/_view/~s/_info", [DDocId, ViewName]),
@@ -194,11 +155,7 @@ get_view_info(#httpdb{} = Db, DDocId, ViewName) ->
fun(200, _, {Props}) ->
{VInfo} = couch_util:get_value(<<"view_index">>, Props, {[]}),
{ok, VInfo}
- end);
-get_view_info(Db, DDocId, ViewName) ->
- DbName = couch_db:name(Db),
- {ok, VInfo} = couch_mrview:get_view_info(DbName, DDocId, ViewName),
- {ok, [{couch_util:to_binary(K), V} || {K, V} <- VInfo]}.
+ end).
ensure_full_commit(#httpdb{} = Db) ->
@@ -210,9 +167,7 @@ ensure_full_commit(#httpdb{} = Db) ->
{ok, get_value(<<"instance_start_time">>, Props)};
(_, _, {Props}) ->
{error, get_value(<<"error">>, Props)}
- end);
-ensure_full_commit(Db) ->
- couch_db:ensure_full_commit(Db).
+ end).
get_missing_revs(#httpdb{} = Db, IdRevs) ->
@@ -232,10 +187,7 @@ get_missing_revs(#httpdb{} = Db, IdRevs) ->
{Id, MissingRevs, PossibleAncestors}
end,
{ok, lists:map(ConvertToNativeFun, Props)}
- end);
-get_missing_revs(Db, IdRevs) ->
- couch_db:get_missing_revs(Db, IdRevs).
-
+ end).
open_doc_revs(#httpdb{retries = 0} = HttpDb, Id, Revs, Options, _Fun, _Acc) ->
@@ -331,10 +283,8 @@ open_doc_revs(#httpdb{} = HttpDb, Id, Revs, Options, Fun, Acc) ->
wait = Wait
},
open_doc_revs(RetryDb, Id, Revs, Options, Fun, Acc)
- end;
-open_doc_revs(Db, Id, Revs, Options, Fun, Acc) ->
- {ok, Results} = couch_db:open_doc_revs(Db, Id, Revs, Options),
- {ok, lists:foldl(fun(R, A) -> {_, A2} = Fun(R, A), A2 end, Acc, Results)}.
+ end.
+
error_reason({http_request_failed, "GET", _Url, {error, timeout}}) ->
timeout;
@@ -353,14 +303,7 @@ open_doc(#httpdb{} = Db, Id, Options) ->
{ok, couch_doc:from_json_obj(Body)};
(_, _, {Props}) ->
{error, get_value(<<"error">>, Props)}
- end);
-open_doc(Db, Id, Options) ->
- case couch_db:open_doc(Db, Id, Options) of
- {ok, _} = Ok ->
- Ok;
- {not_found, _Reason} ->
- {error, <<"not_found">>}
- end.
+ end).
update_doc(Db, Doc, Options) ->
@@ -411,9 +354,7 @@ update_doc(#httpdb{} = HttpDb, #doc{id = DocId} = Doc, Options, Type) ->
{_, Error} ->
{error, Error}
end
- end);
-update_doc(Db, Doc, Options, Type) ->
- couch_db:update_doc(Db, Doc, Options, Type).
+ end).
update_docs(Db, DocList, Options) ->
@@ -468,10 +409,7 @@ update_docs(#httpdb{} = HttpDb, DocList, Options, UpdateType) ->
{error, request_body_too_large};
(417, _, Results) when is_list(Results) ->
{ok, bulk_results_to_errors(DocList, Results, remote)}
- end);
-update_docs(Db, DocList, Options, UpdateType) ->
- Result = couch_db:update_docs(Db, DocList, Options, UpdateType),
- {ok, bulk_results_to_errors(DocList, Result, UpdateType)}.
+ end).
changes_since(#httpdb{headers = Headers1, timeout = InactiveTimeout} = HttpDb,
@@ -538,38 +476,7 @@ changes_since(#httpdb{headers = Headers1, timeout = InactiveTimeout} = HttpDb,
throw(retry_no_limit);
exit:{http_request_failed, _, _, _} = Error ->
throw({retry_limit, Error})
- end;
-changes_since(Db, Style, StartSeq, UserFun, Options) ->
- DocIds = get_value(doc_ids, Options),
- Selector = get_value(selector, Options),
- Filter = case {DocIds, Selector} of
- {undefined, undefined} ->
- ?b2l(get_value(filter, Options, <<>>));
- {_, undefined} ->
- "_doc_ids";
- {undefined, _} ->
- "_selector"
- end,
- Args = #changes_args{
- style = Style,
- since = StartSeq,
- filter = Filter,
- feed = case get_value(continuous, Options, false) of
- true ->
- "continuous";
- false ->
- "normal"
- end,
- timeout = infinity
- },
- QueryParams = get_value(query_params, Options, {[]}),
- Req = changes_json_req(Db, Filter, QueryParams, Options),
- ChangesFeedFun = couch_changes:handle_db_changes(Args, {json_req, Req}, Db),
- ChangesFeedFun(fun({change, Change, _}, _) ->
- UserFun(json_to_doc_info(Change));
- (_, _) ->
- ok
- end).
+ end.
% internal functions
@@ -614,29 +521,6 @@ parse_changes_feed(Options, UserFun, DataStreamFun) ->
json_stream_parse:events(DataStreamFun, EventFun)
end.
-changes_json_req(_Db, "", _QueryParams, _Options) ->
- {[]};
-changes_json_req(_Db, "_doc_ids", _QueryParams, Options) ->
- {[{<<"doc_ids">>, get_value(doc_ids, Options)}]};
-changes_json_req(_Db, "_selector", _QueryParams, Options) ->
- {[{<<"selector">>, get_value(selector, Options)}]};
-changes_json_req(Db, FilterName, {QueryParams}, _Options) ->
- {ok, Info} = couch_db:get_db_info(Db),
- % simulate a request to db_name/_changes
- {[
- {<<"info">>, {Info}},
- {<<"id">>, null},
- {<<"method">>, 'GET'},
- {<<"path">>, [couch_db:name(Db), <<"_changes">>]},
- {<<"query">>, {[{<<"filter">>, FilterName} | QueryParams]}},
- {<<"headers">>, []},
- {<<"body">>, []},
- {<<"peer">>, <<"replicator">>},
- {<<"form">>, []},
- {<<"cookie">>, []},
- {<<"userCtx">>, couch_util:json_user_ctx(Db)}
- ]}.
-
options_to_query_args(HttpDb, Path, Options0) ->
case lists:keytake(max_url_len, 1, Options0) of
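
With the local clauses removed, every function in couch_replicator_api_wrap has a single #httpdb{} head, so a non-HTTP endpoint now fails fast with function_clause instead of silently taking the couch_db path. A hedged sketch of the remote-only call sequence (a real #httpdb{} normally comes out of parse_rep_db/3 rather than hand construction):

    {ok, Db} = couch_replicator_api_wrap:db_open(
        #httpdb{url = "http://127.0.0.1:5984/db/"}),
    {ok, Info} = couch_replicator_api_wrap:get_db_info(Db),
    ok = couch_replicator_api_wrap:db_close(Db).
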
diff --git a/src/couch_replicator/src/couch_replicator_doc_processor.erl b/src/couch_replicator/src/couch_replicator_doc_processor.erl
index 1b43598da..772037d8d 100644
--- a/src/couch_replicator/src/couch_replicator_doc_processor.erl
+++ b/src/couch_replicator/src/couch_replicator_doc_processor.erl
@@ -886,8 +886,8 @@ change() ->
{<<"id">>, ?DOC1},
{doc, {[
{<<"_id">>, ?DOC1},
- {<<"source">>, <<"src">>},
- {<<"target">>, <<"tgt">>}
+ {<<"source">>, <<"http://srchost.local/src">>},
+ {<<"target">>, <<"http://tgthost.local/tgt">>}
]}}
]}.
@@ -897,8 +897,8 @@ change(State) ->
{<<"id">>, ?DOC1},
{doc, {[
{<<"_id">>, ?DOC1},
- {<<"source">>, <<"src">>},
- {<<"target">>, <<"tgt">>},
+ {<<"source">>, <<"http://srchost.local/src">>},
+ {<<"target">>, <<"http://tgthost.local/tgt">>},
{<<"_replication_state">>, State}
]}}
]}.
@@ -910,8 +910,8 @@ deleted_change() ->
{<<"deleted">>, true},
{doc, {[
{<<"_id">>, ?DOC1},
- {<<"source">>, <<"src">>},
- {<<"target">>, <<"tgt">>}
+ {<<"source">>, <<"http://srchost.local/src">>},
+ {<<"target">>, <<"http://tgthost.local/tgt">>}
]}}
]}.
diff --git a/src/couch_replicator/src/couch_replicator_doc_processor_worker.erl b/src/couch_replicator/src/couch_replicator_doc_processor_worker.erl
index aa048bfab..a4c829323 100644
--- a/src/couch_replicator/src/couch_replicator_doc_processor_worker.erl
+++ b/src/couch_replicator/src/couch_replicator_doc_processor_worker.erl
@@ -137,7 +137,7 @@ maybe_add_job_to_scheduler({DbName, DocId}, Rep, WRef) ->
-define(DB, <<"db">>).
-define(DOC1, <<"doc1">>).
--define(R1, {"0b7831e9a41f9322a8600ccfa02245f2", ""}).
+-define(R1, {"ad08e05057046eabe898a2572bbfb573", ""}).
doc_processor_worker_test_() ->
@@ -277,8 +277,8 @@ did_not_add_job() ->
change() ->
{[
{<<"_id">>, ?DOC1},
- {<<"source">>, <<"src">>},
- {<<"target">>, <<"tgt">>}
+ {<<"source">>, <<"http://srchost.local/src">>},
+ {<<"target">>, <<"http://tgthost.local/tgt">>}
]}.
-endif.
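
The new ?R1 value is a consequence of the record change, not an arbitrary edit: replication ids are derived from a digest of the id components assembled in maybe_append_filters/2, so dropping user_ctx from the endpoint terms changes the digested term and therefore the id the test must expect. A rough sketch of the idea, assuming an md5-style hex digest of the component term (not the exact implementation):

    RepId = couch_util:to_hex(
        couch_hash:md5_hash(term_to_binary([UUID, SrcInfo, TgtInfo]))).
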
diff --git a/src/couch_replicator/src/couch_replicator_docs.erl b/src/couch_replicator/src/couch_replicator_docs.erl
index bbf9694d7..2d6db1b73 100644
--- a/src/couch_replicator/src/couch_replicator_docs.erl
+++ b/src/couch_replicator/src/couch_replicator_docs.erl
@@ -423,8 +423,8 @@ parse_rep_db(<<"http://", _/binary>> = Url, Proxy, Options) ->
parse_rep_db(<<"https://", _/binary>> = Url, Proxy, Options) ->
parse_rep_db({[{<<"url">>, Url}]}, Proxy, Options);
-parse_rep_db(<<DbName/binary>>, _Proxy, _Options) ->
- DbName;
+parse_rep_db(<<_/binary>>, _Proxy, _Options) ->
+ throw({error, local_endpoints_not_supported});
parse_rep_db(undefined, _Proxy, _Options) ->
throw({error, <<"Missing replicator database">>}).
@@ -822,4 +822,29 @@ t_vdu_does_not_crash_on_save(DbName) ->
?assertEqual({ok, forbidden}, save_rep_doc(DbName, Doc))
end).
+
+local_replication_endpoint_error_test_() ->
+ {
+ foreach,
+ fun () -> meck:expect(config, get,
+ fun(_, _, Default) -> Default end)
+ end,
+ fun (_) -> meck:unload() end,
+ [
+ t_error_on_local_endpoint()
+ ]
+ }.
+
+
+t_error_on_local_endpoint() ->
+ ?_test(begin
+ RepDoc = {[
+ {<<"_id">>, <<"someid">>},
+ {<<"source">>, <<"localdb">>},
+ {<<"target">>, <<"http://somehost.local/tgt">>}
+ ]},
+ Expect = local_endpoints_not_supported,
+ ?assertThrow({bad_rep_doc, Expect}, parse_rep_doc_without_id(RepDoc))
+ end).
+
-endif.
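
The practical upshot for users is that a replication document must now name both endpoints by URL. As an EJSON term, mirroring the test fixtures above:

    RepDoc = {[
        {<<"_id">>, <<"someid">>},
        {<<"source">>, <<"http://srchost.local/src">>},
        {<<"target">>, <<"http://tgthost.local/tgt">>}
    ]}.
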
diff --git a/src/couch_replicator/src/couch_replicator_filters.erl b/src/couch_replicator/src/couch_replicator_filters.erl
index 5668820d1..c8980001a 100644
--- a/src/couch_replicator/src/couch_replicator_filters.erl
+++ b/src/couch_replicator/src/couch_replicator_filters.erl
@@ -14,7 +14,7 @@
-export([
parse/1,
- fetch/4,
+ fetch/3,
view_type/2,
ejsort/1
]).
@@ -63,11 +63,11 @@ parse(Options) ->
% Fetches body of filter function from source database. Guaranteed to either
% return {ok, Body} or {error, Reason}. Also assume this function might
% block due to network / socket issues for an undetermined amount of time.
--spec fetch(binary(), binary(), binary(), #user_ctx{}) ->
+-spec fetch(binary(), binary(), binary()) ->
{ok, {[_]}} | {error, binary()}.
-fetch(DDocName, FilterName, Source, UserCtx) ->
+fetch(DDocName, FilterName, Source) ->
{Pid, Ref} = spawn_monitor(fun() ->
- try fetch_internal(DDocName, FilterName, Source, UserCtx) of
+ try fetch_internal(DDocName, FilterName, Source) of
Resp ->
exit({exit_ok, Resp})
catch
@@ -108,9 +108,8 @@ view_type(Props, Options) ->
% Private functions
-fetch_internal(DDocName, FilterName, Source, UserCtx) ->
- Db = case (catch couch_replicator_api_wrap:db_open(Source,
- [{user_ctx, UserCtx}])) of
+fetch_internal(DDocName, FilterName, Source) ->
+ Db = case (catch couch_replicator_api_wrap:db_open(Source)) of
{ok, Db0} ->
Db0;
DbError ->
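
fetch/3 keeps the spawn_monitor isolation visible above: the filter fetch runs in a throwaway process that exits with {exit_ok, Resp} on success, and the caller turns the resulting 'DOWN' message back into a return value, so a hung socket can never wedge the caller. A minimal sketch of the pattern, with do_fetch/0 as a hypothetical stand-in for the blocking work:

    {Pid, Ref} = spawn_monitor(fun() ->
        exit({exit_ok, do_fetch()})   % normal result travels in the exit reason
    end),
    receive
        {'DOWN', Ref, process, Pid, {exit_ok, Resp}} -> {ok, Resp};
        {'DOWN', Ref, process, Pid, Reason} -> {error, Reason}
    end.
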
diff --git a/src/couch_replicator/src/couch_replicator_ids.erl b/src/couch_replicator/src/couch_replicator_ids.erl
index e10b98082..04e71c3ef 100644
--- a/src/couch_replicator/src/couch_replicator_ids.erl
+++ b/src/couch_replicator/src/couch_replicator_ids.erl
@@ -39,19 +39,19 @@ replication_id(#rep{options = Options} = Rep) ->
% If a change is made to how replications are identified,
% please add a new clause and increase ?REP_ID_VERSION.
-replication_id(#rep{user_ctx = UserCtx} = Rep, 4) ->
+replication_id(#rep{} = Rep, 4) ->
UUID = couch_server:get_uuid(),
- SrcInfo = get_v4_endpoint(UserCtx, Rep#rep.source),
- TgtInfo = get_v4_endpoint(UserCtx, Rep#rep.target),
+ SrcInfo = get_v4_endpoint(Rep#rep.source),
+ TgtInfo = get_v4_endpoint(Rep#rep.target),
maybe_append_filters([UUID, SrcInfo, TgtInfo], Rep);
-replication_id(#rep{user_ctx = UserCtx} = Rep, 3) ->
+replication_id(#rep{} = Rep, 3) ->
UUID = couch_server:get_uuid(),
- Src = get_rep_endpoint(UserCtx, Rep#rep.source),
- Tgt = get_rep_endpoint(UserCtx, Rep#rep.target),
+ Src = get_rep_endpoint(Rep#rep.source),
+ Tgt = get_rep_endpoint(Rep#rep.target),
maybe_append_filters([UUID, Src, Tgt], Rep);
-replication_id(#rep{user_ctx = UserCtx} = Rep, 2) ->
+replication_id(#rep{} = Rep, 2) ->
{ok, HostName} = inet:gethostname(),
Port = case (catch mochiweb_socket_server:get(couch_httpd, port)) of
P when is_number(P) ->
@@ -64,14 +64,14 @@ replication_id(#rep{user_ctx = UserCtx} = Rep, 2) ->
% ... mochiweb_socket_server:get(https, port)
list_to_integer(config:get("httpd", "port", "5984"))
end,
- Src = get_rep_endpoint(UserCtx, Rep#rep.source),
- Tgt = get_rep_endpoint(UserCtx, Rep#rep.target),
+ Src = get_rep_endpoint(Rep#rep.source),
+ Tgt = get_rep_endpoint(Rep#rep.target),
maybe_append_filters([HostName, Port, Src, Tgt], Rep);
-replication_id(#rep{user_ctx = UserCtx} = Rep, 1) ->
+replication_id(#rep{} = Rep, 1) ->
{ok, HostName} = inet:gethostname(),
- Src = get_rep_endpoint(UserCtx, Rep#rep.source),
- Tgt = get_rep_endpoint(UserCtx, Rep#rep.target),
+ Src = get_rep_endpoint(Rep#rep.source),
+ Tgt = get_rep_endpoint(Rep#rep.target),
maybe_append_filters([HostName, Src, Tgt], Rep).
@@ -91,7 +91,7 @@ convert({BaseId, Ext} = Id) when is_list(BaseId), is_list(Ext) ->
% Private functions
maybe_append_filters(Base,
- #rep{source = Source, user_ctx = UserCtx, options = Options}) ->
+ #rep{source = Source, options = Options}) ->
Base2 = Base ++
case couch_replicator_filters:parse(Options) of
{ok, nil} ->
@@ -99,7 +99,7 @@ maybe_append_filters(Base,
{ok, {view, Filter, QueryParams}} ->
[Filter, QueryParams];
{ok, {user, {Doc, Filter}, QueryParams}} ->
- case couch_replicator_filters:fetch(Doc, Filter, Source, UserCtx) of
+ case couch_replicator_filters:fetch(Doc, Filter, Source) of
{ok, Code} ->
[Code, QueryParams];
{error, Error} ->
@@ -127,23 +127,19 @@ maybe_append_options(Options, RepOptions) ->
end, [], Options).
-get_rep_endpoint(_UserCtx, #httpdb{url=Url, headers=Headers}) ->
+get_rep_endpoint(#httpdb{url=Url, headers=Headers}) ->
DefaultHeaders = (#httpdb{})#httpdb.headers,
- {remote, Url, Headers -- DefaultHeaders};
-get_rep_endpoint(UserCtx, <<DbName/binary>>) ->
- {local, DbName, UserCtx}.
+ {remote, Url, Headers -- DefaultHeaders}.
-get_v4_endpoint(UserCtx, #httpdb{} = HttpDb) ->
- {remote, Url, Headers} = get_rep_endpoint(UserCtx, HttpDb),
+get_v4_endpoint(#httpdb{} = HttpDb) ->
+ {remote, Url, Headers} = get_rep_endpoint(HttpDb),
{{UserFromHeaders, _}, HeadersWithoutBasicAuth} =
couch_replicator_utils:remove_basic_auth_from_headers(Headers),
{UserFromUrl, Host, NonDefaultPort, Path} = get_v4_url_info(Url),
User = pick_defined_value([UserFromUrl, UserFromHeaders]),
OAuth = undefined, % Keep this to ensure checkpoints don't change
- {remote, User, Host, NonDefaultPort, Path, HeadersWithoutBasicAuth, OAuth};
-get_v4_endpoint(UserCtx, <<DbName/binary>>) ->
- {local, DbName, UserCtx}.
+ {remote, User, Host, NonDefaultPort, Path, HeadersWithoutBasicAuth, OAuth}.
pick_defined_value(Values) ->
@@ -201,7 +197,7 @@ replication_id_convert_test_() ->
http_v4_endpoint_test_() ->
[?_assertMatch({remote, User, Host, Port, Path, HeadersNoAuth, undefined},
- get_v4_endpoint(nil, #httpdb{url = Url, headers = Headers})) ||
+ get_v4_endpoint(#httpdb{url = Url, headers = Headers})) ||
{{User, Host, Port, Path, HeadersNoAuth}, {Url, Headers}} <- [
{
{undefined, "host", default, "/", []},
diff --git a/src/couch_replicator/src/couch_replicator_scheduler.erl b/src/couch_replicator/src/couch_replicator_scheduler.erl
index e3dbede83..7fe417a53 100644
--- a/src/couch_replicator/src/couch_replicator_scheduler.erl
+++ b/src/couch_replicator/src/couch_replicator_scheduler.erl
@@ -494,7 +494,10 @@ start_jobs(Count, State) ->
-spec stop_jobs(non_neg_integer(), boolean(), #state{}) -> non_neg_integer().
-stop_jobs(Count, IsContinuous, State) ->
+stop_jobs(Count, _, _) when is_integer(Count), Count =< 0 ->
+ 0;
+
+stop_jobs(Count, IsContinuous, State) when is_integer(Count) ->
Running0 = running_jobs(),
ContinuousPred = fun(Job) -> is_continuous(Job) =:= IsContinuous end,
Running1 = lists:filter(ContinuousPred, Running0),
@@ -723,35 +726,25 @@ reset_job_process(#job{} = Job) ->
-spec reschedule(#state{}) -> ok.
reschedule(State) ->
- Running = running_job_count(),
- Pending = pending_job_count(),
- stop_excess_jobs(State, Running),
- start_pending_jobs(State, Running, Pending),
- rotate_jobs(State, Running, Pending),
- update_running_jobs_stats(State#state.stats_pid),
- ok.
+ StopCount = stop_excess_jobs(State, running_job_count()),
+ rotate_jobs(State, StopCount),
+ update_running_jobs_stats(State#state.stats_pid).
--spec stop_excess_jobs(#state{}, non_neg_integer()) -> ok.
+-spec stop_excess_jobs(#state{}, non_neg_integer()) -> non_neg_integer().
stop_excess_jobs(State, Running) ->
#state{max_jobs=MaxJobs} = State,
- StopCount = Running - MaxJobs,
- if StopCount =< 0 -> ok; true ->
- Stopped = stop_jobs(StopCount, true, State),
- OneshotLeft = StopCount - Stopped,
- if OneshotLeft =< 0 -> ok; true ->
- stop_jobs(OneshotLeft, false, State),
- ok
- end
- end.
+ StopCount = max(0, Running - MaxJobs),
+ Stopped = stop_jobs(StopCount, true, State),
+ OneshotLeft = StopCount - Stopped,
+ stop_jobs(OneshotLeft, false, State),
+ StopCount.
start_pending_jobs(State) ->
- start_pending_jobs(State, running_job_count(), pending_job_count()).
-
-
-start_pending_jobs(State, Running, Pending) ->
#state{max_jobs=MaxJobs} = State,
+ Running = running_job_count(),
+ Pending = pending_job_count(),
if Running < MaxJobs, Pending > 0 ->
start_jobs(MaxJobs - Running, State);
true ->
@@ -759,13 +752,19 @@ start_pending_jobs(State, Running, Pending) ->
end.
--spec rotate_jobs(#state{}, non_neg_integer(), non_neg_integer()) -> ok.
-rotate_jobs(State, Running, Pending) ->
+-spec rotate_jobs(#state{}, non_neg_integer()) -> ok.
+rotate_jobs(State, ChurnSoFar) ->
#state{max_jobs=MaxJobs, max_churn=MaxChurn} = State,
- if Running == MaxJobs, Pending > 0 ->
- RotateCount = lists:min([Pending, Running, MaxChurn]),
- StopCount = stop_jobs(RotateCount, true, State),
- start_jobs(StopCount, State);
+ Running = running_job_count(),
+ Pending = pending_job_count(),
+ % Reduce MaxChurn by the number of already stopped jobs in the
+ % current rescheduling cycle.
+ Churn = max(0, MaxChurn - ChurnSoFar),
+ if Running =< MaxJobs ->
+ StopCount = lists:min([Pending, Running, Churn]),
+ stop_jobs(StopCount, true, State),
+ StartCount = max(0, MaxJobs - running_job_count()),
+ start_jobs(StartCount, State);
true ->
ok
end.
@@ -1047,6 +1046,7 @@ scheduler_test_() ->
t_excess_prefer_continuous_first(),
t_stop_oldest_first(),
t_start_oldest_first(),
+ t_jobs_churn_even_if_not_all_max_jobs_are_running(),
t_dont_stop_if_nothing_pending(),
t_max_churn_limits_number_of_rotated_jobs(),
t_existing_jobs(),
@@ -1056,7 +1056,7 @@ scheduler_test_() ->
t_rotate_continuous_only_if_mixed(),
t_oneshot_dont_get_starting_priority(),
t_oneshot_will_hog_the_scheduler(),
- t_if_excess_is_trimmed_rotation_doesnt_happen(),
+ t_if_excess_is_trimmed_rotation_still_happens(),
t_if_transient_job_crashes_it_gets_removed(),
t_if_permanent_job_crashes_it_stays_in_ets(),
t_job_summary_running(),
@@ -1177,10 +1177,10 @@ t_stop_oldest_first() ->
continuous_running(5)
],
setup_jobs(Jobs),
- reschedule(mock_state(2)),
+ reschedule(mock_state(2, 1)),
?assertEqual({2, 1}, run_stop_count()),
?assertEqual([4], jobs_stopped()),
- reschedule(mock_state(1)),
+ reschedule(mock_state(1, 1)),
?assertEqual([7], jobs_running())
end).
@@ -1192,6 +1192,22 @@ t_start_oldest_first() ->
?assertEqual({1, 2}, run_stop_count()),
?assertEqual([2], jobs_running()),
reschedule(mock_state(2)),
+ ?assertEqual({2, 1}, run_stop_count()),
+ % After rescheduling with max_jobs = 2, job 2 was stopped, and jobs 5
+ % and 7 should be running.
+ ?assertEqual([2], jobs_stopped())
+ end).
+
+
+t_jobs_churn_even_if_not_all_max_jobs_are_running() ->
+ ?_test(begin
+ setup_jobs([
+ continuous_running(7),
+ continuous(2),
+ continuous(5)
+ ]),
+ reschedule(mock_state(2, 2)),
+ ?assertEqual({2, 1}, run_stop_count()),
?assertEqual([7], jobs_stopped())
end).
@@ -1289,7 +1305,7 @@ t_oneshot_will_hog_the_scheduler() ->
end).
-t_if_excess_is_trimmed_rotation_doesnt_happen() ->
+t_if_excess_is_trimmed_rotation_still_happens() ->
?_test(begin
Jobs = [
continuous(1),
@@ -1298,7 +1314,7 @@ t_if_excess_is_trimmed_rotation_doesnt_happen() ->
],
setup_jobs(Jobs),
reschedule(mock_state(1)),
- ?assertEqual([3], jobs_running())
+ ?assertEqual([1], jobs_running())
end).
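
The scheduler change is easiest to see with numbers, e.g. the new t_jobs_churn_even_if_not_all_max_jobs_are_running case: max_jobs = 2, max_churn = 2, one continuous job running and two pending. stop_excess_jobs/2 stops nothing (there is no excess), the full churn budget reaches rotate_jobs/2, and rotation now fires because the guard is Running =< MaxJobs rather than the old Running == MaxJobs. The arithmetic, as a sketch:

    ChurnSoFar = 0,                         % nothing was stopped as excess
    Churn = max(0, 2 - ChurnSoFar),         % = 2, full rotation budget remains
    StopCount = lists:min([2, 1, Churn]),   % min(Pending, Running, Churn) = 1
    StartCount = max(0, 2 - 0).             % = 2: both slots refill from pending
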
diff --git a/src/couch_replicator/src/couch_replicator_scheduler_job.erl b/src/couch_replicator/src/couch_replicator_scheduler_job.erl
index 412ff7d05..565a2bd97 100644
--- a/src/couch_replicator/src/couch_replicator_scheduler_job.erl
+++ b/src/couch_replicator/src/couch_replicator_scheduler_job.erl
@@ -40,8 +40,6 @@
]).
-import(couch_replicator_utils, [
- start_db_compaction_notifier/2,
- stop_db_compaction_notifier/1,
pp_rep_id/1
]).
@@ -75,8 +73,6 @@
workers,
stats = couch_replicator_stats:new(),
session_id,
- source_db_compaction_notifier = nil,
- target_db_compaction_notifier = nil,
source_monitor = nil,
target_monitor = nil,
source_seq = nil,
@@ -226,21 +222,6 @@ handle_call({report_seq_done, Seq, StatsInc}, From,
update_task(NewState),
{noreply, NewState}.
-handle_cast({db_compacted, DbName}, State) ->
- #rep_state{
- source = Source,
- target = Target
- } = State,
- SourceName = couch_replicator_utils:local_db_name(Source),
- TargetName = couch_replicator_utils:local_db_name(Target),
- case DbName of
- SourceName ->
- {ok, NewSource} = couch_db:reopen(Source),
- {noreply, State#rep_state{source = NewSource}};
- TargetName ->
- {ok, NewTarget} = couch_db:reopen(Target),
- {noreply, State#rep_state{target = NewTarget}}
- end;
handle_cast(checkpoint, State) ->
case do_checkpoint(State) of
@@ -412,8 +393,6 @@ terminate(Reason, State) ->
terminate_cleanup(State) ->
update_task(State),
- stop_db_compaction_notifier(State#rep_state.source_db_compaction_notifier),
- stop_db_compaction_notifier(State#rep_state.target_db_compaction_notifier),
couch_replicator_api_wrap:db_close(State#rep_state.source),
couch_replicator_api_wrap:db_close(State#rep_state.target).
@@ -572,16 +551,16 @@ init_state(Rep) ->
#rep{
id = {BaseId, _Ext},
source = Src0, target = Tgt,
- options = Options, user_ctx = UserCtx,
+ options = Options,
type = Type, view = View,
start_time = StartTime,
stats = Stats
} = Rep,
% Adjust minimum number of http source connections to 2 to avoid deadlock
Src = adjust_maxconn(Src0, BaseId),
- {ok, Source} = couch_replicator_api_wrap:db_open(Src, [{user_ctx, UserCtx}]),
+ {ok, Source} = couch_replicator_api_wrap:db_open(Src),
{CreateTargetParams} = get_value(create_target_params, Options, {[]}),
- {ok, Target} = couch_replicator_api_wrap:db_open(Tgt, [{user_ctx, UserCtx}],
+ {ok, Target} = couch_replicator_api_wrap:db_open(Tgt,
get_value(create_target, Options, false), CreateTargetParams),
{ok, SourceInfo} = couch_replicator_api_wrap:get_db_info(Source),
@@ -613,10 +592,6 @@ init_state(Rep) ->
src_starttime = get_value(<<"instance_start_time">>, SourceInfo),
tgt_starttime = get_value(<<"instance_start_time">>, TargetInfo),
session_id = couch_uuids:random(),
- source_db_compaction_notifier =
- start_db_compaction_notifier(Source, self()),
- target_db_compaction_notifier =
- start_db_compaction_notifier(Target, self()),
source_monitor = db_monitor(Source),
target_monitor = db_monitor(Target),
source_seq = SourceSeq,
diff --git a/src/couch_replicator/src/couch_replicator_utils.erl b/src/couch_replicator/src/couch_replicator_utils.erl
index b0d706953..ccf241324 100644
--- a/src/couch_replicator/src/couch_replicator_utils.erl
+++ b/src/couch_replicator/src/couch_replicator_utils.erl
@@ -14,11 +14,6 @@
-export([
parse_rep_doc/2,
- open_db/1,
- close_db/1,
- local_db_name/1,
- start_db_compaction_notifier/2,
- stop_db_compaction_notifier/1,
replication_id/2,
sum_stats/2,
is_deleted/1,
@@ -32,9 +27,6 @@
normalize_rep/1
]).
--export([
- handle_db_event/3
-]).
-include_lib("couch/include/couch_db.hrl").
-include("couch_replicator.hrl").
@@ -46,50 +38,6 @@
]).
-open_db(#httpdb{} = HttpDb) ->
- HttpDb;
-open_db(Db) ->
- DbName = couch_db:name(Db),
- UserCtx = couch_db:get_user_ctx(Db),
- {ok, NewDb} = couch_db:open(DbName, [{user_ctx, UserCtx}]),
- NewDb.
-
-
-close_db(#httpdb{}) ->
- ok;
-close_db(Db) ->
- couch_db:close(Db).
-
-
-local_db_name(#httpdb{}) ->
- undefined;
-local_db_name(Db) ->
- couch_db:name(Db).
-
-
-start_db_compaction_notifier(#httpdb{}, _) ->
- nil;
-start_db_compaction_notifier(Db, Server) ->
- DbName = couch_db:name(Db),
- {ok, Pid} = couch_event:link_listener(
- ?MODULE, handle_db_event, Server, [{dbname, DbName}]
- ),
- Pid.
-
-
-stop_db_compaction_notifier(nil) ->
- ok;
-stop_db_compaction_notifier(Listener) ->
- couch_event:stop_listener(Listener).
-
-
-handle_db_event(DbName, compacted, Server) ->
- gen_server:cast(Server, {db_compacted, DbName}),
- {ok, Server};
-handle_db_event(_DbName, _Event, Server) ->
- {ok, Server}.
-
-
rep_error_to_binary(Error) ->
couch_util:to_binary(error_reason(Error)).
@@ -289,14 +237,14 @@ normalize_rep_test_() ->
?_test(begin
EJson1 = {[
{<<"source">>, <<"http://host.com/source_db">>},
- {<<"target">>, <<"local">>},
+ {<<"target">>, <<"http://target.local/db">>},
{<<"doc_ids">>, [<<"a">>, <<"c">>, <<"b">>]},
{<<"other_field">>, <<"some_value">>}
]},
Rep1 = couch_replicator_docs:parse_rep_doc_without_id(EJson1),
EJson2 = {[
{<<"other_field">>, <<"unrelated">>},
- {<<"target">>, <<"local">>},
+ {<<"target">>, <<"http://target.local/db">>},
{<<"source">>, <<"http://host.com/source_db">>},
{<<"doc_ids">>, [<<"c">>, <<"a">>, <<"b">>]},
{<<"other_field2">>, <<"unrelated2">>}
diff --git a/src/couch_replicator/src/couch_replicator_worker.erl b/src/couch_replicator/src/couch_replicator_worker.erl
index ec98fa0f3..986c32c0a 100644
--- a/src/couch_replicator/src/couch_replicator_worker.erl
+++ b/src/couch_replicator/src/couch_replicator_worker.erl
@@ -28,18 +28,11 @@
% TODO: maybe make both buffer max sizes configurable
-define(DOC_BUFFER_BYTE_SIZE, 512 * 1024). % for remote targets
--define(DOC_BUFFER_LEN, 10). % for local targets, # of documents
-define(MAX_BULK_ATT_SIZE, 64 * 1024).
-define(MAX_BULK_ATTS_PER_DOC, 8).
-define(STATS_DELAY, 10000000). % 10 seconds (in microseconds)
-define(MISSING_DOC_RETRY_MSEC, 2000).
--import(couch_replicator_utils, [
- open_db/1,
- close_db/1,
- start_db_compaction_notifier/2,
- stop_db_compaction_notifier/1
-]).
-import(couch_util, [
to_binary/1,
get_value/3
@@ -62,8 +55,6 @@
pending_fetch = nil,
flush_waiter = nil,
stats = couch_replicator_stats:new(),
- source_db_compaction_notifier = nil,
- target_db_compaction_notifier = nil,
batch = #batch{}
}).
@@ -71,14 +62,7 @@
start_link(Cp, #httpdb{} = Source, Target, ChangesManager, MaxConns) ->
gen_server:start_link(
- ?MODULE, {Cp, Source, Target, ChangesManager, MaxConns}, []);
-
-start_link(Cp, Source, Target, ChangesManager, _MaxConns) ->
- Pid = spawn_link(fun() ->
- erlang:put(last_stats_report, os:timestamp()),
- queue_fetch_loop(Source, Target, Cp, Cp, ChangesManager)
- end),
- {ok, Pid}.
+ ?MODULE, {Cp, Source, Target, ChangesManager, MaxConns}, []).
init({Cp, Source, Target, ChangesManager, MaxConns}) ->
@@ -92,12 +76,8 @@ init({Cp, Source, Target, ChangesManager, MaxConns}) ->
cp = Cp,
max_parallel_conns = MaxConns,
loop = LoopPid,
- source = open_db(Source),
- target = open_db(Target),
- source_db_compaction_notifier =
- start_db_compaction_notifier(Source, self()),
- target_db_compaction_notifier =
- start_db_compaction_notifier(Target, self())
+ source = Source,
+ target = Target
},
{ok, State}.
@@ -141,24 +121,6 @@ handle_call(flush, {Pid, _} = From,
{noreply, State2#state{flush_waiter = From}}.
-handle_cast({db_compacted, DbName} = Msg, #state{} = State) ->
- #state{
- source = Source,
- target = Target
- } = State,
- SourceName = couch_replicator_utils:local_db_name(Source),
- TargetName = couch_replicator_utils:local_db_name(Target),
- case DbName of
- SourceName ->
- {ok, NewSource} = couch_db:reopen(Source),
- {noreply, State#state{source = NewSource}};
- TargetName ->
- {ok, NewTarget} = couch_db:reopen(Target),
- {noreply, State#state{target = NewTarget}};
- _Else ->
- {stop, {unexpected_async_call, Msg}, State}
- end;
-
handle_cast(Msg, State) ->
{stop, {unexpected_async_call, Msg}, State}.
@@ -213,11 +175,8 @@ handle_info({'EXIT', Pid, Reason}, State) ->
{stop, {process_died, Pid, Reason}, State}.
-terminate(_Reason, State) ->
- close_db(State#state.source),
- close_db(State#state.target),
- stop_db_compaction_notifier(State#state.source_db_compaction_notifier),
- stop_db_compaction_notifier(State#state.target_db_compaction_notifier).
+terminate(_Reason, _State) ->
+ ok.
format_status(_Opt, [_PDict, State]) ->
#state{
@@ -253,20 +212,10 @@ queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager) ->
ok = gen_server:call(Cp, {report_seq_done, ReportSeq, Stats}, infinity),
queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager);
{changes, ChangesManager, Changes, ReportSeq} ->
- Target2 = open_db(Target),
- {IdRevs, Stats0} = find_missing(Changes, Target2),
- case Source of
- #httpdb{} ->
- ok = gen_server:call(Parent, {add_stats, Stats0}, infinity),
- remote_process_batch(IdRevs, Parent),
- {ok, Stats} = gen_server:call(Parent, flush, infinity);
- _Db ->
- Source2 = open_db(Source),
- Stats = local_process_batch(
- IdRevs, Cp, Source2, Target2, #batch{}, Stats0),
- close_db(Source2)
- end,
- close_db(Target2),
+ {IdRevs, Stats0} = find_missing(Changes, Target),
+ ok = gen_server:call(Parent, {add_stats, Stats0}, infinity),
+ remote_process_batch(IdRevs, Parent),
+ {ok, Stats} = gen_server:call(Parent, flush, infinity),
ok = gen_server:call(Cp, {report_seq_done, ReportSeq, Stats}, infinity),
erlang:put(last_stats_report, os:timestamp()),
couch_log:debug("Worker reported completion of seq ~p", [ReportSeq]),
@@ -274,32 +223,6 @@ queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager) ->
end.
-local_process_batch([], _Cp, _Src, _Tgt, #batch{docs = []}, Stats) ->
- Stats;
-
-local_process_batch([], Cp, Source, Target, #batch{docs = Docs, size = Size}, Stats) ->
- case Target of
- #httpdb{} ->
- couch_log:debug("Worker flushing doc batch of size ~p bytes", [Size]);
- _Db ->
- couch_log:debug("Worker flushing doc batch of ~p docs", [Size])
- end,
- Stats2 = flush_docs(Target, Docs),
- Stats3 = couch_replicator_utils:sum_stats(Stats, Stats2),
- local_process_batch([], Cp, Source, Target, #batch{}, Stats3);
-
-local_process_batch([IdRevs | Rest], Cp, Source, Target, Batch, Stats) ->
- {ok, {_, DocList, Stats2, _}} = fetch_doc(
- Source, IdRevs, fun local_doc_handler/2, {Target, [], Stats, Cp}),
- {Batch2, Stats3} = lists:foldl(
- fun(Doc, {Batch0, Stats0}) ->
- {Batch1, S} = maybe_flush_docs(Target, Batch0, Doc),
- {Batch1, couch_replicator_utils:sum_stats(Stats0, S)}
- end,
- {Batch, Stats2}, DocList),
- local_process_batch(Rest, Cp, Source, Target, Batch2, Stats3).
-
-
remote_process_batch([], _Parent) ->
ok;
@@ -319,10 +242,8 @@ remote_process_batch([{Id, Revs, PAs} | Rest], Parent) ->
spawn_doc_reader(Source, Target, FetchParams) ->
Parent = self(),
spawn_link(fun() ->
- Source2 = open_db(Source),
fetch_doc(
- Source2, FetchParams, fun remote_doc_handler/2, {Parent, Target}),
- close_db(Source2)
+ Source, FetchParams, fun remote_doc_handler/2, {Parent, Target})
end).
@@ -350,29 +271,6 @@ fetch_doc(Source, {Id, Revs, PAs}, DocHandler, Acc) ->
end.
-local_doc_handler({ok, Doc}, {Target, DocList, Stats, Cp}) ->
- Stats2 = couch_replicator_stats:increment(docs_read, Stats),
- case batch_doc(Doc) of
- true ->
- {ok, {Target, [Doc | DocList], Stats2, Cp}};
- false ->
- couch_log:debug("Worker flushing doc with attachments", []),
- Target2 = open_db(Target),
- Success = (flush_doc(Target2, Doc) =:= ok),
- close_db(Target2),
- Stats3 = case Success of
- true ->
- couch_replicator_stats:increment(docs_written, Stats2);
- false ->
- couch_replicator_stats:increment(doc_write_failures, Stats2)
- end,
- Stats4 = maybe_report_stats(Cp, Stats3),
- {ok, {Target, DocList, Stats4, Cp}}
- end;
-local_doc_handler(_, Acc) ->
- {ok, Acc}.
-
-
remote_doc_handler({ok, #doc{atts = []} = Doc}, {Parent, _} = Acc) ->
ok = gen_server:call(Parent, {batch_doc, Doc}, infinity),
{ok, Acc};
@@ -383,9 +281,7 @@ remote_doc_handler({ok, Doc}, {Parent, Target} = Acc) ->
% convenient to call it ASAP to avoid ibrowse inactivity timeouts.
Stats = couch_replicator_stats:new([{docs_read, 1}]),
couch_log:debug("Worker flushing doc with attachments", []),
- Target2 = open_db(Target),
- Success = (flush_doc(Target2, Doc) =:= ok),
- close_db(Target2),
+ Success = (flush_doc(Target, Doc) =:= ok),
{Result, Stats2} = case Success of
true ->
{{ok, Acc}, couch_replicator_stats:increment(docs_written, Stats)};
@@ -402,17 +298,13 @@ spawn_writer(Target, #batch{docs = DocList, size = Size}) ->
case {Target, Size > 0} of
{#httpdb{}, true} ->
couch_log:debug("Worker flushing doc batch of size ~p bytes", [Size]);
- {_Db, true} ->
- couch_log:debug("Worker flushing doc batch of ~p docs", [Size]);
_ ->
ok
end,
Parent = self(),
spawn_link(
fun() ->
- Target2 = open_db(Target),
- Stats = flush_docs(Target2, DocList),
- close_db(Target2),
+ Stats = flush_docs(Target, DocList),
ok = gen_server:call(Parent, {add_stats, Stats}, infinity)
end).
@@ -462,17 +354,6 @@ maybe_flush_docs(#httpdb{} = Target, Batch, Doc) ->
Stats = couch_replicator_stats:new(),
{#batch{docs = [JsonDoc | DocAcc], size = SizeAcc2}, Stats}
end
- end;
-
-maybe_flush_docs(Target, #batch{docs = DocAcc, size = SizeAcc}, Doc) ->
- case SizeAcc + 1 of
- SizeAcc2 when SizeAcc2 >= ?DOC_BUFFER_LEN ->
- couch_log:debug("Worker flushing doc batch of ~p docs", [SizeAcc2]),
- Stats = flush_docs(Target, [Doc | DocAcc]),
- {#batch{}, Stats};
- SizeAcc2 ->
- Stats = couch_replicator_stats:new(),
- {#batch{docs = [Doc | DocAcc], size = SizeAcc2}, Stats}
end.
diff --git a/src/couch_replicator/test/couch_replicator_attachments_too_large.erl b/src/couch_replicator/test/eunit/couch_replicator_attachments_too_large.erl
index 7fe84d2d9..ac4bb84f3 100644
--- a/src/couch_replicator/test/couch_replicator_attachments_too_large.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_attachments_too_large.erl
@@ -33,7 +33,7 @@ teardown(_, {Ctx, {Source, Target}}) ->
attachment_too_large_replication_test_() ->
- Pairs = [{local, remote}, {remote, local}, {remote, remote}],
+ Pairs = [{remote, remote}],
{
"Attachment size too large replication tests",
{
@@ -96,8 +96,6 @@ delete_db(DbName) ->
ok = couch_server:delete(DbName, [?ADMIN_CTX]).
-db_url(local, DbName) ->
- DbName;
db_url(remote, DbName) ->
Addr = config:get("httpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(couch_httpd, port),
diff --git a/src/couch_replicator/test/couch_replicator_compact_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl
index 7cc530c19..eb3fc82c5 100644
--- a/src/couch_replicator/test/couch_replicator_compact_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl
@@ -33,8 +33,6 @@ setup() ->
ok = couch_db:close(Db),
DbName.
-setup(local) ->
- setup();
setup(remote) ->
{remote, setup()};
setup({A, B}) ->
@@ -56,8 +54,7 @@ teardown(_, {Ctx, {Source, Target}}) ->
ok = test_util:stop_couch(Ctx).
compact_test_() ->
- Pairs = [{local, local}, {local, remote},
- {remote, local}, {remote, remote}],
+ Pairs = [{remote, remote}],
{
"Compaction during replication tests",
{
diff --git a/src/couch_replicator/test/couch_replicator_connection_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_connection_tests.erl
index e75cc5a63..e75cc5a63 100644
--- a/src/couch_replicator/test/couch_replicator_connection_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_connection_tests.erl
diff --git a/src/couch_replicator/test/couch_replicator_create_target_with_options_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_create_target_with_options_tests.erl
index 63310d39e..63310d39e 100644
--- a/src/couch_replicator/test/couch_replicator_create_target_with_options_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_create_target_with_options_tests.erl
diff --git a/src/couch_replicator/test/couch_replicator_filtered_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_filtered_tests.erl
index d34e9f020..70b25a31b 100644
--- a/src/couch_replicator/test/couch_replicator_filtered_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_filtered_tests.erl
@@ -60,8 +60,7 @@ teardown(_, {Ctx, {Source, Target}}) ->
ok = test_util:stop_couch(Ctx).
filtered_replication_test_() ->
- Pairs = [{local, local}, {local, remote},
- {remote, local}, {remote, remote}],
+ Pairs = [{remote, remote}],
{
"Filtered replication tests",
{
@@ -72,8 +71,7 @@ filtered_replication_test_() ->
}.
query_filtered_replication_test_() ->
- Pairs = [{local, local}, {local, remote},
- {remote, local}, {remote, remote}],
+ Pairs = [{remote, remote}],
{
"Filtered with query replication tests",
{
@@ -84,7 +82,7 @@ query_filtered_replication_test_() ->
}.
view_filtered_replication_test_() ->
- Pairs = [{local, local}],
+ Pairs = [{remote, remote}],
{
"Filtered with a view replication tests",
{
@@ -236,8 +234,6 @@ create_docs(DbName) ->
delete_db(DbName) ->
ok = couch_server:delete(DbName, [?ADMIN_CTX]).
-db_url(local, DbName) ->
- DbName;
db_url(remote, DbName) ->
Addr = config:get("httpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(couch_httpd, port),
diff --git a/src/couch_replicator/test/couch_replicator_httpc_pool_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_httpc_pool_tests.erl
index c4ad4e9b6..c4ad4e9b6 100644
--- a/src/couch_replicator/test/couch_replicator_httpc_pool_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_httpc_pool_tests.erl
diff --git a/src/couch_replicator/test/couch_replicator_id_too_long_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_id_too_long_tests.erl
index 70eda0566..1447acfa7 100644
--- a/src/couch_replicator/test/couch_replicator_id_too_long_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_id_too_long_tests.erl
@@ -33,8 +33,7 @@ teardown(_, {Ctx, {Source, Target}}) ->
id_too_long_replication_test_() ->
- Pairs = [{local, local}, {local, remote},
- {remote, local}, {remote, remote}],
+ Pairs = [{remote, remote}],
{
"Doc id too long tests",
{
@@ -86,8 +85,6 @@ delete_db(DbName) ->
ok = couch_server:delete(DbName, [?ADMIN_CTX]).
-db_url(local, DbName) ->
- DbName;
db_url(remote, DbName) ->
Addr = config:get("httpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(couch_httpd, port),
diff --git a/src/couch_replicator/test/couch_replicator_large_atts_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_large_atts_tests.erl
index b9adf5c4b..27c89a0cd 100644
--- a/src/couch_replicator/test/couch_replicator_large_atts_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_large_atts_tests.erl
@@ -33,8 +33,6 @@ setup() ->
ok = couch_db:close(Db),
DbName.
-setup(local) ->
- setup();
setup(remote) ->
{remote, setup()};
setup({A, B}) ->
@@ -58,8 +56,7 @@ teardown(_, {Ctx, {Source, Target}}) ->
ok = test_util:stop_couch(Ctx).
large_atts_test_() ->
- Pairs = [{local, local}, {local, remote},
- {remote, local}, {remote, remote}],
+ Pairs = [{remote, remote}],
{
"Replicate docs with large attachments",
{
diff --git a/src/couch_replicator/test/couch_replicator_many_leaves_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_many_leaves_tests.erl
index eee5b1647..be1bfa344 100644
--- a/src/couch_replicator/test/couch_replicator_many_leaves_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_many_leaves_tests.erl
@@ -37,8 +37,7 @@ setup() ->
ok = couch_db:close(Db),
DbName.
-setup(local) ->
- setup();
+
setup(remote) ->
{remote, setup()};
setup({A, B}) ->
@@ -60,8 +59,7 @@ teardown(_, {Ctx, {Source, Target}}) ->
ok = test_util:stop_couch(Ctx).
docs_with_many_leaves_test_() ->
- Pairs = [{local, local}, {local, remote},
- {remote, local}, {remote, remote}],
+ Pairs = [{remote, remote}],
{
"Replicate documents with many leaves",
{
diff --git a/src/couch_replicator/test/couch_replicator_missing_stubs_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_missing_stubs_tests.erl
index c1681781f..ff08b5ee5 100644
--- a/src/couch_replicator/test/couch_replicator_missing_stubs_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_missing_stubs_tests.erl
@@ -30,8 +30,6 @@ setup() ->
ok = couch_db:close(Db),
DbName.
-setup(local) ->
- setup();
setup(remote) ->
{remote, setup()};
setup({A, B}) ->
@@ -53,8 +51,7 @@ teardown(_, {Ctx, {Source, Target}}) ->
ok = test_util:stop_couch(Ctx).
missing_stubs_test_() ->
- Pairs = [{local, local}, {local, remote},
- {remote, local}, {remote, remote}],
+ Pairs = [{remote, remote}],
{
"Replicate docs with missing stubs (COUCHDB-1365)",
{
diff --git a/src/couch_replicator/test/couch_replicator_proxy_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_proxy_tests.erl
index 4f545bcb5..4f545bcb5 100644
--- a/src/couch_replicator/test/couch_replicator_proxy_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_proxy_tests.erl
diff --git a/src/couch_replicator/test/couch_replicator_rate_limiter_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_rate_limiter_tests.erl
index 034550aec..034550aec 100644
--- a/src/couch_replicator/test/couch_replicator_rate_limiter_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_rate_limiter_tests.erl
diff --git a/src/couch_replicator/test/couch_replicator_retain_stats_between_job_runs.erl b/src/couch_replicator/test/eunit/couch_replicator_retain_stats_between_job_runs.erl
index 3b7377b78..3b7377b78 100644
--- a/src/couch_replicator/test/couch_replicator_retain_stats_between_job_runs.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_retain_stats_between_job_runs.erl
diff --git a/src/couch_replicator/test/couch_replicator_selector_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_selector_tests.erl
index a7f4c5df3..7d92bdcb1 100644
--- a/src/couch_replicator/test/couch_replicator_selector_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_selector_tests.erl
@@ -31,8 +31,7 @@ teardown(_, {Ctx, {Source, Target}}) ->
ok = test_util:stop_couch(Ctx).
selector_replication_test_() ->
- Pairs = [{local, local}, {local, remote},
- {remote, local}, {remote, remote}],
+ Pairs = [{remote, remote}],
{
"Selector filtered replication tests",
{
@@ -113,8 +112,6 @@ create_docs(DbName) ->
delete_db(DbName) ->
ok = couch_server:delete(DbName, [?ADMIN_CTX]).
-db_url(local, DbName) ->
- DbName;
db_url(remote, DbName) ->
Addr = config:get("httpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(couch_httpd, port),
diff --git a/src/couch_replicator/test/couch_replicator_small_max_request_size_target.erl b/src/couch_replicator/test/eunit/couch_replicator_small_max_request_size_target.erl
index af3a285f5..8aebbe151 100644
--- a/src/couch_replicator/test/couch_replicator_small_max_request_size_target.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_small_max_request_size_target.erl
@@ -19,9 +19,6 @@ setup() ->
DbName.
-setup(local) ->
- setup();
-
setup(remote) ->
{remote, setup()};
@@ -47,7 +44,7 @@ teardown(_, {Ctx, {Source, Target}}) ->
reduce_max_request_size_test_() ->
- Pairs = [{local, remote}, {remote, remote}],
+ Pairs = [{remote, remote}],
{
"Replicate docs when target has a small max_http_request_size",
{
diff --git a/src/couch_replicator/test/couch_replicator_test_helper.erl b/src/couch_replicator/test/eunit/couch_replicator_test_helper.erl
index fd0409164..fd0409164 100644
--- a/src/couch_replicator/test/couch_replicator_test_helper.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_test_helper.erl
diff --git a/src/couch_replicator/test/couch_replicator_use_checkpoints_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_use_checkpoints_tests.erl
index c2fcf8bf1..8e4a21dbb 100644
--- a/src/couch_replicator/test/couch_replicator_use_checkpoints_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_use_checkpoints_tests.erl
@@ -51,8 +51,6 @@ setup() ->
ok = couch_db:close(Db),
DbName.
-setup(local) ->
- setup();
setup(remote) ->
{remote, setup()};
setup({_, Fun, {A, B}}) ->
@@ -88,8 +86,7 @@ use_checkpoints_test_() ->
}.
use_checkpoints_tests(UseCheckpoints, Fun) ->
- Pairs = [{local, local}, {local, remote},
- {remote, local}, {remote, remote}],
+ Pairs = [{remote, remote}],
{
"use_checkpoints: " ++ atom_to_list(UseCheckpoints),
{
diff --git a/src/ddoc_cache/test/ddoc_cache_basic_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_basic_test.erl
index b576d88bb..b576d88bb 100644
--- a/src/ddoc_cache/test/ddoc_cache_basic_test.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_basic_test.erl
diff --git a/src/ddoc_cache/test/ddoc_cache_coverage_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_coverage_test.erl
index b1a185bdc..b1a185bdc 100644
--- a/src/ddoc_cache/test/ddoc_cache_coverage_test.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_coverage_test.erl
diff --git a/src/ddoc_cache/test/ddoc_cache_disabled_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_disabled_test.erl
index d46bdde32..d46bdde32 100644
--- a/src/ddoc_cache/test/ddoc_cache_disabled_test.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_disabled_test.erl
diff --git a/src/ddoc_cache/test/ddoc_cache_entry_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_entry_test.erl
index c992bea8d..c992bea8d 100644
--- a/src/ddoc_cache/test/ddoc_cache_entry_test.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_entry_test.erl
diff --git a/src/ddoc_cache/test/ddoc_cache_ev.erl b/src/ddoc_cache/test/eunit/ddoc_cache_ev.erl
index a451342cf..a451342cf 100644
--- a/src/ddoc_cache/test/ddoc_cache_ev.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_ev.erl
diff --git a/src/ddoc_cache/test/ddoc_cache_eviction_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_eviction_test.erl
index bd61afc37..bd61afc37 100644
--- a/src/ddoc_cache/test/ddoc_cache_eviction_test.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_eviction_test.erl
diff --git a/src/ddoc_cache/test/ddoc_cache_lru_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl
index e37f1c090..9a5391587 100644
--- a/src/ddoc_cache/test/ddoc_cache_lru_test.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl
@@ -47,14 +47,17 @@ stop_couch(Ctx) ->
-check_not_started_test() ->
+check_not_started_test_() ->
% Starting couch, but not ddoc_cache
- Ctx = test_util:start_couch(),
- try
- Key = {ddoc_cache_entry_custom, {<<"dbname">>, ?MODULE}},
- ?assertEqual({ok, <<"dbname">>}, ddoc_cache_lru:open(Key))
- after
- test_util:stop_couch(Ctx)
- end.
-
+ {
+ setup,
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
+ [
+ fun(_) ->
+ Key = {ddoc_cache_entry_custom, {<<"dbname">>, ?MODULE}},
+ ?assertEqual({ok, <<"dbname">>}, ddoc_cache_lru:open(Key))
+ end
+ ]
+ }.
check_lru_test_() ->
{
diff --git a/src/ddoc_cache/test/ddoc_cache_no_cache_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl
index 637a6e872..a1937a088 100644
--- a/src/ddoc_cache/test/ddoc_cache_no_cache_test.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl
@@ -35,45 +35,43 @@ return_error(_DDocId) ->
{error, timeout}.
-start(Resp) ->
- Ctx = ddoc_cache_tutil:start_couch(),
+no_cache_test_() ->
+ {
+ "ddoc_cache no cache test",
+ {
+ setup,
+ fun ddoc_cache_tutil:start_couch/0, fun ddoc_cache_tutil:stop_couch/1,
+ {
+ foreachx,
+ fun setup/1, fun teardown/2,
+ [
+ {fun ddoc/1, fun no_cache_open_ok_test/2},
+ {fun not_found/1, fun no_cache_open_not_found_test/2},
+ {fun return_error/1, fun no_cache_open_error_test/2}
+ ]
+ }
+ }
+ }.
+
+setup(Resp) ->
meck:new(fabric),
meck:expect(fabric, open_doc, fun(_, DDocId, _) ->
Resp(DDocId)
- end),
- Ctx.
-
-
-stop(Ctx) ->
- meck:unload(),
- ddoc_cache_tutil:stop_couch(Ctx).
-
-
-no_cache_open_ok_test() ->
- Ctx = start(fun ddoc/1),
- try
- Resp = ddoc_cache:open_doc(<<"foo">>, <<"bar">>),
- ?assertEqual(ddoc(<<"bar">>), Resp)
- after
- stop(Ctx)
- end.
-
-
-no_cache_open_not_found_test() ->
- Ctx = start(fun not_found/1),
- try
- Resp = ddoc_cache:open_doc(<<"foo">>, <<"bar">>),
- ?assertEqual(not_found(<<"bar">>), Resp)
- after
- stop(Ctx)
- end.
-
-
-no_cache_open_error_test() ->
- Ctx = start(fun return_error/1),
- try
- Resp = ddoc_cache:open_doc(<<"foo">>, <<"bar">>),
- ?assertEqual(return_error(<<"bar">>), Resp)
- after
- stop(Ctx)
- end.
+ end).
+
+teardown(_, _) ->
+ meck:unload().
+
+no_cache_open_ok_test(_, _) ->
+ Resp = ddoc_cache:open_doc(<<"foo">>, <<"bar">>),
+ ?_assertEqual(ddoc(<<"bar">>), Resp).
+
+
+no_cache_open_not_found_test(_, _) ->
+ Resp = ddoc_cache:open_doc(<<"foo">>, <<"baz">>),
+ ?_assertEqual(not_found(<<"baz">>), Resp).
+
+
+no_cache_open_error_test(_, _) ->
+ Resp = ddoc_cache:open_doc(<<"foo">>, <<"bif">>),
+ ?_assertEqual(return_error(<<"bif">>), Resp).
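
The rewrite above leans on EUnit's foreachx fixture: for each {X, Instantiator} pair, setup(X) runs first, Instantiator(X, R) produces the lazy tests, and teardown(X, R) unloads the mock even when an assertion fails, replacing the hand-rolled try ... after plumbing. The general shape, as a runnable sketch:

    fixture_() ->
        {foreachx,
            fun(X) -> X * 2 end,                  % setup(X)       -> R
            fun(_X, _R) -> ok end,                % teardown(X, R)
            [
                {21, fun(X, R) -> ?_assertEqual(X * 2, R) end}
            ]}.
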
diff --git a/src/ddoc_cache/test/ddoc_cache_open_error_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_open_error_test.erl
index c7379d26a..c7379d26a 100644
--- a/src/ddoc_cache/test/ddoc_cache_open_error_test.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_open_error_test.erl
diff --git a/src/ddoc_cache/test/ddoc_cache_open_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_open_test.erl
index 73d644f71..73d644f71 100644
--- a/src/ddoc_cache/test/ddoc_cache_open_test.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_open_test.erl
diff --git a/src/ddoc_cache/test/ddoc_cache_opener_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_opener_test.erl
index c3846360c..c3846360c 100644
--- a/src/ddoc_cache/test/ddoc_cache_opener_test.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_opener_test.erl
diff --git a/src/ddoc_cache/test/ddoc_cache_refresh_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_refresh_test.erl
index 24ae346d4..24ae346d4 100644
--- a/src/ddoc_cache/test/ddoc_cache_refresh_test.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_refresh_test.erl
diff --git a/src/ddoc_cache/test/ddoc_cache_remove_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_remove_test.erl
index e40518529..e40518529 100644
--- a/src/ddoc_cache/test/ddoc_cache_remove_test.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_remove_test.erl
diff --git a/src/ddoc_cache/test/ddoc_cache_test.hrl b/src/ddoc_cache/test/eunit/ddoc_cache_test.hrl
index 73f7bc217..73f7bc217 100644
--- a/src/ddoc_cache/test/ddoc_cache_test.hrl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_test.hrl
diff --git a/src/ddoc_cache/test/ddoc_cache_tutil.erl b/src/ddoc_cache/test/eunit/ddoc_cache_tutil.erl
index b34d4b163..b34d4b163 100644
--- a/src/ddoc_cache/test/ddoc_cache_tutil.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_tutil.erl
diff --git a/src/dreyfus/src/dreyfus_fabric_cleanup.erl b/src/dreyfus/src/dreyfus_fabric_cleanup.erl
index b5e030db0..2840a2f2d 100644
--- a/src/dreyfus/src/dreyfus_fabric_cleanup.erl
+++ b/src/dreyfus/src/dreyfus_fabric_cleanup.erl
@@ -53,12 +53,12 @@ cleanup_local_purge_doc(DbName, ActiveSigs) ->
end, [], LocalShards),
DeadDirs = DirList -- ActiveDirs,
- lists:foldl(fun(IdxDir) ->
+ lists:foreach(fun(IdxDir) ->
Sig = dreyfus_util:get_signature_from_idxdir(IdxDir),
case Sig of undefined -> ok; _ ->
DocId = dreyfus_util:get_local_purge_doc_id(Sig),
LocalShards = mem3:local_shards(DbName),
- lists:foldl(fun(LS, _AccOuter) ->
+ lists:foreach(fun(LS) ->
ShardDbName = LS#shard.name,
{ok, ShardDb} = couch_db:open_int(ShardDbName, []),
case couch_db:open_doc(ShardDb, DocId, []) of
@@ -69,6 +69,6 @@ cleanup_local_purge_doc(DbName, ActiveSigs) ->
ok
end,
couch_db:close(ShardDb)
- end, [], LocalShards)
+ end, LocalShards)
end
- end, [], DeadDirs).
+ end, DeadDirs).
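
Replacing lists:foldl/3 with lists:foreach/2 here is more than style: the outer fold passed a one-argument fun, and lists:foldl/3 expects fun(Elem, Acc), so the old code would have crashed with badarity the first time DeadDirs was non-empty. foreach is the honest shape for a side-effect-only traversal; a sketch with a hypothetical Files list:

    %% lists:foldl(fun(X) -> X end, [], [1]) crashes with a badarity error;
    %% lists:foreach/2 takes the fun(Elem) this code actually wants.
    lists:foreach(fun(File) -> file:delete(File) end, Files).
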
diff --git a/src/dreyfus/src/dreyfus_util.erl b/src/dreyfus/src/dreyfus_util.erl
index ae3133e7d..6832299db 100644
--- a/src/dreyfus/src/dreyfus_util.erl
+++ b/src/dreyfus/src/dreyfus_util.erl
@@ -332,7 +332,10 @@ get_local_purge_doc_id(Sig) ->
get_signature_from_idxdir(IdxDir) ->
IdxDirList = filename:split(IdxDir),
Sig = lists:last(IdxDirList),
- case [Ch || Ch <- Sig, not (((Ch >= $0) and (Ch =< $9))
+ Sig2 = if not is_binary(Sig) -> Sig; true ->
+ binary_to_list(Sig)
+ end,
+ case [Ch || Ch <- Sig2, not (((Ch >= $0) and (Ch =< $9))
orelse ((Ch >= $a) and (Ch =< $f))
orelse ((Ch >= $A) and (Ch =< $F)))] == [] of
true -> Sig;
diff --git a/src/dreyfus/test/elixir/test/partition_search_test.exs b/src/dreyfus/test/elixir/test/partition_search_test.exs
index 052a41ad1..4400d7b7f 100644
--- a/src/dreyfus/test/elixir/test/partition_search_test.exs
+++ b/src/dreyfus/test/elixir/test/partition_search_test.exs
@@ -26,12 +26,12 @@ defmodule PartitionSearchTest do
end
def create_ddoc(db_name, opts \\ %{}) do
- indexFn = "function(doc) {\n if (doc.some) {\n index('some', doc.some);\n }\n}"
+ index_fn = "function(doc) {\n if (doc.some) {\n index('some', doc.some);\n }\n}"
default_ddoc = %{
indexes: %{
books: %{
analyzer: %{name: "standard"},
- index: indexFn
+ index: index_fn
}
}
}
diff --git a/src/fabric/src/fabric.erl b/src/fabric/src/fabric.erl
index 6d04184e6..d98ffc978 100644
--- a/src/fabric/src/fabric.erl
+++ b/src/fabric/src/fabric.erl
@@ -36,7 +36,8 @@
% miscellany
-export([design_docs/1, reset_validation_funs/1, cleanup_index_files/0,
- cleanup_index_files/1, cleanup_index_files_all_nodes/1, dbname/1]).
+ cleanup_index_files/1, cleanup_index_files_all_nodes/1, dbname/1,
+ inactive_index_files/1]).
-include_lib("fabric/include/fabric.hrl").
@@ -503,26 +504,30 @@ cleanup_index_files() ->
%% @doc clean up index files for a specific db
-spec cleanup_index_files(dbname()) -> ok.
cleanup_index_files(DbName) ->
+ lists:foreach(fun(File) ->
+ file:delete(File)
+ end, inactive_index_files(DbName)).
+
+%% @doc list inactive index files for a specific db
+-spec inactive_index_files(dbname()) -> [string()].
+inactive_index_files(DbName) ->
{ok, DesignDocs} = fabric:design_docs(DbName),
- ActiveSigs = lists:map(fun(#doc{id = GroupId}) ->
+ ActiveSigs = maps:from_list(lists:map(fun(#doc{id = GroupId}) ->
{ok, Info} = fabric:get_view_group_info(DbName, GroupId),
- binary_to_list(couch_util:get_value(signature, Info))
- end, [couch_doc:from_json_obj(DD) || DD <- DesignDocs]),
+ {binary_to_list(couch_util:get_value(signature, Info)), nil}
+ end, [couch_doc:from_json_obj(DD) || DD <- DesignDocs])),
FileList = lists:flatmap(fun(#shard{name = ShardName}) ->
IndexDir = couch_index_util:index_dir(mrview, ShardName),
filelib:wildcard([IndexDir, "/*"])
end, mem3:local_shards(dbname(DbName))),
- DeleteFiles = if ActiveSigs =:= [] -> FileList; true ->
- {ok, RegExp} = re:compile([$(, string:join(ActiveSigs, "|"), $)]),
+ if ActiveSigs =:= [] -> FileList; true ->
lists:filter(fun(FilePath) ->
- re:run(FilePath, RegExp, [{capture, none}]) == nomatch
+ not maps:is_key(filename:basename(FilePath, ".view"), ActiveSigs)
end, FileList)
- end,
- [file:delete(File) || File <- DeleteFiles],
- ok.
+ end.
%% @doc clean up index files for a specific db on all nodes
-spec cleanup_index_files_all_nodes(dbname()) -> [reference()].
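
Splitting inactive_index_files/1 out of cleanup_index_files/1 also swaps the regex scan for a map lookup: active signatures become map keys, and a file is reported inactive only when its basename (minus the ".view" extension) is not one of them, which stays O(1) per file regardless of how many design documents exist. The split additionally lets a caller inspect candidates without deleting anything; a sketch:

    Files = fabric:inactive_index_files(<<"mydb">>),
    couch_log:info("inactive index files: ~p", [Files]).
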
diff --git a/src/fabric/src/fabric_view_all_docs.erl b/src/fabric/src/fabric_view_all_docs.erl
index 1d87e3ddd..5fe143731 100644
--- a/src/fabric/src/fabric_view_all_docs.erl
+++ b/src/fabric/src/fabric_view_all_docs.erl
@@ -104,10 +104,15 @@ go(DbName, Options, QueryArgs, Callback, Acc0) ->
[{total, TotalRows}, {offset, null}, {update_seq, null}]
end,
{ok, Acc1} = Callback({meta, Meta}, Acc0),
- {ok, Acc2} = doc_receive_loop(
+ Resp = doc_receive_loop(
Keys3, queue:new(), SpawnFun, MaxJobs, Callback, Acc1
),
- Callback(complete, Acc2);
+ case Resp of
+ {ok, Acc2} ->
+ Callback(complete, Acc2);
+ timeout ->
+ Callback(timeout, Acc0)
+ end;
{'DOWN', Ref, _, _, Error} ->
Callback({error, Error}, Acc0)
after Timeout ->
diff --git a/src/fabric/test/fabric_rpc_purge_tests.erl b/src/fabric/test/eunit/fabric_rpc_purge_tests.erl
index 4eafb2bc4..4eafb2bc4 100644
--- a/src/fabric/test/fabric_rpc_purge_tests.erl
+++ b/src/fabric/test/eunit/fabric_rpc_purge_tests.erl
diff --git a/src/global_changes/test/global_changes_hooks_tests.erl b/src/global_changes/test/eunit/global_changes_hooks_tests.erl
index 23fa2c87f..23fa2c87f 100644
--- a/src/global_changes/test/global_changes_hooks_tests.erl
+++ b/src/global_changes/test/eunit/global_changes_hooks_tests.erl
diff --git a/src/mango/src/mango_error.erl b/src/mango/src/mango_error.erl
index 2f22552c9..bb545ad67 100644
--- a/src/mango/src/mango_error.erl
+++ b/src/mango/src/mango_error.erl
@@ -69,7 +69,7 @@ info(mango_cursor_text, {text_search_error, {error, {bad_request, Msg}}})
};
info(mango_cursor_text, {text_search_error, {error, Error}}) ->
{
- 400,
+ 500,
<<"text_search_error">>,
fmt("~p", [Error])
};
diff --git a/src/mem3/src/mem3_sync_event_listener.erl b/src/mem3/src/mem3_sync_event_listener.erl
index d7f745137..69a7a6017 100644
--- a/src/mem3/src/mem3_sync_event_listener.erl
+++ b/src/mem3/src/mem3_sync_event_listener.erl
@@ -258,7 +258,7 @@ subscribe_for_config_test_() ->
should_set_sync_delay(Pid) ->
?_test(begin
config:set("mem3", "sync_delay", "123", false),
- wait_state_delay(Pid, 123),
+ wait_state(Pid, #state.delay, 123),
?assertMatch(#state{delay = 123}, get_state(Pid)),
ok
end).
@@ -266,7 +266,7 @@ should_set_sync_delay(Pid) ->
should_set_sync_frequency(Pid) ->
?_test(begin
config:set("mem3", "sync_frequency", "456", false),
- wait_state_frequency(Pid, 456),
+ wait_state(Pid, #state.frequency, 456),
?assertMatch(#state{frequency = 456}, get_state(Pid)),
ok
end).
@@ -301,30 +301,18 @@ get_state(Pid) ->
Pid ! {get_state, Ref, self()},
receive
{Ref, State} -> State
- after 10 ->
+ after 500 ->
timeout
end.
-wait_state_frequency(Pid, Val) ->
+wait_state(Pid, Field, Val) when is_pid(Pid), is_integer(Field) ->
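+    %% Field is a record field index such as #state.delay or #state.frequency,
+    %% so element/2 can read that field from the #state{} record generically.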
WaitFun = fun() ->
case get_state(Pid) of
- timeout ->
- wait;
- #state{frequency = Val} ->
- true
- end
- end,
- test_util:wait(WaitFun).
-
-
-wait_state_delay(Pid, Val) ->
- WaitFun = fun() ->
- case get_state(Pid) of
- timeout ->
- wait;
- #state{delay = Val} ->
- true
+ #state{} = S when element(Field, S) == Val ->
+ true;
+ _ ->
+ wait
end
end,
test_util:wait(WaitFun).
diff --git a/src/mem3/src/mem3_util.erl b/src/mem3/src/mem3_util.erl
index e8cba5d7b..3fc9b4f8e 100644
--- a/src/mem3/src/mem3_util.erl
+++ b/src/mem3/src/mem3_util.erl
@@ -221,11 +221,25 @@ get_engine_opt(DocProps) ->
get_props_opt(DocProps) ->
case couch_util:get_value(<<"props">>, DocProps) of
{Props} when is_list(Props) ->
- [{props, Props}];
+ [{props, db_props_from_json(Props)}];
_ ->
[]
end.
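+%% Convert db props from their decoded-JSON form to the internal
+%% representation: the partitioned flag keeps its value under an atom key,
+%% the hash [M, F, A] triple is converted to existing atoms, and any other
+%% key/value pairs pass through unchanged.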
+db_props_from_json([]) ->
+ [];
+
+db_props_from_json([{<<"partitioned">>, Value} | Rest]) ->
+ [{partitioned, Value} | db_props_from_json(Rest)];
+
+db_props_from_json([{<<"hash">>, [MBin, FBin, A]} | Rest]) ->
+ M = binary_to_existing_atom(MBin, utf8),
+ F = binary_to_existing_atom(FBin, utf8),
+ [{hash, [M, F, A]} | db_props_from_json(Rest)];
+
+db_props_from_json([{K, V} | Rest]) ->
+ [{K, V} | db_props_from_json(Rest)].
+
n_val(undefined, NodeCount) ->
n_val(config:get("cluster", "n", "3"), NodeCount);
n_val(N, NodeCount) when is_list(N) ->
diff --git a/src/mem3/test/mem3_cluster_test.erl b/src/mem3/test/eunit/mem3_cluster_test.erl
index 4610d64bd..4610d64bd 100644
--- a/src/mem3/test/mem3_cluster_test.erl
+++ b/src/mem3/test/eunit/mem3_cluster_test.erl
diff --git a/src/mem3/test/mem3_hash_test.erl b/src/mem3/test/eunit/mem3_hash_test.erl
index 7a40c5366..7a40c5366 100644
--- a/src/mem3/test/mem3_hash_test.erl
+++ b/src/mem3/test/eunit/mem3_hash_test.erl
diff --git a/src/mem3/test/mem3_rep_test.erl b/src/mem3/test/eunit/mem3_rep_test.erl
index 4a46e7b93..4a46e7b93 100644
--- a/src/mem3/test/mem3_rep_test.erl
+++ b/src/mem3/test/eunit/mem3_rep_test.erl
diff --git a/src/mem3/test/mem3_reshard_api_test.erl b/src/mem3/test/eunit/mem3_reshard_api_test.erl
index c4df24ad3..c4df24ad3 100644
--- a/src/mem3/test/mem3_reshard_api_test.erl
+++ b/src/mem3/test/eunit/mem3_reshard_api_test.erl
diff --git a/src/mem3/test/mem3_reshard_changes_feed_test.erl b/src/mem3/test/eunit/mem3_reshard_changes_feed_test.erl
index 4b9e2a34a..4b9e2a34a 100644
--- a/src/mem3/test/mem3_reshard_changes_feed_test.erl
+++ b/src/mem3/test/eunit/mem3_reshard_changes_feed_test.erl
diff --git a/src/mem3/test/mem3_reshard_test.erl b/src/mem3/test/eunit/mem3_reshard_test.erl
index ab6202115..ab6202115 100644
--- a/src/mem3/test/mem3_reshard_test.erl
+++ b/src/mem3/test/eunit/mem3_reshard_test.erl
diff --git a/src/mem3/test/mem3_ring_prop_tests.erl b/src/mem3/test/eunit/mem3_ring_prop_tests.erl
index 9f4f86f5f..9f4f86f5f 100644
--- a/src/mem3/test/mem3_ring_prop_tests.erl
+++ b/src/mem3/test/eunit/mem3_ring_prop_tests.erl
diff --git a/src/mem3/test/mem3_seeds_test.erl b/src/mem3/test/eunit/mem3_seeds_test.erl
index ba83b66be..ba83b66be 100644
--- a/src/mem3/test/mem3_seeds_test.erl
+++ b/src/mem3/test/eunit/mem3_seeds_test.erl
diff --git a/src/mem3/test/mem3_sync_security_test.erl b/src/mem3/test/eunit/mem3_sync_security_test.erl
index e67a72017..e67a72017 100644
--- a/src/mem3/test/mem3_sync_security_test.erl
+++ b/src/mem3/test/eunit/mem3_sync_security_test.erl
diff --git a/src/mem3/test/mem3_util_test.erl b/src/mem3/test/eunit/mem3_util_test.erl
index 8b74c4b2b..8b74c4b2b 100644
--- a/src/mem3/test/mem3_util_test.erl
+++ b/src/mem3/test/eunit/mem3_util_test.erl
diff --git a/test/elixir/Makefile b/test/elixir/Makefile
index bfcf017d5..67ce2b427 100644
--- a/test/elixir/Makefile
+++ b/test/elixir/Makefile
@@ -1,2 +1,4 @@
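+# SELF_DIR is the directory containing this Makefile, so the delegating
+# target below works no matter where make is invoked from.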
+SELF_DIR := $(dir $(lastword $(MAKEFILE_LIST)))
+
all:
- mix test --trace
+	$(MAKE) -C ${SELF_DIR}../.. elixir
diff --git a/test/elixir/README.md b/test/elixir/README.md
index a59b4df90..f7691ad3c 100644
--- a/test/elixir/README.md
+++ b/test/elixir/README.md
@@ -111,3 +111,146 @@ X means done, - means partially
- [ ] Port view_pagination.js
- [ ] Port view_sandboxing.js
- [ ] Port view_update_seq.js
+
+# Using ExUnit to write unit tests
+
+Elixir has a number of features which make writing unit tests easier.
+For example, it is trivial to generate tests programmatically.
+Below we present a few use cases where code generation is really helpful.
+
+## How to write ExUnit tests
+
+1. Create a new file in the test/exunit/ directory (the file name should match `*_test.exs`)
+2. If it is the first file in the directory, create test_helper.exs (look at src/couch/test/exunit/test_helper.exs to get an idea)
+3. Define a test module which does `use Couch.Test.ExUnit.Case`
+4. Define test cases in the module (a minimal example is sketched below)
+
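+A minimal test file might look like the following sketch (the module name
+and test body are just examples):
+```
+defmodule MinimalTest do
+  use Couch.Test.ExUnit.Case
+
+  test "arithmetic still works" do
+    assert 1 + 1 == 2
+  end
+end
+```
+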
+You can run the tests either:
+- using make: `make exunit`
+- using mix: `BUILDDIR=$(pwd) ERL_LIBS=$(pwd)/src MIX_ENV=test mix test --trace`
+
+## Generating tests from spec
+
+Sometimes we have data in a structured format and want
+to generate test cases from that data. This is easy in Elixir.
+For example, suppose we have the following spec:
+```
+{
+ "{db_name}/_view_cleanup": {
+ "roles": ["_admin"]
+ }
+}
+```
+We can use this spec to generate test cases:
+```
+defmodule GenerateTestsFromSpec do
+ use ExUnit.Case
+ require Record
+ Record.defrecordp :user_ctx, Record.extract(:user_ctx, from_lib: "couch/include/couch_db.hrl")
+ Record.defrecordp :httpd, Record.extract(:httpd, from_lib: "couch/include/couch_db.hrl")
+
+ {:ok, spec_bin} = File.read("roles.json")
+ spec = :jiffy.decode(spec_bin, [:return_maps])
+ Enum.each spec, fn {path, path_spec} ->
+ roles = path_spec["roles"]
+ @roles roles
+ @path_parts String.split(path, "/")
+ test "Access with `#{inspect(roles)}` roles" do
+ req = httpd(path_parts: @path_parts, user_ctx: user_ctx(roles: @roles))
+ :chttpd_auth_request.authorize_request(req)
+ end
+ end
+end
+```
+As a result we would get:
+```
+GenerateTestsFromSpec
+ * test Access with `["_admin"]` roles (0.00ms)
+```
+
+## Test all possible combinations
+
+Sometimes we want to test all possible combinations of parameters.
+This can be accomplished using something like the following:
+
+```
+defmodule Permutations do
+ use ExUnit.Case
+ pairs = :couch_tests_combinatorics.product([
+ [:remote, :local], [:remote, :local]
+ ])
+ for [source, dest] <- pairs do
+ @source source
+ @dest dest
+ test "Replication #{source} -> #{dest}" do
+ assert :ok == :ok
+ end
+ end
+end
+```
+
+This would produce the following tests:
+```
+Permutations
+ * test Replication remote -> remote (0.00ms)
+ * test Replication local -> remote (0.00ms)
+ * test Replication remote -> local (0.00ms)
+ * test Replication local -> local (0.00ms)
+```
+
+## Reusing common setups
+
+The setup functions are quite similar across many tests, so it makes
+sense to reuse them. The idea is to add shared setup functions to either:
+- test/elixir/lib/setup/common.ex
+- test/elixir/lib/setup/<something>.ex
+
+A shared setup function looks like the following:
+```
+defmodule Foo do
+ alias Couch.Test.Setup.Step
+
+ def httpd_with_admin(setup) do
+ setup
+ |> Step.Start.new(:start, extra_apps: [:chttpd])
+ |> Step.User.new(:admin, roles: [:server_admin])
+ end
+end
+```
+
+These parts of a setup chain can be invoked as follows:
+```
+defmodule Couch.Test.CRUD do
+ use Couch.Test.ExUnit.Case
+ alias Couch.Test.Utils
+
+ alias Couch.Test.Setup
+
+ alias Couch.Test.Setup.Step
+
+ def with_db(context, setup) do
+ setup =
+ setup
+ |> Setup.Common.httpd_with_db()
+ |> Setup.run()
+
+ context =
+ Map.merge(context, %{
+ db_name: setup |> Setup.get(:db) |> Step.Create.DB.name(),
+ base_url: setup |> Setup.get(:start) |> Step.Start.clustered_url(),
+ user: setup |> Setup.get(:admin) |> Step.User.name()
+ })
+
+ {context, setup}
+ end
+
+ describe "Database CRUD using Fabric API" do
+ @describetag setup: &__MODULE__.with_db/2
+ test "Create DB", ctx do
+ IO.puts("base_url: #{ctx.base_url}")
+ IO.puts("admin: #{ctx.user}")
+ IO.puts("db_name: #{ctx.db_name}")
+ end
+ end
+end
+``` \ No newline at end of file
diff --git a/test/elixir/lib/couch.ex b/test/elixir/lib/couch.ex
index 58581b2fd..6c7310d56 100644
--- a/test/elixir/lib/couch.ex
+++ b/test/elixir/lib/couch.ex
@@ -50,6 +50,14 @@ defmodule Couch do
CouchDB library to power test suite.
"""
+ # These constants are supplied to the underlying HTTP client and control
+ # how long we will wait before timing out a test. The inactivity timeout
+ # specifically fires during an active HTTP response and defaults to 10_000
+ # if not specified. We're defining it to a different value than the
+ # request_timeout largely just so we know which timeout fired.
+ @request_timeout 60_000
+ @inactivity_timeout 55_000
+
def process_url("http://" <> _ = url) do
url
end
@@ -59,7 +67,7 @@ defmodule Couch do
base_url <> url
end
- def process_request_headers(headers, options) do
+ def process_request_headers(headers, _body, options) do
headers = Keyword.put(headers, :"User-Agent", "couch-potion")
headers =
@@ -79,19 +87,10 @@ defmodule Couch do
end
def process_options(options) do
- if Keyword.get(options, :cookie) == nil do
- headers = Keyword.get(options, :headers, [])
-
- if headers[:basic_auth] != nil or headers[:authorization] != nil do
- options
- else
- username = System.get_env("EX_USERNAME") || "adm"
- password = System.get_env("EX_PASSWORD") || "pass"
- Keyword.put(options, :basic_auth, {username, password})
- end
- else
- options
- end
+ options
+ |> set_auth_options()
+ |> set_inactivity_timeout()
+ |> set_request_timeout()
end
def process_request_body(body) do
@@ -112,6 +111,33 @@ defmodule Couch do
end
end
+ def set_auth_options(options) do
+ if Keyword.get(options, :cookie) == nil do
+ headers = Keyword.get(options, :headers, [])
+
+ if headers[:basic_auth] != nil or headers[:authorization] != nil do
+ options
+ else
+ username = System.get_env("EX_USERNAME") || "adm"
+ password = System.get_env("EX_PASSWORD") || "pass"
+ Keyword.put(options, :basic_auth, {username, password})
+ end
+ else
+ options
+ end
+ end
+
+ def set_inactivity_timeout(options) do
+ Keyword.update(options, :ibrowse, [{:inactivity_timeout, @inactivity_timeout}], fn(ibrowse) ->
+ Keyword.put_new(ibrowse, :inactivity_timeout, @inactivity_timeout)
+ end)
+ end
+
+ def set_request_timeout(options) do
+ timeout = Application.get_env(:httpotion, :default_timeout, @request_timeout)
+ Keyword.put_new(options, :timeout, timeout)
+ end
+
def login(userinfo) do
[user, pass] = String.split(userinfo, ":", parts: 2)
login(user, pass)
@@ -125,105 +151,4 @@ defmodule Couch do
%Couch.Session{cookie: token}
end
- # HACK: this is here until this commit lands in a release
- # https://github.com/myfreeweb/httpotion/commit/f3fa2f0bc3b9b400573942b3ba4628b48bc3c614
- def handle_response(response) do
- case response do
- {:ok, status_code, headers, body, _} ->
- processed_headers = process_response_headers(headers)
-
- %HTTPotion.Response{
- status_code: process_status_code(status_code),
- headers: processed_headers,
- body: process_response_body(processed_headers, body)
- }
-
- {:ok, status_code, headers, body} ->
- processed_headers = process_response_headers(headers)
-
- %HTTPotion.Response{
- status_code: process_status_code(status_code),
- headers: processed_headers,
- body: process_response_body(processed_headers, body)
- }
-
- {:ibrowse_req_id, id} ->
- %HTTPotion.AsyncResponse{id: id}
-
- {:error, {:conn_failed, {:error, reason}}} ->
- %HTTPotion.ErrorResponse{message: error_to_string(reason)}
-
- {:error, :conn_failed} ->
- %HTTPotion.ErrorResponse{message: "conn_failed"}
-
- {:error, reason} ->
- %HTTPotion.ErrorResponse{message: error_to_string(reason)}
- end
- end
-
- # Anther HACK: Until we can get process_request_headers/2 merged
- # upstream.
- @spec process_arguments(atom, String.t(), [{atom(), any()}]) :: %{}
- defp process_arguments(method, url, options) do
- options = process_options(options)
-
- body = Keyword.get(options, :body, "")
-
- headers =
- Keyword.merge(
- Application.get_env(:httpotion, :default_headers, []),
- Keyword.get(options, :headers, [])
- )
-
- timeout =
- Keyword.get(
- options,
- :timeout,
- Application.get_env(:httpotion, :default_timeout, 5000)
- )
-
- ib_options =
- Keyword.merge(
- Application.get_env(:httpotion, :default_ibrowse, []),
- Keyword.get(options, :ibrowse, [])
- )
-
- follow_redirects =
- Keyword.get(
- options,
- :follow_redirects,
- Application.get_env(:httpotion, :default_follow_redirects, false)
- )
-
- ib_options =
- if stream_to = Keyword.get(options, :stream_to),
- do:
- Keyword.put(
- ib_options,
- :stream_to,
- spawn(__MODULE__, :transformer, [stream_to, method, url, options])
- ),
- else: ib_options
-
- ib_options =
- if user_password = Keyword.get(options, :basic_auth) do
- {user, password} = user_password
- Keyword.put(ib_options, :basic_auth, {to_charlist(user), to_charlist(password)})
- else
- ib_options
- end
-
- %{
- method: method,
- url: url |> to_string |> process_url(options) |> to_charlist,
- body: body |> process_request_body,
- headers:
- headers
- |> process_request_headers(options)
- |> Enum.map(fn {k, v} -> {to_charlist(k), to_charlist(v)} end),
- timeout: timeout,
- ib_options: ib_options,
- follow_redirects: follow_redirects
- }
- end
end
diff --git a/test/elixir/lib/couch/db_test.ex b/test/elixir/lib/couch/db_test.ex
index 990173a13..f98376c94 100644
--- a/test/elixir/lib/couch/db_test.ex
+++ b/test/elixir/lib/couch/db_test.ex
@@ -290,7 +290,7 @@ defmodule Couch.DBTest do
end
end
- def retry_until(condition, sleep \\ 100, timeout \\ 5000) do
+ def retry_until(condition, sleep \\ 100, timeout \\ 30_000) do
retry_until(condition, now(:ms), sleep, timeout)
end
diff --git a/test/elixir/lib/ex_unit.ex b/test/elixir/lib/ex_unit.ex
new file mode 100644
index 000000000..8503cd991
--- /dev/null
+++ b/test/elixir/lib/ex_unit.ex
@@ -0,0 +1,48 @@
+defmodule Couch.Test.ExUnit.Case do
+ @moduledoc """
+  Template for an ExUnit test case. It can be used as follows:
+ ```
+ defmodule Couch.Test.CRUD do
+ use Couch.Test.ExUnit.Case
+ ...
+ def with_db(context, setup) do
+ setup = setup
+ |> Step.Start.new(:start, extra_apps: [:chttpd])
+ |> Setup.run
+ context = Map.merge(context, %{
+ base_url: setup |> Setup.get(:start) |> Step.Start.clustered_url
+ })
+ {context, setup}
+ end
+ describe "Group of tests" do
+ @describetag setup: &__MODULE__.with_db/2
+ test "Single test in a group", ctx do
+ ctx.base_url
+ end
+ ...
+ end
+ ```
+ """
+
+ use ExUnit.CaseTemplate
+ alias Couch.Test.Setup
+
+ using do
+ quote do
+ require Logger
+ use ExUnit.Case
+ end
+ end
+
+ setup context do
+ on_exit(fn ->
+ :meck.unload()
+ end)
+
+ case context do
+ %{:setup => setup_fun} ->
+ {:ok, Setup.setup(context, setup_fun)}
+ _ -> {:ok, context}
+ end
+ end
+end \ No newline at end of file
diff --git a/test/elixir/lib/setup.ex b/test/elixir/lib/setup.ex
new file mode 100644
index 000000000..037988521
--- /dev/null
+++ b/test/elixir/lib/setup.ex
@@ -0,0 +1,97 @@
+defmodule Couch.Test.Setup do
+ @moduledoc """
+  Allows chaining of setup functions.
+  Example usage:
+
+ ```
+  alias Couch.Test.Utils
+ def with_db_name(context, setup) do
+ setup =
+ setup
+ |> Step.Start.new(:start, extra_apps: [:chttpd])
+ |> Step.User.new(:admin, roles: [:server_admin])
+ |> Setup.run()
+
+ context =
+ Map.merge(context, %{
+        db_name: Utils.random_name("db"),
+ base_url: setup |> Setup.get(:start) |> Step.Start.clustered_url(),
+ user: setup |> Setup.get(:admin) |> Step.User.name()
+ })
+ {context, setup}
+ end
+
+ @tag setup: &__MODULE__.with_db_name/2
+ test "Create", %{db_name: db_name, user: user} do
+ ...
+ end
+ ```
+ """
+ import ExUnit.Callbacks, only: [on_exit: 1]
+ import ExUnit.Assertions, only: [assert: 2]
+ require Logger
+
+ alias Couch.Test.Setup
+ alias Couch.Test.Setup.Step
+ defstruct stages: [], by_type: %{}, state: %{}
+
+ def step(%Setup{stages: stages} = setup, id, step) do
+ %{setup | stages: [{id, step} | stages]}
+ end
+
+ defp setup_step({id, step}, %Setup{state: state, by_type: by_type} = setup) do
+ %module{} = step
+ # credo:disable-for-next-line Credo.Check.Warning.LazyLogging
+ Logger.debug("Calling 'setup/2' for '#{module}'")
+ step = module.setup(setup, step)
+ state = Map.put(state, id, step)
+ by_type = Map.update(by_type, module, [id], fn ids -> [id | ids] end)
+ on_exit(fn ->
+ # credo:disable-for-next-line Credo.Check.Warning.LazyLogging
+ Logger.debug("Calling 'teardown/3' for '#{module}'")
+ try do
+ module.teardown(setup, step)
+ :ok
+ catch
+ _ -> :ok
+ _, _ -> :ok
+ end
+ end)
+ {{id, step}, %{setup | state: state, by_type: by_type}}
+ end
+
+ def run(%Setup{stages: stages} = setup) do
+ {stages, setup} = stages
+ |> Enum.reverse
+ |> Enum.map_reduce(setup, &setup_step/2)
+ %{setup | stages: stages}
+ end
+
+ def setup(ctx) do
+ Map.get(ctx, :__setup)
+ end
+
+ def setup(ctx, setup_fun) do
+ setup = %Setup{} |> Step.Config.new(:test_config, config_file: nil)
+ {ctx, setup} = setup_fun.(ctx, setup)
+ assert not Map.has_key?(ctx, :__setup), "Key `__setup` is reserved for internal purposes"
+ Map.put(ctx, :__setup, setup)
+ end
+
+ def completed?(%Setup{by_type: by_type}, step) do
+ Map.has_key?(by_type, step)
+ end
+
+ def all_for(%Setup{by_type: by_type, state: state}, step_module) do
+ Map.take(state, by_type[step_module] || [])
+ end
+
+ def reduce_for(setup, step_module, acc, fun) do
+ Enum.reduce(all_for(setup, step_module), acc, fun)
+ end
+
+ def get(%Setup{state: state}, id) do
+ state[id]
+ end
+
+end \ No newline at end of file
diff --git a/test/elixir/lib/setup/common.ex b/test/elixir/lib/setup/common.ex
new file mode 100644
index 000000000..e81f109c9
--- /dev/null
+++ b/test/elixir/lib/setup/common.ex
@@ -0,0 +1,27 @@
+defmodule Couch.Test.Setup.Common do
+ @moduledoc """
+ A set of common setup pipelines for reuse
+
+  - httpd_with_admin - chttpd is started and a new admin is created
+  - httpd_with_db - httpd_with_admin plus a newly created database
+  - with_db - fabric is started and a new database is created
+ """
+ alias Couch.Test.Setup.Step
+
+ def httpd_with_admin(setup) do
+ setup
+ |> Step.Start.new(:start, extra_apps: [:chttpd])
+ |> Step.User.new(:admin, roles: [:server_admin])
+ end
+
+ def httpd_with_db(setup) do
+ setup
+ |> httpd_with_admin()
+ |> Step.Create.DB.new(:db)
+ end
+
+ def with_db(setup) do
+ setup
+ |> Step.Start.new(:start, extra_apps: [:fabric])
+ |> Step.Create.DB.new(:db)
+ end
+end \ No newline at end of file
diff --git a/test/elixir/lib/step.ex b/test/elixir/lib/step.ex
new file mode 100644
index 000000000..316d765aa
--- /dev/null
+++ b/test/elixir/lib/step.ex
@@ -0,0 +1,44 @@
+defmodule Couch.Test.Setup.Step do
+ @moduledoc """
+ A behaviour module for implementing custom setup steps for future reuse.
+
+  Every module implementing this behaviour must implement the following three functions:
+ - new
+ - setup
+ - teardown
+
+  Here is an example of a custom step:
+ ```
+ defmodule Couch.Test.Setup.Step.Foo do
+
+ alias Couch.Test.Setup
+
+ defstruct [:foo_data, :foo_arg]
+
+ def new(setup, id, arg: arg) do
+ setup |> Setup.step(id, %__MODULE__{foo_arg: arg})
+ end
+
+ def setup(_setup, %__MODULE__{foo_arg: arg} = step) do
+ ...
+ foo_data = ...
+ %{step | foo_data: foo_data}
+ end
+
+ def teardown(_setup, _step) do
+ end
+
+ def get_data(%__MODULE__{foo_data: data}) do
+ data
+ end
+ end
+ ```
+ """
+ @type t :: struct()
+ @callback new(setup :: %Couch.Test.Setup{}, id :: atom(), args: Keyword.t()) ::
+ %Couch.Test.Setup{}
+ @callback setup(setup :: %Couch.Test.Setup{}, step :: t()) ::
+ t()
+ @callback teardown(setup :: %Couch.Test.Setup{}, step :: t()) ::
+ any()
+end \ No newline at end of file
diff --git a/test/elixir/lib/step/config.ex b/test/elixir/lib/step/config.ex
new file mode 100644
index 000000000..9d9ac8eab
--- /dev/null
+++ b/test/elixir/lib/step/config.ex
@@ -0,0 +1,33 @@
+defmodule Couch.Test.Setup.Step.Config do
+ @moduledoc """
+  This setup step reads the configuration for a test run.
+ It is not supposed to be called manually.
+ """
+
+ alias Couch.Test.Setup
+
+ defstruct [:config, :config_file]
+
+ def new(setup, id, config_file: config_file) do
+ setup |> Setup.step(id, %__MODULE__{config_file: config_file})
+ end
+
+ def setup(_setup, %__MODULE__{config_file: config_file} = step) do
+ # TODO we would need to access config file here
+ %{step | config: %{
+ backdoor: %{
+ protocol: "http"
+ },
+ clustered: %{
+ protocol: "http"
+ }
+ }}
+ end
+
+ def teardown(_setup, _step) do
+ end
+
+ def get(%__MODULE__{config: config}) do
+ config
+ end
+end \ No newline at end of file
diff --git a/test/elixir/lib/step/create_db.ex b/test/elixir/lib/step/create_db.ex
new file mode 100644
index 000000000..3cca3c55a
--- /dev/null
+++ b/test/elixir/lib/step/create_db.ex
@@ -0,0 +1,53 @@
+defmodule Couch.Test.Setup.Step.Create.DB do
+ @moduledoc """
+  This setup step creates a database with the given name.
+  If a name is not provided, a random one is used.
+
+ Example
+ setup
+ ...
+ |> Setup.Step.Create.DB.new(:db)
+ ...
+ |> Setup.run
+ ...
+
+ db_name = setup |> Setup.get(:db) |> Setup.Step.Create.DB.name
+ """
+ alias Couch.Test.Setup
+ alias Couch.Test.Setup.Step
+ alias Couch.Test.Utils
+
+ defstruct [:name]
+
+ import ExUnit.Assertions, only: [assert: 1, assert: 2]
+
+ import Utils
+
+ @admin {:user_ctx, user_ctx(roles: ["_admin"])}
+
+ def new(setup, id) do
+ new(setup, id, name: Utils.random_name("db"))
+ end
+
+ def new(setup, id, name: name) do
+ setup |> Setup.step(id, %__MODULE__{name: name})
+ end
+
+ def setup(setup, %__MODULE__{name: name} = step) do
+ assert Setup.completed?(setup, Step.Start), "Require `Start` step"
+ assert :fabric in Step.Start.apps(), "Fabric is not started"
+ res = :fabric.create_db(name, [@admin])
+ assert res in [:ok, :accepted], "Cannot create `#{name}` database"
+ step
+ end
+
+  def teardown(_setup, %__MODULE__{name: name}) do
+ :fabric.delete_db(name, [@admin])
+ :ok
+ end
+
+ def name(%__MODULE__{name: name}) do
+ name
+ end
+
+end \ No newline at end of file
diff --git a/test/elixir/lib/step/start.ex b/test/elixir/lib/step/start.ex
new file mode 100644
index 000000000..ea7c70f5a
--- /dev/null
+++ b/test/elixir/lib/step/start.ex
@@ -0,0 +1,85 @@
+defmodule Couch.Test.Setup.Step.Start do
+ @moduledoc """
+  Step to start a set of couchdb applications. By default it starts the
+  list of applications from the DEFAULT_APPS macro defined in `test_util.erl`.
+ At the time of writing this list included:
+ - inets
+ - ibrowse
+ - ssl
+ - config
+ - couch_epi
+ - couch_event
+ - couch
+
+  It is possible to specify an additional list of applications to start.
+
+  This setup also records `clustered_url` and `backdoor_url` for future use.
+  The value of `clustered_url` is nil if the :chttpd app is not included in extra_apps.
+
+ Example
+ setup
+ |> Setup.Step.Start.new(:start, extra_apps: [:fabric, :chttpd])
+ ...
+ |> Setup.run
+ ...
+
+ started_apps = Setup.Step.Start.apps
+ clustered_url = setup |> Setup.get(:start) |> Setup.Step.Start.clustered_url
+ backdoor_url = setup |> Setup.get(:start) |> Setup.Step.Start.backdoor_url
+ """
+ alias Couch.Test.Setup
+ alias Couch.Test.Setup.Step
+
+ defstruct [:test_ctx, :extra_apps, :clustered_url, :backdoor_url]
+
+ def new(setup, id, extra_apps: extra_apps) do
+ setup |> Setup.step(id, %__MODULE__{extra_apps: extra_apps || []})
+ end
+
+ def setup(setup, %__MODULE__{extra_apps: extra_apps} = step) do
+ test_config = setup |> Setup.get(:test_config) |> Step.Config.get()
+ protocol = test_config[:backdoor][:protocol] || "http"
+ test_ctx = :test_util.start_couch(extra_apps)
+    addr = :config.get('httpd', 'bind_address', '127.0.0.1')
+ port = :mochiweb_socket_server.get(:couch_httpd, :port)
+ backdoor_url = "#{protocol}://#{addr}:#{port}"
+ clustered_url =
+ if :chttpd in extra_apps do
+ protocol = test_config[:clustered][:protocol] || "http"
+ addr = :config.get('chttpd', 'bind_address', '127.0.0.1')
+ port = :mochiweb_socket_server.get(:chttpd, :port)
+ "#{protocol}://#{addr}:#{port}"
+ else
+ nil
+ end
+ %{step |
+ test_ctx: test_ctx,
+ clustered_url: clustered_url,
+ backdoor_url: backdoor_url
+ }
+ end
+
+  def teardown(_setup, %__MODULE__{test_ctx: test_ctx}) do
+ :test_util.stop_couch(test_ctx)
+ end
+
+ def backdoor_url(%__MODULE__{backdoor_url: url}) do
+ url
+ end
+
+ def clustered_url(%__MODULE__{clustered_url: url}) do
+ url
+ end
+
+ def extra_apps(%__MODULE__{extra_apps: apps}) do
+ apps
+ end
+
+ @doc """
+  Returns the list of currently running applications.
+ """
+ def apps() do
+ for {x, _, _} <- Application.started_applications, do: x
+ end
+
+end \ No newline at end of file
diff --git a/test/elixir/lib/step/user.ex b/test/elixir/lib/step/user.ex
new file mode 100644
index 000000000..5a1cab33c
--- /dev/null
+++ b/test/elixir/lib/step/user.ex
@@ -0,0 +1,104 @@
+defmodule Couch.Test.Setup.Step.User do
+ @moduledoc """
+  Step to create a user with a given list of roles.
+  The :server_admin role is special: it is used to put the user into the
+  `admins` section of the config instead of the users database.
+
+ Example
+ setup
+ |> Setup.Step.User.new(:admin, roles: [:server_admin])
+ ...
+ |> Setup.run
+ ...
+
+ user = setup |> Setup.get(:admin) |> Step.User.name()
+ """
+
+ alias Couch.Test.Setup
+ alias Couch.Test.Setup.Step
+ alias Couch.Test.Utils
+
+ import ExUnit.Callbacks, only: [on_exit: 1]
+
+ defstruct [:roles, :name, :password, :users_db]
+
+ import ExUnit.Assertions, only: [assert: 1, assert: 2]
+
+ import Utils
+
+ @admin {:user_ctx, user_ctx(roles: ["_admin"])}
+
+ def new(setup, id, roles: roles) do
+ setup |> Setup.step(id, %__MODULE__{roles: roles || []})
+ end
+
+ def setup(setup, %__MODULE__{roles: roles} = step) do
+ users_db = IO.chardata_to_string(
+ :config.get('chttpd_auth', 'authentication_db', '_users'))
+ if not Utils.db_exists?(users_db) do
+ on_exit fn ->
+ :fabric.delete_db(users_db, [@admin])
+ end
+ res = :fabric.create_db(users_db, [@admin])
+ assert res in [:ok, :accepted], "Cannot create `users` database #{users_db}"
+ end
+
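+    # A :server_admin is created as an entry in the config [admins] section;
+    # any other set of roles becomes a user document in the users database.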
+ if :server_admin in roles do
+ name = Utils.random_name("admin")
+ pass = Utils.random_password()
+ :config.set(
+ 'admins', String.to_charlist(name), String.to_charlist(pass), false)
+ %{step |
+ name: name,
+ password: pass,
+ users_db: users_db
+ }
+ else
+ name = Utils.random_name("admin")
+ pass = Utils.random_password()
+ doc_id = "org.couchdb.user:#{name}"
+ user_doc = :couch_doc.from_json_obj(%{
+ _id: doc_id,
+ name: name,
+ type: "user",
+ roles: roles,
+ password: pass
+ })
+ res = :fabric.update_doc(users_db, user_doc, [@admin])
+ assert res in [:ok, :accepted], "Cannot create user document"
+ %{step |
+ name: name,
+ password: pass,
+ users_db: users_db,
+ roles: roles
+ }
+ end
+ end
+
+  def teardown(_setup, %__MODULE__{name: name, users_db: users_db, roles: roles}) do
+    if :server_admin in roles do
+      :config.delete('admins', String.to_charlist(name), false)
+    else
+      doc_id = "org.couchdb.user:#{name}"
+      assert {:ok, doc_info(revs: [rev | _])} = :fabric.get_doc_info(users_db, doc_id, [])
+ doc = :couch_doc.from_json_obj(%{
+ _id: doc_id,
+ _rev: rev,
+ _deleted: true
+ })
+ assert {:ok, _resp} = :fabric.update_doc(users_db, doc, [@admin])
+ end
+ :ok
+ end
+
+ def name(%__MODULE__{name: name}) do
+ name
+ end
+ def password(%__MODULE__{password: pass}) do
+ pass
+ end
+ def credentials(%__MODULE__{name: name, password: pass}) do
+ {name, pass}
+ end
+
+end \ No newline at end of file
diff --git a/test/elixir/lib/utils.ex b/test/elixir/lib/utils.ex
new file mode 100644
index 000000000..3ecf878e7
--- /dev/null
+++ b/test/elixir/lib/utils.ex
@@ -0,0 +1,61 @@
+defmodule Couch.Test.Utils do
+ require Record
+ @moduledoc "Helper functions for testing"
+ @project_root "#{__DIR__}/../../../"
+ Record.defrecord :user_ctx, Record.extract(
+ :user_ctx, from: "#{@project_root}/src/couch/include/couch_db.hrl")
+
+ Record.defrecord :doc_info, Record.extract(
+ :doc_info, from: "#{@project_root}/src/couch/include/couch_db.hrl")
+
+ def random_name(prefix) do
+ time = :erlang.monotonic_time()
+ umi = :erlang.unique_integer([:monotonic])
+ "#{prefix}-#{time}-#{umi}"
+ end
+
+ def random_password() do
+ rand_bytes = :crypto.strong_rand_bytes(16)
+ rand_bytes
+ |> :base64.encode()
+ |> String.slice(0..16)
+ end
+
+ def db_exists?(db_name) do
+ try do
+ :fabric.get_db_info(db_name)
+ catch
+ :error, :database_does_not_exist -> false
+ end
+ end
+
+ @doc """
+  In some cases we need to access a record definition at compile time.
+  We cannot use Record.defrecord in such cases, so this helper function
+  can be used instead. Use it as follows:
+ ```
+ defmodule Foo do
+ admin_ctx = {:user_ctx, Utils.erlang_record(
+ :user_ctx, "couch/include/couch_db.hrl", roles: ["_admin"])}
+ end
+ ```
+
+  Longer term we should wrap Erlang records the way it is done for user_ctx
+  at the beginning of Utils.ex. Then we would be able to use
+  them at compile time in other modules.
+ ```
+ Record.defrecord :user_ctx, Record.extract(
+ :user_ctx, from_lib: "couch/include/couch_db.hrl")
+ ```
+ """
+ def erlang_record(name, from_lib, opts \\ []) do
+ record_info = Record.extract(name, from_lib: from_lib)
+ index = [name | Keyword.keys(record_info)] |> Enum.with_index
+ draft = [name | Keyword.values(record_info)] |> List.to_tuple
+ opts
+ |> Enum.reduce(draft, fn
+ {k, v}, acc -> put_elem(acc, index[k], v)
+ end)
+ end
+
+end \ No newline at end of file
diff --git a/test/elixir/mix.exs b/test/elixir/mix.exs
deleted file mode 100644
index f04038ef3..000000000
--- a/test/elixir/mix.exs
+++ /dev/null
@@ -1,37 +0,0 @@
-defmodule Foo.Mixfile do
- use Mix.Project
-
- def project do
- [
- app: :foo,
- version: "0.1.0",
- elixir: "~> 1.6",
- elixirc_paths: elixirc_paths(Mix.env()),
- start_permanent: Mix.env() == :prod,
- deps: deps()
- ]
- end
-
- # Run "mix help compile.app" to learn about applications.
- def application do
- [
- extra_applications: [:logger]
- ]
- end
-
- # Specifies which paths to compile per environment.
- defp elixirc_paths(:test), do: ["lib", "test/support"]
- defp elixirc_paths(_), do: ["lib"]
-
- # Run "mix help deps" to learn about dependencies.
- defp deps do
- [
- # {:dep_from_hexpm, "~> 0.3.0"},
- {:httpotion, "~> 3.0"},
- {:jiffy, "~> 0.15.2"},
- {:credo, "~> 1.0.0", only: [:dev, :test], runtime: false},
- {:junit_formatter, "~> 3.0", only: [:test]}
- # {:dep_from_git, git: "https://github.com/elixir-lang/my_dep.git", tag: "0.1.0"},
- ]
- end
-end
diff --git a/test/elixir/run b/test/elixir/run
deleted file mode 100755
index a9c2efa4d..000000000
--- a/test/elixir/run
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash -e
-cd "$(dirname "$0")"
-mix local.hex --force
-mix local.rebar --force
-mix deps.get
-mix test --trace "$@"
diff --git a/test/elixir/test/compact_test.exs b/test/elixir/test/compact_test.exs
index 4b0a5a07a..4c6657ea9 100644
--- a/test/elixir/test/compact_test.exs
+++ b/test/elixir/test/compact_test.exs
@@ -12,6 +12,9 @@ defmodule CompactTest do
@att_name "foo.txt"
@att_plaintext "This is plain text"
+  # Need to investigate why compaction is not compacting (or the compactor cannot complete)
+  # Refer: https://github.com/apache/couchdb/pull/2127
+ @tag :pending
@tag :skip_on_jenkins
@tag :with_db
test "compaction reduces size of deleted docs", context do
diff --git a/test/elixir/test/replication_test.exs b/test/elixir/test/replication_test.exs
index e98775fbd..11687ab17 100644
--- a/test/elixir/test/replication_test.exs
+++ b/test/elixir/test/replication_test.exs
@@ -9,9 +9,6 @@ defmodule ReplicationTest do
# TODO: Parameterize these
@admin_account "adm:pass"
@db_pairs_prefixes [
- {"local-to-local", "", ""},
- {"remote-to-local", "http://127.0.0.1:15984/", ""},
- {"local-to-remote", "", "http://127.0.0.1:15984/"},
{"remote-to-remote", "http://127.0.0.1:15984/", "http://127.0.0.1:15984/"}
]
@@ -19,22 +16,11 @@ defmodule ReplicationTest do
# happens for JavaScript tests.
@moduletag config: [{"replicator", "startup_jitter", "0"}]
- @moduletag :skip_on_jenkins
-
- test "source database does not exist" do
- name = random_db_name()
- check_not_found(name <> "_src", name <> "_tgt")
- end
-
- test "source database not found with path - COUCHDB-317" do
- name = random_db_name()
- check_not_found(name <> "_src", name <> "_tgt")
- end
-
test "source database not found with host" do
name = random_db_name()
- url = "http://127.0.0.1:15984/" <> name <> "_src"
- check_not_found(url, name <> "_tgt")
+ src_url = "http://127.0.0.1:15984/" <> name <> "_src"
+ tgt_url = "http://127.0.0.1:15984/" <> name <> "_tgt"
+ check_not_found(src_url, tgt_url)
end
def check_not_found(src, tgt) do
@@ -55,7 +41,9 @@ defmodule ReplicationTest do
doc = %{"_id" => "doc1"}
[doc] = save_docs(src_db_name, [doc])
- result = replicate(src_db_name, "http://127.0.0.1:15984/" <> tgt_db_name)
+ repl_src = "http://127.0.0.1:15984/" <> src_db_name
+ repl_tgt = "http://127.0.0.1:15984/" <> tgt_db_name
+ result = replicate(repl_src, repl_tgt)
assert result["ok"]
assert is_list(result["history"])
history = Enum.at(result["history"], 0)
@@ -79,7 +67,9 @@ defmodule ReplicationTest do
[doc] = save_docs(src_db_name, [doc])
- result = replicate(src_db_name, "http://127.0.0.1:15984/" <> tgt_db_name)
+ repl_src = "http://127.0.0.1:15984/" <> src_db_name
+ repl_tgt = "http://127.0.0.1:15984/" <> tgt_db_name
+ result = replicate(repl_src, repl_tgt)
assert result["ok"]
assert is_list(result["history"])
@@ -127,7 +117,8 @@ defmodule ReplicationTest do
repl_body = %{:continuous => true, :create_target => true}
repl_src = "http://127.0.0.1:15984/" <> src_db_name
- result = replicate(repl_src, tgt_db_name, body: repl_body)
+ repl_tgt = "http://127.0.0.1:15984/" <> tgt_db_name
+ result = replicate(repl_src, repl_tgt, body: repl_body)
assert result["ok"]
assert is_binary(result["_local_id"])
@@ -167,8 +158,9 @@ defmodule ReplicationTest do
save_docs(src_db_name, make_docs(1..6))
repl_src = "http://127.0.0.1:15984/" <> src_db_name
+ repl_tgt = "http://127.0.0.1:15984/" <> tgt_db_name
repl_body = %{"continuous" => true}
- result = replicate(repl_src, tgt_db_name, body: repl_body)
+ result = replicate(repl_src, repl_tgt, body: repl_body)
assert result["ok"]
assert is_binary(result["_local_id"])
@@ -282,7 +274,9 @@ defmodule ReplicationTest do
end
end
- result = replicate(src_prefix <> src_db_name, tgt_prefix <> tgt_db_name)
+ repl_src = src_prefix <> src_db_name
+ repl_tgt = tgt_prefix <> tgt_db_name
+ result = replicate(repl_src, repl_tgt)
assert result["ok"]
src_info =
@@ -1757,11 +1751,11 @@ defmodule ReplicationTest do
end
def get_att1_data do
- File.read!("test/data/lorem.txt")
+ File.read!(Path.expand("data/lorem.txt", __DIR__))
end
def get_att2_data do
- File.read!("test/data/lorem_b64.txt")
+ File.read!(Path.expand("data/lorem_b64.txt", __DIR__))
end
def cmp_json(lhs, rhs) when is_map(lhs) and is_map(rhs) do
diff --git a/test/elixir/test/reshard_helpers.exs b/test/elixir/test/reshard_helpers.exs
index 52ce301df..282d98c82 100644
--- a/test/elixir/test/reshard_helpers.exs
+++ b/test/elixir/test/reshard_helpers.exs
@@ -92,7 +92,7 @@ defmodule ReshardHelpers do
end
def wait_job_removed(id) do
- retry_until(fn -> get_job(id).status_code == 404 end, 200, 10_000)
+ retry_until(fn -> get_job(id).status_code == 404 end, 200, 60_000)
end
def wait_job_completed(id) do
@@ -100,7 +100,7 @@ defmodule ReshardHelpers do
end
def wait_job_state(id, state) do
- retry_until(fn -> get_job_state(id) == state end, 200, 10_000)
+ retry_until(fn -> get_job_state(id) == state end, 200, 60_000)
end
def reset_reshard_state do
diff --git a/test/elixir/test/test_helper.exs b/test/elixir/test/test_helper.exs
index ef71bbb1b..4bf65bcf6 100644
--- a/test/elixir/test/test_helper.exs
+++ b/test/elixir/test/test_helper.exs
@@ -2,12 +2,14 @@
# and skip certain tests that fail on jenkins.
exclude =
case System.get_env("BUILD_NUMBER") !== nil do
- true -> [pending: true, skip_on_jenkins: true]
- false -> [pending: true]
+ true -> [:pending, :skip_on_jenkins]
+ false -> [:pending]
end
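+# Preserve any :exclude values that were already configured and merge
+# them with the ones computed above.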
+current_exclude = Keyword.get(ExUnit.configuration(), :exclude, [])
+
ExUnit.configure(
- exclude: exclude,
+ exclude: Enum.uniq(exclude ++ current_exclude),
formatters: [JUnitFormatter, ExUnit.CLIFormatter]
)
diff --git a/test/javascript/tests/replication.js b/test/javascript/tests/replication.js
deleted file mode 100644
index ba586b409..000000000
--- a/test/javascript/tests/replication.js
+++ /dev/null
@@ -1,1920 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-couchTests.replication = function(debug) {
-// return console.log('TODO');
- if (debug) debugger;
-
- var host = CouchDB.host;
- // as we change names during execution, do NOT use test_suite_db or a
- // pre-computed value like ''+sourceDb.name (compute only on use)
- var sourceDb;
- var targetDb;
-
- var dbPairsPrefixes = [
- {
- source: "",
- target: ""
- },
- {
- source: CouchDB.protocol + host + "/",
- target: ""
- },
- {
- source: "",
- target: CouchDB.protocol + host + "/"
- },
- {
- source: CouchDB.protocol + host + "/",
- target: CouchDB.protocol + host + "/"
- }
- ];
-
- var att1_data = CouchDB.request("GET", "/_utils/script/test/lorem.txt");
- att1_data = att1_data.responseText;
-
- var att2_data = CouchDB.request("GET", "/_utils/script/test/lorem_b64.txt");
- att2_data = att2_data.responseText;
-
- var sourceInfo, targetInfo;
- var docs, doc, copy;
- var repResult;
- var i, j, k;
-
-
- function makeAttData(minSize) {
- var data = att1_data;
-
- while (data.length < minSize) {
- data = data + att1_data;
- }
- return data;
- }
-
-
- function runAllNodes(callback) {
- // new and fancy: clustered version: pull cluster_members and walk over all of them
- var xhr = CouchDB.request("GET", "/_membership");
- T(xhr.status === 200);
- JSON.parse(xhr.responseText).cluster_nodes.forEach(callback);
- }
-
- function runFirstNode(callback) {
- // new and fancy: clustered version: pull cluster_members and walk over all of them
- var xhr = CouchDB.request("GET", "/_membership");
- T(xhr.status === 200);
- var node = JSON.parse(xhr.responseText).cluster_nodes[0];
- return callback(node);
- }
-
- function getCompressionInfo() {
- return runFirstNode(function(node) {
- var xhr = CouchDB.request(
- "GET",
- "_node/" + node + "/_config/attachments"
- );
- T(xhr.status === 200);
- var res = JSON.parse(xhr.responseText);
- return {"level": res.compression_level, "types": res.compressible_types};
- });
- }
-
- function enableAttCompression(level, types) {
- runAllNodes(function(node) {
- var xhr = CouchDB.request(
- "PUT",
- "_node/" + node + "/_config/attachments/compression_level",
- {
- body: JSON.stringify(level),
- headers: {"X-Couch-Persist": "false"}
- }
- );
- T(xhr.status === 200);
- xhr = CouchDB.request(
- "PUT",
- "_node/" + node + "/_config/attachments/compressible_types",
- {
- body: JSON.stringify(types),
- headers: {"X-Couch-Persist": "false"}
- }
- );
- T(xhr.status === 200);
- });
- }
-
- function disableAttCompression() {
- runAllNodes(function(node) {
- var xhr = CouchDB.request(
- "PUT",
- "_node/" + node + "/_config/attachments/compression_level",
- {
- body: JSON.stringify("0"),
- headers: {"X-Couch-Persist": "false"}
- }
- );
- T(xhr.status === 200);
- });
- }
-
-
- function populateSourceDb(docs, dontRecreateDb) {
- if(dontRecreateDb !== true) {
- if(sourceDb) {
- sourceDb.deleteDb();
- }
- sourceDb = new CouchDB(get_random_db_name() + "_src",{"X-Couch-Full-Commit":"false"});
- sourceDb.createDb();
- }
- for (var i = 0; i < docs.length; i++) {
- var doc = docs[i];
- delete doc._rev;
- }
- if (docs.length > 0) {
- sourceDb.bulkSave(docs);
- }
- }
- function populateTargetDb(docs, dontRecreateDb) {
- if(dontRecreateDb !== true) {
- if(targetDb) {
- targetDb.deleteDb();
- }
- targetDb = new CouchDB(get_random_db_name() + "_tgt",{"X-Couch-Full-Commit":"false"});
- targetDb.createDb();
- }
- for (var i = 0; i < docs.length; i++) {
- var doc = docs[i];
- delete doc._rev;
- }
- if (docs.length > 0) {
- targetDb.bulkSave(docs);
- }
- }
-
-
- function addAtt(db, doc, attName, attData, type) {
- var uri = "/" + db.name + "/" + encodeURIComponent(doc._id) + "/" + attName;
-
- if (doc._rev) {
- uri += "?rev=" + doc._rev;
- }
-
- var xhr = CouchDB.request("PUT", uri, {
- headers: {
- "Content-Type": type
- },
- body: attData
- });
-
- T(xhr.status === 201);
- doc._rev = JSON.parse(xhr.responseText).rev;
- }
-
-
- function compareObjects(o1, o2) {
- for (var p in o1) {
- if (o1[p] === null && o2[p] !== null) {
- return false;
- } else if (typeof o1[p] === "object") {
- if ((typeof o2[p] !== "object") || o2[p] === null) {
- return false;
- }
- if (!arguments.callee(o1[p], o2[p])) {
- return false;
- }
- } else {
- if (o1[p] !== o2[p]) {
- return false;
- }
- }
- }
- return true;
- }
-
-
- function getTask(rep_id, delay) {
- var t0 = new Date();
- var t1;
- do {
- var xhr = CouchDB.request("GET", "/_active_tasks");
- var tasks = JSON.parse(xhr.responseText);
- for(var i = 0; i < tasks.length; i++) {
- if(tasks[i].replication_id == repResult._local_id) {
- return tasks[i];
- }
- }
- sleep(500);
- t1 = new Date();
- } while((t1 - t0) <= delay);
-
- return null;
- }
-
- function getSourceLastSeq(sourceDb) {
- return sourceDb.changes({"since":"now"}).last_seq;
- }
-
- function waitForSeq(sourceDb, targetDb, rep_id) {
- var sourceSeq = getSourceLastSeq(sourceDb),
- t0 = new Date(),
- t1,
- ms = 30000;
-
- do {
- var task = getTask(rep_id, 0);
- if(task && task["through_seq"] == sourceSeq) {
- return;
- }
- t1 = new Date();
- sleep(500);
- } while (((t1 - t0) <= ms));
- throw(Error('Timeout waiting for replication through_seq = source update seq'));
- }
-
- function waitReplicationTaskStop(rep_id) {
- var t0 = new Date(),
- t1,
- ms = 30000;
- do {
- var task = getTask(rep_id, 0);
- if(task == null) {
- return;
- }
- t1 = new Date();
- sleep(500);
- } while (((t1 - t0) <= ms));
- throw(Error('Timeout waiting for replication task stop' + rep_id));
- }
-
- // test simple replications (not continuous, not filtered), including
- // conflict creation
- docs = makeDocs(1, 21);
- docs.push({
- _id: "_design/foo",
- language: "javascript",
- value: "ddoc"
- });
-
- for (i = 0; i < dbPairsPrefixes.length; i++) {
- populateSourceDb(docs);
- populateTargetDb([]);
-
- // add some attachments
- for (j = 10; j < 15; j++) {
- addAtt(sourceDb, docs[j], "readme.txt", att1_data, "text/plain");
- }
-
- repResult = CouchDB.replicate(dbPairsPrefixes[i].source+sourceDb.name, dbPairsPrefixes[i].target+targetDb.name);
- TEquals(true, repResult.ok);
-
- sourceInfo = sourceDb.info();
- targetInfo = targetDb.info();
-
- TEquals(sourceInfo.doc_count, targetInfo.doc_count);
-
- TEquals('string', typeof repResult.session_id);
- // we can't rely on sequences in a cluster
- //TEquals(repResult.source_last_seq, sourceInfo.update_seq);
- TEquals(true, repResult.history instanceof Array);
- TEquals(1, repResult.history.length);
- TEquals(repResult.history[0].session_id, repResult.session_id);
- TEquals('string', typeof repResult.history[0].start_time);
- TEquals('string', typeof repResult.history[0].end_time);
- TEquals(0, repResult.history[0].start_last_seq);
- // we can't rely on sequences in a cluster
- //TEquals(sourceInfo.update_seq, repResult.history[0].end_last_seq);
- //TEquals(sourceInfo.update_seq, repResult.history[0].recorded_seq);
- TEquals(sourceInfo.doc_count, repResult.history[0].missing_checked);
- TEquals(sourceInfo.doc_count, repResult.history[0].missing_found);
- TEquals(sourceInfo.doc_count, repResult.history[0].docs_read);
- TEquals(sourceInfo.doc_count, repResult.history[0].docs_written);
- TEquals(0, repResult.history[0].doc_write_failures);
-
- for (j = 0; j < docs.length; j++) {
- doc = docs[j];
- copy = targetDb.open(doc._id);
-
- T(copy !== null);
- TEquals(true, compareObjects(doc, copy));
-
- if (j >= 10 && j < 15) {
- var atts = copy._attachments;
- TEquals('object', typeof atts);
- TEquals('object', typeof atts["readme.txt"]);
- TEquals(2, atts["readme.txt"].revpos);
- TEquals(0, atts["readme.txt"].content_type.indexOf("text/plain"));
- TEquals(true, atts["readme.txt"].stub);
-
- var att_copy = CouchDB.request(
- "GET", "/" + targetDb.name + "/" + copy._id + "/readme.txt"
- ).responseText;
- TEquals(att1_data.length, att_copy.length);
- TEquals(att1_data, att_copy);
- }
- }
-
-
- // add one more doc to source, more attachments to some existing docs
- // and replicate again
- var newDoc = {
- _id: "foo666",
- value: "d"
- };
- TEquals(true, sourceDb.save(newDoc).ok);
-
- // add some more attachments
- for (j = 10; j < 15; j++) {
- addAtt(sourceDb, docs[j], "data.dat", att2_data, "application/binary");
- }
-
- repResult = CouchDB.replicate(dbPairsPrefixes[i].source+sourceDb.name, dbPairsPrefixes[i].target+targetDb.name);
- TEquals(true, repResult.ok);
-
- sourceInfo = sourceDb.info();
- targetInfo = targetDb.info();
-
- TEquals(targetInfo.doc_count, sourceInfo.doc_count);
-
- TEquals('string', typeof repResult.session_id);
- // we can't rely on sequences in a cluster
- //TEquals(sourceInfo.update_seq, repResult.source_last_seq);
- TEquals(true, repResult.history instanceof Array);
- TEquals(2, repResult.history.length);
- TEquals(repResult.history[0].session_id, repResult.session_id);
- TEquals('string', typeof repResult.history[0].start_time);
- TEquals('string', typeof repResult.history[0].end_time);
- // we can't rely on sequences in a cluster
- //TEquals((sourceInfo.update_seq - 6), repResult.history[0].start_last_seq);
- //TEquals(sourceInfo.update_seq, repResult.history[0].end_last_seq);
- //TEquals(sourceInfo.update_seq, repResult.history[0].recorded_seq);
- TEquals(6, repResult.history[0].missing_checked);
- TEquals(6, repResult.history[0].missing_found);
- TEquals(6, repResult.history[0].docs_read);
- TEquals(6, repResult.history[0].docs_written);
- TEquals(0, repResult.history[0].doc_write_failures);
-
- copy = targetDb.open(newDoc._id);
- T(copy !== null);
- TEquals(newDoc._id, copy._id);
- TEquals(newDoc.value, copy.value);
-
- for (j = 10; j < 15; j++) {
- doc = docs[j];
- copy = targetDb.open(doc._id);
-
- T(copy !== null);
- TEquals(true, compareObjects(doc, copy));
-
- var atts = copy._attachments;
- TEquals('object', typeof atts);
- TEquals('object', typeof atts["readme.txt"]);
- TEquals(2, atts["readme.txt"].revpos);
- TEquals(0, atts["readme.txt"].content_type.indexOf("text/plain"));
- TEquals(true, atts["readme.txt"].stub);
-
- var att1_copy = CouchDB.request(
- "GET", "/" + targetDb.name + "/" + copy._id + "/readme.txt"
- ).responseText;
- TEquals(att1_data.length, att1_copy.length);
- TEquals(att1_data, att1_copy);
-
- TEquals('object', typeof atts["data.dat"]);
- TEquals(3, atts["data.dat"].revpos);
- TEquals(0, atts["data.dat"].content_type.indexOf("application/binary"));
- TEquals(true, atts["data.dat"].stub);
-
- var att2_copy = CouchDB.request(
- "GET", "/" + targetDb.name + "/" + copy._id + "/data.dat"
- ).responseText;
- TEquals(att2_data.length, att2_copy.length);
- TEquals(att2_data, att2_copy);
- }
-
- // test deletion is replicated
- doc = sourceDb.open(docs[1]._id);
- TEquals(true, sourceDb.deleteDoc(doc).ok);
-
- repResult = CouchDB.replicate(dbPairsPrefixes[i].source+sourceDb.name, dbPairsPrefixes[i].target+targetDb.name);
- TEquals(true, repResult.ok);
-
- sourceInfo = sourceDb.info();
- targetInfo = targetDb.info();
-
- TEquals(targetInfo.doc_count, sourceInfo.doc_count);
- TEquals(targetInfo.doc_del_count, sourceInfo.doc_del_count);
- TEquals(1, targetInfo.doc_del_count);
-
- TEquals(true, repResult.history instanceof Array);
- TEquals(3, repResult.history.length);
- // we can't rely on sequences in a cluster
- //TEquals((sourceInfo.update_seq - 1), repResult.history[0].start_last_seq);
- //TEquals(sourceInfo.update_seq, repResult.history[0].end_last_seq);
- //TEquals(sourceInfo.update_seq, repResult.history[0].recorded_seq);
- TEquals(1, repResult.history[0].missing_checked);
- TEquals(1, repResult.history[0].missing_found);
- TEquals(1, repResult.history[0].docs_read);
- TEquals(1, repResult.history[0].docs_written);
- TEquals(0, repResult.history[0].doc_write_failures);
-
- copy = targetDb.open(docs[1]._id);
- TEquals(null, copy);
-
- var changes = targetDb.changes({since: 0});
- // there is no guarantee of ordering also
- // however: the doc has to appear somewhere
- //var idx = changes.results.length - 1;
- var changesResDoc1 = changes.results.filter(function(c){return c.id == docs[1]._id;});
- TEquals(1, changesResDoc1.length);
- TEquals(docs[1]._id, changesResDoc1[0].id);
- TEquals(true, changesResDoc1[0].deleted);
-
- // test conflict
- doc = sourceDb.open(docs[0]._id);
- doc.value = "white";
- TEquals(true, sourceDb.save(doc).ok);
-
- copy = targetDb.open(docs[0]._id);
- copy.value = "black";
- TEquals(true, targetDb.save(copy).ok);
-
- repResult = CouchDB.replicate(dbPairsPrefixes[i].source+sourceDb.name, dbPairsPrefixes[i].target+targetDb.name);
- TEquals(true, repResult.ok);
-
- sourceInfo = sourceDb.info();
- targetInfo = targetDb.info();
-
- TEquals(sourceInfo.doc_count, targetInfo.doc_count);
-
- TEquals(true, repResult.history instanceof Array);
- TEquals(4, repResult.history.length);
- // we can't rely on sequences in a cluster
- //TEquals((sourceInfo.update_seq - 1), repResult.history[0].start_last_seq);
- //TEquals(sourceInfo.update_seq, repResult.history[0].end_last_seq);
- //TEquals(sourceInfo.update_seq, repResult.history[0].recorded_seq);
- TEquals(1, repResult.history[0].missing_checked);
- TEquals(1, repResult.history[0].missing_found);
- TEquals(1, repResult.history[0].docs_read);
- TEquals(1, repResult.history[0].docs_written);
- TEquals(0, repResult.history[0].doc_write_failures);
-
- copy = targetDb.open(docs[0]._id, {conflicts: true});
-
- TEquals(0, copy._rev.indexOf("2-"));
- TEquals(true, copy._conflicts instanceof Array);
- TEquals(1, copy._conflicts.length);
- TEquals(0, copy._conflicts[0].indexOf("2-"));
-
- // replicate again with conflict
- doc.value = "yellow";
- TEquals(true, sourceDb.save(doc).ok);
-
- repResult = CouchDB.replicate(dbPairsPrefixes[i].source+sourceDb.name, dbPairsPrefixes[i].target+targetDb.name);
- TEquals(true, repResult.ok);
-
- sourceInfo = sourceDb.info();
- targetInfo = targetDb.info();
-
- TEquals(sourceInfo.doc_count, targetInfo.doc_count);
-
- TEquals(true, repResult.history instanceof Array);
- TEquals(5, repResult.history.length);
- // we can't rely on sequences in a cluster
- //TEquals((sourceInfo.update_seq - 1), repResult.history[0].start_last_seq);
- //TEquals(sourceInfo.update_seq, repResult.history[0].end_last_seq);
- //TEquals(sourceInfo.update_seq, repResult.history[0].recorded_seq);
- TEquals(1, repResult.history[0].missing_checked);
- TEquals(1, repResult.history[0].missing_found);
- TEquals(1, repResult.history[0].docs_read);
- TEquals(1, repResult.history[0].docs_written);
- TEquals(0, repResult.history[0].doc_write_failures);
-
- copy = targetDb.open(docs[0]._id, {conflicts: true});
-
- TEquals(0, copy._rev.indexOf("3-"));
- TEquals(true, copy._conflicts instanceof Array);
- TEquals(1, copy._conflicts.length);
- TEquals(0, copy._conflicts[0].indexOf("2-"));
-
- // resolve the conflict
- TEquals(true, targetDb.deleteDoc({_id: copy._id, _rev: copy._conflicts[0]}).ok);
-
- // replicate again, check there are no more conflicts
- doc.value = "rainbow";
- TEquals(true, sourceDb.save(doc).ok);
-
- repResult = CouchDB.replicate(dbPairsPrefixes[i].source+sourceDb.name, dbPairsPrefixes[i].target+targetDb.name);
- TEquals(true, repResult.ok);
-
- sourceInfo = sourceDb.info();
- targetInfo = targetDb.info();
-
- TEquals(sourceInfo.doc_count, targetInfo.doc_count);
-
- TEquals(true, repResult.history instanceof Array);
- TEquals(6, repResult.history.length);
- // we can't rely on sequences in a cluster
- //TEquals((sourceInfo.update_seq - 1), repResult.history[0].start_last_seq);
- //TEquals(sourceInfo.update_seq, repResult.history[0].end_last_seq);
- //TEquals(sourceInfo.update_seq, repResult.history[0].recorded_seq);
- TEquals(1, repResult.history[0].missing_checked);
- TEquals(1, repResult.history[0].missing_found);
- TEquals(1, repResult.history[0].docs_read);
- TEquals(1, repResult.history[0].docs_written);
- TEquals(0, repResult.history[0].doc_write_failures);
-
- copy = targetDb.open(docs[0]._id, {conflicts: true});
-
- TEquals(0, copy._rev.indexOf("4-"));
- TEquals('undefined', typeof copy._conflicts);
-
- // test that revisions already in a target are not copied
- TEquals(true, sourceDb.save({_id: "foo1", value: 111}).ok);
- TEquals(true, targetDb.save({_id: "foo1", value: 111}).ok);
- TEquals(true, sourceDb.save({_id: "foo2", value: 222}).ok);
- TEquals(true, sourceDb.save({_id: "foo3", value: 333}).ok);
- TEquals(true, targetDb.save({_id: "foo3", value: 333}).ok);
-
- repResult = CouchDB.replicate(dbPairsPrefixes[i].source+sourceDb.name, dbPairsPrefixes[i].target+targetDb.name);
- TEquals(true, repResult.ok);
-
- sourceInfo = sourceDb.info();
- // we can't rely on sequences in a cluster
- //TEquals(sourceInfo.update_seq, repResult.source_last_seq);
- //TEquals(sourceInfo.update_seq - 3, repResult.history[0].start_last_seq);
- //TEquals(sourceInfo.update_seq, repResult.history[0].end_last_seq);
- //TEquals(sourceInfo.update_seq, repResult.history[0].recorded_seq);
- TEquals(3, repResult.history[0].missing_checked);
- TEquals(1, repResult.history[0].missing_found);
- TEquals(1, repResult.history[0].docs_read);
- TEquals(1, repResult.history[0].docs_written);
- TEquals(0, repResult.history[0].doc_write_failures);
-
- TEquals(true, sourceDb.save({_id: "foo4", value: 444}).ok);
- TEquals(true, targetDb.save({_id: "foo4", value: 444}).ok);
- TEquals(true, sourceDb.save({_id: "foo5", value: 555}).ok);
- TEquals(true, targetDb.save({_id: "foo5", value: 555}).ok);
-
- repResult = CouchDB.replicate(dbPairsPrefixes[i].source+sourceDb.name, dbPairsPrefixes[i].target+targetDb.name);
- TEquals(true, repResult.ok);
-
- sourceInfo = sourceDb.info();
- // we can't rely on sequences in a cluster
- //TEquals(sourceInfo.update_seq, repResult.source_last_seq);
- //TEquals(sourceInfo.update_seq - 2, repResult.history[0].start_last_seq);
- //TEquals(sourceInfo.update_seq, repResult.history[0].end_last_seq);
- //TEquals(sourceInfo.update_seq, repResult.history[0].recorded_seq);
- TEquals(2, repResult.history[0].missing_checked);
- TEquals(0, repResult.history[0].missing_found);
- TEquals(0, repResult.history[0].docs_read);
- TEquals(0, repResult.history[0].docs_written);
- TEquals(0, repResult.history[0].doc_write_failures);
-
- repResult = CouchDB.replicate(dbPairsPrefixes[i].source+sourceDb.name, dbPairsPrefixes[i].target+targetDb.name);
- TEquals(true, repResult.ok);
- TEquals(true, repResult.no_changes);
- sourceInfo = sourceDb.info();
- // we can't rely on sequences in a cluster
- //TEquals(sourceInfo.update_seq, repResult.source_last_seq);
- }
-
-
- // test error when source database does not exist
- try {
- CouchDB.replicate("foobar", "test_suite_db");
- T(false, "should have failed with db_not_found error");
- } catch (x) {
- TEquals("db_not_found", x.error);
- }
-
- // validate COUCHDB-317
- try {
- CouchDB.replicate("/foobar", "test_suite_db");
- T(false, "should have failed with db_not_found error");
- } catch (x) {
- TEquals("db_not_found", x.error);
- }
-
- try {
- CouchDB.replicate(CouchDB.protocol + host + "/foobar", "test_suite_db");
- T(false, "should have failed with db_not_found error");
- } catch (x) {
- TEquals("db_not_found", x.error);
- }
-
-
- // test since_seq parameter
- docs = makeDocs(1, 6);
-
- for (i = 0; i < dbPairsPrefixes.length; i++) {
- populateSourceDb(docs);
- populateTargetDb([]);
- // sequences are no longer simple numbers - so pull #3 from a feed
- var since_seq = sourceDb.changes().results[2].seq;
-
- var expected_ids = [];
- var changes = sourceDb.changes({since: JSON.stringify(since_seq)});
- for (j = 0; j < changes.results.length; j++) {
- expected_ids.push(changes.results[j].id);
- }
- TEquals(2, expected_ids.length, "2 documents since since_seq");
-
- // For OTP < R14B03, temporary child specs are kept in the supervisor
- // after the child terminates, so cancel the replication to delete the
- // child spec in those OTP releases, otherwise since_seq will have no
- // effect.
- try {
- CouchDB.replicate(
- dbPairsPrefixes[i].source+sourceDb.name,
- dbPairsPrefixes[i].target+targetDb.name,
- {body: {cancel: true}}
- );
- } catch (x) {
- // OTP R14B03 onwards
- TEquals("not_found", x.error);
- }
- repResult = CouchDB.replicate(
- dbPairsPrefixes[i].source+sourceDb.name,
- dbPairsPrefixes[i].target+targetDb.name,
- {body: {since_seq: since_seq}}
- );
- // Same reason as before. But here we don't want since_seq to affect
- // subsequent replications, so we need to delete the child spec from the
- // supervisor (since_seq is not used to calculate the replication ID).
- try {
- CouchDB.replicate(
- dbPairsPrefixes[i].source+sourceDb.name,
- dbPairsPrefixes[i].target+targetDb.name,
- {body: {cancel: true}}
- );
- } catch (x) {
- // OTP R14B03 onwards
- TEquals("not_found", x.error);
- }
- TEquals(true, repResult.ok);
- TEquals(2, repResult.history[0].missing_checked);
- TEquals(2, repResult.history[0].missing_found);
- TEquals(2, repResult.history[0].docs_read);
- TEquals(2, repResult.history[0].docs_written);
- TEquals(0, repResult.history[0].doc_write_failures);
-
- for (j = 0; j < docs.length; j++) {
- doc = docs[j];
- copy = targetDb.open(doc._id);
-
- if (expected_ids.indexOf(doc._id) === -1) {
- T(copy === null);
- } else {
- T(copy !== null);
- TEquals(true, compareObjects(doc, copy));
- }
- }
- }
-
-
- // test errors due to doc validate_doc_update functions in the target endpoint
- docs = makeDocs(1, 8);
- docs[2]["_attachments"] = {
- "hello.txt": {
- "content_type": "text/plain",
- "data": "aGVsbG8gd29ybGQ=" // base64:encode("hello world")
- }
- };
- var ddoc = {
- _id: "_design/test",
- language: "javascript",
- validate_doc_update: (function(newDoc, oldDoc, userCtx, secObj) {
- if ((newDoc.integer % 2) !== 0) {
- throw {forbidden: "I only like multiples of 2."};
- }
- }).toString()
- };
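-  // a write rejected by validate_doc_update surfaces from the target as a
-  // forbidden error, e.g. {"error":"forbidden","reason":"I only like
-  // multiples of 2."}; the replicator records each rejection as a
-  // doc_write_failure instead of aborting the whole replication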
-
- for (i = 0; i < dbPairsPrefixes.length; i++) {
- populateSourceDb(docs);
- populateTargetDb([ddoc]);
-
- repResult = CouchDB.replicate(
- dbPairsPrefixes[i].source+sourceDb.name,
- dbPairsPrefixes[i].target+targetDb.name
- );
- TEquals(true, repResult.ok);
- TEquals(7, repResult.history[0].missing_checked);
- TEquals(7, repResult.history[0].missing_found);
- TEquals(7, repResult.history[0].docs_read);
- TEquals(3, repResult.history[0].docs_written);
- TEquals(4, repResult.history[0].doc_write_failures);
-
- for (j = 0; j < docs.length; j++) {
- doc = docs[j];
- copy = targetDb.open(doc._id);
-
- if (doc.integer % 2 === 0) {
- T(copy !== null);
- TEquals(copy.integer, doc.integer);
- } else {
- T(copy === null);
- }
- }
- }
-
-
- // test create_target option
- docs = makeDocs(1, 2);
-
- for (i = 0; i < dbPairsPrefixes.length; i++) {
- populateSourceDb(docs);
- targetDb.deleteDb();
-
- repResult = CouchDB.replicate(
- dbPairsPrefixes[i].source+sourceDb.name,
- dbPairsPrefixes[i].target+targetDb.name,
- {body: {create_target: true}}
- );
- TEquals(true, repResult.ok);
-
- sourceInfo = sourceDb.info();
- targetInfo = targetDb.info();
-
- TEquals(sourceInfo.doc_count, targetInfo.doc_count);
- TEquals(sourceInfo.update_seq, targetInfo.update_seq);
- }
-
-
- // test filtered replication
- docs = makeDocs(1, 31);
- docs.push({
- _id: "_design/mydesign",
- language: "javascript",
- filters: {
- myfilter: (function(doc, req) {
- var modulus = Number(req.query.modulus);
- var special = req.query.special;
- return (doc.integer % modulus === 0) || (doc.string === special);
- }).toString()
- }
- });
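-  // query_params from the replication body reach the filter as string
-  // values on req.query, which is why the filter coerces modulus with
-  // Number() and compares special as a string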
-
- for (i = 0; i < dbPairsPrefixes.length; i++) {
- populateSourceDb(docs);
- populateTargetDb([]);
-
- repResult = CouchDB.replicate(
- dbPairsPrefixes[i].source+sourceDb.name,
- dbPairsPrefixes[i].target+targetDb.name,
- {
- body: {
- filter: "mydesign/myfilter",
- query_params: {
- modulus: "2",
- special: "7"
- }
- }
- }
- );
-
- TEquals(true, repResult.ok);
-
- for (j = 0; j < docs.length; j++) {
- doc = docs[j];
- copy = targetDb.open(doc._id);
-
-      if ((doc.integer && (doc.integer % 2 === 0)) || (doc.string === "7")) {
-        T(copy !== null);
- TEquals(true, compareObjects(doc, copy));
- } else {
- TEquals(null, copy);
- }
- }
-
- TEquals(true, repResult.history instanceof Array);
- TEquals(1, repResult.history.length);
-    // We (incorrectly) don't record update sequences for documents
-    // that don't pass the changes feed filter. Historically the
-    // last document to pass was the second-to-last doc, which has
-    // an update sequence of 30. Work applied to avoid conflicts
-    // from duplicate IDs breaking _bulk_docs updates added a sort
-    // to the logic, which changes this: now the last document to
-    // pass has a doc id of "8" and is at update_seq 29 (because only
-    // "9" and the design doc come after it).
- //
-    // In the future the fix ought to be that we record the update
-    // sequence of the database. BigCouch has some existing work on
-    // this in the clustered case, because if very few documents
-    // pass the filter then (given the single-node behavior) you end
-    // up having to rescan a large portion of the database.
-    // We can't rely on sequences in a cluster anyway: not only can the
-    // same ordinal appear twice (at least for n>1), sequences now also
-    // carry opaque hashes, so comparing seq == 29 is a lottery (and
-    // cutting off the hashes is nonsense). Above, all attributes of all
-    // docs were brute-force compared; here we additionally check that
-    // excluded docs did NOT make it across. Either way, the sequence
-    // assertions below are left out.
- //TEquals(29, repResult.source_last_seq);
- //TEquals(0, repResult.history[0].start_last_seq);
- //TEquals(29, repResult.history[0].end_last_seq);
- //TEquals(29, repResult.history[0].recorded_seq);
- // 16 => 15 docs with even integer field + 1 doc with string field "7"
- TEquals(16, repResult.history[0].missing_checked);
- TEquals(16, repResult.history[0].missing_found);
- TEquals(16, repResult.history[0].docs_read);
- TEquals(16, repResult.history[0].docs_written);
- TEquals(0, repResult.history[0].doc_write_failures);
-
-
- // add new docs to source and resume the same replication
- var newDocs = makeDocs(50, 56);
- populateSourceDb(newDocs, true);
-
- repResult = CouchDB.replicate(
- dbPairsPrefixes[i].source+sourceDb.name,
- dbPairsPrefixes[i].target+targetDb.name,
- {
- body: {
- filter: "mydesign/myfilter",
- query_params: {
- modulus: "2",
- special: "7"
- }
- }
- }
- );
-
- TEquals(true, repResult.ok);
-
- for (j = 0; j < newDocs.length; j++) {
- doc = newDocs[j];
- copy = targetDb.open(doc._id);
-
-      if (doc.integer && (doc.integer % 2 === 0)) {
-        T(copy !== null);
- TEquals(true, compareObjects(doc, copy));
- } else {
- TEquals(null, copy);
- }
- }
-
- // last doc has even integer field, so last replicated seq is 36
-    // in a cluster there is no reliable seq (ditto above)
- //TEquals(36, repResult.source_last_seq);
- TEquals(true, repResult.history instanceof Array);
- TEquals(2, repResult.history.length);
- //TEquals(29, repResult.history[0].start_last_seq);
- //TEquals(36, repResult.history[0].end_last_seq);
- //TEquals(36, repResult.history[0].recorded_seq);
- TEquals(3, repResult.history[0].missing_checked);
- TEquals(3, repResult.history[0].missing_found);
- TEquals(3, repResult.history[0].docs_read);
- TEquals(3, repResult.history[0].docs_written);
- TEquals(0, repResult.history[0].doc_write_failures);
- }
-
-
- // test filtered replication works as expected after changing the filter's
- // code (ticket COUCHDB-892)
- var filterFun1 = (function(doc, req) {
- if (doc.value < Number(req.query.maxvalue)) {
- return true;
- } else {
- return false;
- }
- }).toString();
-
- var filterFun2 = (function(doc, req) {
- return true;
- }).toString();
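-  // the replication ID incorporates (among other things) a checksum of the
-  // filter's source code, so changing the filter invalidates the old
-  // checkpoint and the next run starts from scratch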
-
- for (i = 0; i < dbPairsPrefixes.length; i++) {
- populateTargetDb([]);
- populateSourceDb([]);
-
- TEquals(true, sourceDb.save({_id: "foo1", value: 1}).ok);
- TEquals(true, sourceDb.save({_id: "foo2", value: 2}).ok);
- TEquals(true, sourceDb.save({_id: "foo3", value: 3}).ok);
- TEquals(true, sourceDb.save({_id: "foo4", value: 4}).ok);
-
- var ddoc = {
- "_id": "_design/mydesign",
- "language": "javascript",
- "filters": {
- "myfilter": filterFun1
- }
- };
-
- TEquals(true, sourceDb.save(ddoc).ok);
-
- repResult = CouchDB.replicate(
- dbPairsPrefixes[i].source+sourceDb.name,
- dbPairsPrefixes[i].target+targetDb.name,
- {
- body: {
- filter: "mydesign/myfilter",
- query_params: {
- maxvalue: "3"
- }
- }
- }
- );
-
- TEquals(true, repResult.ok);
- TEquals(true, repResult.history instanceof Array);
- TEquals(1, repResult.history.length);
- TEquals(2, repResult.history[0].docs_written);
- TEquals(2, repResult.history[0].docs_read);
- TEquals(0, repResult.history[0].doc_write_failures);
-
- var docFoo1 = targetDb.open("foo1");
- T(docFoo1 !== null);
- TEquals(1, docFoo1.value);
-
- var docFoo2 = targetDb.open("foo2");
- T(docFoo2 !== null);
- TEquals(2, docFoo2.value);
-
- var docFoo3 = targetDb.open("foo3");
- TEquals(null, docFoo3);
-
- var docFoo4 = targetDb.open("foo4");
- TEquals(null, docFoo4);
-
- // replication should start from scratch after the filter's code changed
-
- ddoc.filters.myfilter = filterFun2;
- TEquals(true, sourceDb.save(ddoc).ok);
-
- repResult = CouchDB.replicate(
- dbPairsPrefixes[i].source+sourceDb.name,
- dbPairsPrefixes[i].target+targetDb.name,
- {
- body: {
- filter: "mydesign/myfilter",
- query_params : {
- maxvalue: "3"
- }
- }
- }
- );
-
- TEquals(true, repResult.ok);
- TEquals(true, repResult.history instanceof Array);
- TEquals(1, repResult.history.length);
- TEquals(3, repResult.history[0].docs_written);
- TEquals(3, repResult.history[0].docs_read);
- TEquals(0, repResult.history[0].doc_write_failures);
-
- docFoo1 = targetDb.open("foo1");
- T(docFoo1 !== null);
- TEquals(1, docFoo1.value);
-
- docFoo2 = targetDb.open("foo2");
- T(docFoo2 !== null);
- TEquals(2, docFoo2.value);
-
- docFoo3 = targetDb.open("foo3");
- T(docFoo3 !== null);
- TEquals(3, docFoo3.value);
-
- docFoo4 = targetDb.open("foo4");
- T(docFoo4 !== null);
- TEquals(4, docFoo4.value);
-
- T(targetDb.open("_design/mydesign") !== null);
- }
-
-
- // test replication by doc IDs
- docs = makeDocs(1, 11);
- docs.push({
- _id: "_design/foo",
- language: "javascript",
- integer: 1
- });
-
- var target_doc_ids = [
- { initial: ["1", "2", "10"], after: [], conflict_id: "2" },
- { initial: ["1", "2"], after: ["7"], conflict_id: "1" },
- { initial: ["1", "foo_666", "10"], after: ["7"], conflict_id: "10" },
- { initial: ["_design/foo", "8"], after: ["foo_5"], conflict_id: "8" },
- { initial: ["_design%2Ffoo", "8"], after: ["foo_5"], conflict_id: "8" },
- { initial: [], after: ["foo_1000", "_design/foo", "1"], conflict_id: "1" }
- ];
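-  // "foo_*" IDs deliberately don't exist in the source, and
-  // "_design%2Ffoo" is the URL-encoded form of "_design/foo"; both
-  // spellings should be accepted in doc_ids (they are decoded before
-  // being looked up below)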
- var doc_ids, after_doc_ids;
- var id, num_inexistent_docs, after_num_inexistent_docs;
- var total, after_total;
-
- for (i = 0; i < dbPairsPrefixes.length; i++) {
-
- for (j = 0; j < target_doc_ids.length; j++) {
- doc_ids = target_doc_ids[j].initial;
- num_inexistent_docs = 0;
-
- for (k = 0; k < doc_ids.length; k++) {
- id = doc_ids[k];
- if (id.indexOf("foo_") === 0) {
- num_inexistent_docs += 1;
- }
- }
-
- populateSourceDb(docs);
- populateTargetDb([]);
-
- repResult = CouchDB.replicate(
- dbPairsPrefixes[i].source+sourceDb.name,
- dbPairsPrefixes[i].target+targetDb.name,
- {
- body: {
- doc_ids: doc_ids
- }
- }
- );
-
- total = doc_ids.length - num_inexistent_docs;
- TEquals(true, repResult.ok);
- if (total === 0) {
- TEquals(true, repResult.no_changes);
- } else {
- TEquals('string', typeof repResult.start_time);
- TEquals('string', typeof repResult.end_time);
- TEquals(total, repResult.docs_read);
- TEquals(total, repResult.docs_written);
- TEquals(0, repResult.doc_write_failures);
- }
-
- for (k = 0; k < doc_ids.length; k++) {
- id = decodeURIComponent(doc_ids[k]);
- doc = sourceDb.open(id);
- copy = targetDb.open(id);
-
- if (id.indexOf("foo_") === 0) {
- TEquals(null, doc);
- TEquals(null, copy);
- } else {
- T(doc !== null);
- T(copy !== null);
- TEquals(true, compareObjects(doc, copy));
- }
- }
-
- // be absolutely sure that other docs were not replicated
- for (k = 0; k < docs.length; k++) {
- var base_id = docs[k]._id;
- id = encodeURIComponent(base_id);
- doc = targetDb.open(base_id);
-
- if ((doc_ids.indexOf(id) >= 0) || (doc_ids.indexOf(base_id) >= 0)) {
- T(doc !== null);
- } else {
- TEquals(null, doc);
- }
- }
-
- targetInfo = targetDb.info();
- TEquals(total, targetInfo.doc_count);
-
-
-      // add more docs through replication by doc IDs
- after_doc_ids = target_doc_ids[j].after;
- after_num_inexistent_docs = 0;
-
- for (k = 0; k < after_doc_ids.length; k++) {
- id = after_doc_ids[k];
- if (id.indexOf("foo_") === 0) {
- after_num_inexistent_docs += 1;
- }
- }
-
- repResult = CouchDB.replicate(
- dbPairsPrefixes[i].source+sourceDb.name,
- dbPairsPrefixes[i].target+targetDb.name,
- {
- body: {
- doc_ids: after_doc_ids
- }
- }
- );
-
- after_total = after_doc_ids.length - after_num_inexistent_docs;
- TEquals(true, repResult.ok);
- if (after_total === 0) {
- TEquals(true, repResult.no_changes);
- } else {
- TEquals('string', typeof repResult.start_time);
- TEquals('string', typeof repResult.end_time);
- TEquals(after_total, repResult.docs_read);
- TEquals(after_total, repResult.docs_written);
- TEquals(0, repResult.doc_write_failures);
- }
-
- for (k = 0; k < after_doc_ids.length; k++) {
- id = after_doc_ids[k];
- doc = sourceDb.open(id);
- copy = targetDb.open(id);
-
- if (id.indexOf("foo_") === 0) {
- TEquals(null, doc);
- TEquals(null, copy);
- } else {
- T(doc !== null);
- T(copy !== null);
- TEquals(true, compareObjects(doc, copy));
- }
- }
-
- // be absolutely sure that other docs were not replicated
- for (k = 0; k < docs.length; k++) {
- var base_id = docs[k]._id;
- id = encodeURIComponent(base_id);
- doc = targetDb.open(base_id);
-
- if ((doc_ids.indexOf(id) >= 0) || (after_doc_ids.indexOf(id) >= 0) ||
- (doc_ids.indexOf(base_id) >= 0) ||
- (after_doc_ids.indexOf(base_id) >= 0)) {
- T(doc !== null);
- } else {
- TEquals(null, doc);
- }
- }
-
- targetInfo = targetDb.info();
- TEquals((total + after_total), targetInfo.doc_count);
-
-
-      // replicate the same doc again after updating it on the source (no conflict)
- id = target_doc_ids[j].conflict_id;
- doc = sourceDb.open(id);
- T(doc !== null);
- doc.integer = 666;
- TEquals(true, sourceDb.save(doc).ok);
- addAtt(sourceDb, doc, "readme.txt", att1_data, "text/plain");
- addAtt(sourceDb, doc, "data.dat", att2_data, "application/binary");
-
- repResult = CouchDB.replicate(
- dbPairsPrefixes[i].source+sourceDb.name,
- dbPairsPrefixes[i].target+targetDb.name,
- {
- body: {
- doc_ids: [id]
- }
- }
- );
-
- TEquals(true, repResult.ok);
- TEquals(1, repResult.docs_read);
- TEquals(1, repResult.docs_written);
- TEquals(0, repResult.doc_write_failures);
-
- copy = targetDb.open(id, {conflicts: true});
-
- TEquals(666, copy.integer);
- TEquals(0, copy._rev.indexOf("4-"));
- TEquals('undefined', typeof copy._conflicts);
-
- var atts = copy._attachments;
- TEquals('object', typeof atts);
- TEquals('object', typeof atts["readme.txt"]);
- TEquals(3, atts["readme.txt"].revpos);
- TEquals(0, atts["readme.txt"].content_type.indexOf("text/plain"));
- TEquals(true, atts["readme.txt"].stub);
-
- var att1_copy = CouchDB.request(
- "GET", "/" + targetDb.name + "/" + copy._id + "/readme.txt"
- ).responseText;
- TEquals(att1_data.length, att1_copy.length);
- TEquals(att1_data, att1_copy);
-
- TEquals('object', typeof atts["data.dat"]);
- TEquals(4, atts["data.dat"].revpos);
- TEquals(0, atts["data.dat"].content_type.indexOf("application/binary"));
- TEquals(true, atts["data.dat"].stub);
-
- var att2_copy = CouchDB.request(
- "GET", "/" + targetDb.name + "/" + copy._id + "/data.dat"
- ).responseText;
- TEquals(att2_data.length, att2_copy.length);
- TEquals(att2_data, att2_copy);
-
-
-      // generate a conflict through replication by doc IDs
- id = target_doc_ids[j].conflict_id;
- doc = sourceDb.open(id);
- copy = targetDb.open(id);
- T(doc !== null);
- T(copy !== null);
- doc.integer += 100;
- copy.integer += 1;
- TEquals(true, sourceDb.save(doc).ok);
- TEquals(true, targetDb.save(copy).ok);
-
- repResult = CouchDB.replicate(
- dbPairsPrefixes[i].source+sourceDb.name,
- dbPairsPrefixes[i].target+targetDb.name,
- {
- body: {
- doc_ids: [id]
- }
- }
- );
-
- TEquals(true, repResult.ok);
- TEquals(1, repResult.docs_read);
- TEquals(1, repResult.docs_written);
- TEquals(0, repResult.doc_write_failures);
-
- copy = targetDb.open(id, {conflicts: true});
-
- TEquals(0, copy._rev.indexOf("5-"));
- TEquals(true, copy._conflicts instanceof Array);
- TEquals(1, copy._conflicts.length);
- TEquals(0, copy._conflicts[0].indexOf("5-"));
- }
- }
-
-
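-  // test continuous replication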
- docs = makeDocs(1, 25);
- docs.push({
- _id: "_design/foo",
- language: "javascript",
- filters: {
- myfilter: (function(doc, req) { return true; }).toString()
- }
- });
-
- for (i = 0; i < dbPairsPrefixes.length; i++) {
- populateSourceDb(docs);
- populateTargetDb([]);
-
- // add some attachments
- for (j = 10; j < 15; j++) {
- addAtt(sourceDb, docs[j], "readme.txt", att1_data, "text/plain");
- }
-
- repResult = CouchDB.replicate(
- dbPairsPrefixes[i].source+sourceDb.name,
- dbPairsPrefixes[i].target+targetDb.name,
- {
- body: {
- continuous: true
- }
- }
- );
- TEquals(true, repResult.ok);
- TEquals('string', typeof repResult._local_id);
-
- var rep_id = repResult._local_id;
-
- waitForSeq(sourceDb, targetDb, rep_id);
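-    // waitForSeq() is a helper defined earlier in this suite; presumably it
-    // polls until the replication task has checkpointed the source's current
-    // update sequence, i.e. until the target has caught up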
-
- for (j = 0; j < docs.length; j++) {
- doc = docs[j];
- copy = targetDb.open(doc._id);
-
- T(copy !== null);
- TEquals(true, compareObjects(doc, copy));
-
- if (j >= 10 && j < 15) {
- var atts = copy._attachments;
- TEquals('object', typeof atts);
- TEquals('object', typeof atts["readme.txt"]);
- TEquals(2, atts["readme.txt"].revpos);
- TEquals(0, atts["readme.txt"].content_type.indexOf("text/plain"));
- TEquals(true, atts["readme.txt"].stub);
-
- var att_copy = CouchDB.request(
- "GET", "/" + targetDb.name + "/" + copy._id + "/readme.txt"
- ).responseText;
- TEquals(att1_data.length, att_copy.length);
- TEquals(att1_data, att_copy);
- }
- }
-
- sourceInfo = sourceDb.info();
- targetInfo = targetDb.info();
-
- TEquals(sourceInfo.doc_count, targetInfo.doc_count);
-
- // add attachments to docs in source
- for (j = 10; j < 15; j++) {
- addAtt(sourceDb, docs[j], "data.dat", att2_data, "application/binary");
- }
-
- var ddoc = docs[docs.length - 1]; // design doc
- addAtt(sourceDb, ddoc, "readme.txt", att1_data, "text/plain");
-
- waitForSeq(sourceDb, targetDb, rep_id);
-
- var modifDocs = docs.slice(10, 15).concat([ddoc]);
- for (j = 0; j < modifDocs.length; j++) {
- doc = modifDocs[j];
- copy = targetDb.open(doc._id);
-
- T(copy !== null);
- TEquals(true, compareObjects(doc, copy));
-
- var atts = copy._attachments;
- TEquals('object', typeof atts);
- TEquals('object', typeof atts["readme.txt"]);
- TEquals(2, atts["readme.txt"].revpos);
- TEquals(0, atts["readme.txt"].content_type.indexOf("text/plain"));
- TEquals(true, atts["readme.txt"].stub);
-
- var att1_copy = CouchDB.request(
- "GET", "/" + targetDb.name + "/" + copy._id + "/readme.txt"
- ).responseText;
- TEquals(att1_data.length, att1_copy.length);
- TEquals(att1_data, att1_copy);
-
- if (doc._id.indexOf("_design/") === -1) {
- TEquals('object', typeof atts["data.dat"]);
- TEquals(3, atts["data.dat"].revpos);
- TEquals(0, atts["data.dat"].content_type.indexOf("application/binary"));
- TEquals(true, atts["data.dat"].stub);
-
- var att2_copy = CouchDB.request(
- "GET", "/" + targetDb.name + "/" + copy._id + "/data.dat"
- ).responseText;
- TEquals(att2_data.length, att2_copy.length);
- TEquals(att2_data, att2_copy);
- }
- }
-
- sourceInfo = sourceDb.info();
- targetInfo = targetDb.info();
-
- TEquals(sourceInfo.doc_count, targetInfo.doc_count);
-
- // add another attachment to the ddoc on source
- addAtt(sourceDb, ddoc, "data.dat", att2_data, "application/binary");
-
- waitForSeq(sourceDb, targetDb, rep_id);
-
- copy = targetDb.open(ddoc._id);
- var atts = copy._attachments;
- TEquals('object', typeof atts);
- TEquals('object', typeof atts["readme.txt"]);
- TEquals(2, atts["readme.txt"].revpos);
- TEquals(0, atts["readme.txt"].content_type.indexOf("text/plain"));
- TEquals(true, atts["readme.txt"].stub);
-
- var att1_copy = CouchDB.request(
- "GET", "/" + targetDb.name + "/" + copy._id + "/readme.txt"
- ).responseText;
- TEquals(att1_data.length, att1_copy.length);
- TEquals(att1_data, att1_copy);
-
- TEquals('object', typeof atts["data.dat"]);
- TEquals(3, atts["data.dat"].revpos);
- TEquals(0, atts["data.dat"].content_type.indexOf("application/binary"));
- TEquals(true, atts["data.dat"].stub);
-
- var att2_copy = CouchDB.request(
- "GET", "/" + targetDb.name + "/" + copy._id + "/data.dat"
- ).responseText;
- TEquals(att2_data.length, att2_copy.length);
- TEquals(att2_data, att2_copy);
-
- sourceInfo = sourceDb.info();
- targetInfo = targetDb.info();
-
- TEquals(sourceInfo.doc_count, targetInfo.doc_count);
-
-
- // add more docs to source
- var newDocs = makeDocs(25, 35);
- populateSourceDb(newDocs, true);
-
- waitForSeq(sourceDb, targetDb, rep_id);
-
- for (j = 0; j < newDocs.length; j++) {
- doc = newDocs[j];
- copy = targetDb.open(doc._id);
-
- T(copy !== null);
- TEquals(true, compareObjects(doc, copy));
- }
-
- sourceInfo = sourceDb.info();
- targetInfo = targetDb.info();
-
- TEquals(sourceInfo.doc_count, targetInfo.doc_count);
-
- // delete docs from source
- TEquals(true, sourceDb.deleteDoc(newDocs[0]).ok);
- TEquals(true, sourceDb.deleteDoc(newDocs[6]).ok);
-
- waitForSeq(sourceDb, targetDb, rep_id);
-
- copy = targetDb.open(newDocs[0]._id);
- TEquals(null, copy);
- copy = targetDb.open(newDocs[6]._id);
- TEquals(null, copy);
-
- var changes = targetDb.changes({since: targetInfo.update_seq});
-    // unfortunately, there is no way of relying on ordering in a cluster,
-    // but we can assume the two deletions are the last two changes
- var line1 = changes.results[changes.results.length - 2];
- var line2 = changes.results[changes.results.length - 1];
- T(newDocs[0]._id == line1.id || newDocs[0]._id == line2.id);
- T(newDocs[6]._id == line1.id || newDocs[6]._id == line2.id);
- T(line1.deleted && line2.deleted);
-
- // cancel the replication
- repResult = CouchDB.replicate(
- dbPairsPrefixes[i].source+sourceDb.name,
- dbPairsPrefixes[i].target+targetDb.name,
- {
- body: {
- continuous: true,
- cancel: true
- }
- }
- );
- TEquals(true, repResult.ok);
- TEquals(rep_id, repResult._local_id);
-
- doc = {
- _id: 'foobar',
- value: 666
- };
- TEquals(true, sourceDb.save(doc).ok);
-
- waitReplicationTaskStop(rep_id);
-
- copy = targetDb.open(doc._id);
- TEquals(null, copy);
- }
-
- // COUCHDB-1093 - filtered and continuous _changes feed dies when the
- // database is compacted
-  // no longer relevant with clustering: you can't compact a clustered database (per se, at least)
- /*
- docs = makeDocs(1, 10);
- docs.push({
- _id: "_design/foo",
- language: "javascript",
- filters: {
- myfilter: (function(doc, req) { return true; }).toString()
- }
- });
- populateSourceDb(docs);
- populateTargetDb([]);
-
- repResult = CouchDB.replicate(
- CouchDB.protocol + host + "/" + sourceDb.name,
- targetDb.name,
- {
- body: {
- continuous: true,
- filter: "foo/myfilter"
- }
- }
- );
- TEquals(true, repResult.ok);
- TEquals('string', typeof repResult._local_id);
-
- TEquals(true, sourceDb.compact().ok);
- while (sourceDb.info().compact_running) {};
-
- TEquals(true, sourceDb.save(makeDocs(30, 31)[0]).ok);
-
- var task = getTask(repResult._local_id, 1000);
- T(task != null);
-
- waitForSeq(sourceDb, targetDb, repResult._local_id);
- T(sourceDb.open("30") !== null);
-
- // cancel replication
- repResult = CouchDB.replicate(
- CouchDB.protocol + host + "/" + sourceDb.name,
- targetDb.name,
- {
- body: {
- continuous: true,
- filter: "foo/myfilter",
- cancel: true
- }
- }
- );
- TEquals(true, repResult.ok);
- TEquals('string', typeof repResult._local_id);
- */
-
- //
- // test replication of compressed attachments
- //
- doc = {
- _id: "foobar"
- };
- var bigTextAtt = makeAttData(128 * 1024);
- var attName = "readme.txt";
- var oldSettings = getCompressionInfo();
- var compressionLevel = oldSettings.level;
- var compressibleTypes = oldSettings.types;
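-  // getCompressionInfo()/enableAttCompression()/disableAttCompression()
-  // are helpers from earlier in this suite; presumably they read and write
-  // the [attachments] config section, roughly:
-  //   CouchDB.request("PUT", "/_config/attachments/compression_level",
-  //                   {body: JSON.stringify("8")});
-  //   CouchDB.request("PUT", "/_config/attachments/compressible_types",
-  //                   {body: JSON.stringify("text/*")});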
-
- for (i = 0; i < dbPairsPrefixes.length; i++) {
- populateSourceDb([doc]);
- populateTargetDb([]);
-
- // enable compression of text types
- enableAttCompression("8", "text/*");
-
- // add text attachment to foobar doc
- xhr = CouchDB.request(
- "PUT",
- "/" + sourceDb.name + "/" + doc._id + "/" + attName + "?rev=" + doc._rev,
- {
- body: bigTextAtt,
- headers: {"Content-Type": "text/plain"}
- }
- );
- TEquals(201, xhr.status);
-
- // disable compression and replicate
- disableAttCompression();
-
- repResult = CouchDB.replicate(dbPairsPrefixes[i].source+sourceDb.name, dbPairsPrefixes[i].target+targetDb.name);
- TEquals(true, repResult.ok);
- TEquals(true, repResult.history instanceof Array);
- TEquals(1, repResult.history.length);
- TEquals(1, repResult.history[0].missing_checked);
- TEquals(1, repResult.history[0].missing_found);
- TEquals(1, repResult.history[0].docs_read);
- TEquals(1, repResult.history[0].docs_written);
- TEquals(0, repResult.history[0].doc_write_failures);
-
- copy = targetDb.open(
- doc._id,
- {att_encoding_info: true, bypass_cache: Math.round(Math.random() * 1000)}
- );
- T(copy !== null);
- T(attName in copy._attachments);
- TEquals("gzip", copy._attachments[attName].encoding);
- TEquals("number", typeof copy._attachments[attName].length);
- TEquals("number", typeof copy._attachments[attName].encoded_length);
- T(copy._attachments[attName].encoded_length < copy._attachments[attName].length);
- }
-
-  bigTextAtt = null; // drop the reference ('delete' is a no-op on declared variables)
- // restore original settings
- enableAttCompression(compressionLevel, compressibleTypes);
-
- //
-  // test replication triggered by non-admins
- //
-
- // case 1) user triggering the replication is not a DB admin of the target DB
- var joeUserDoc = CouchDB.prepareUserDoc({
- name: "joe",
- roles: ["erlanger"]
- }, "erly");
- var defaultUsersDb = new CouchDB("_users", {"X-Couch-Full-Commit":"false"});
-  try { defaultUsersDb.createDb(); } catch (e) { /* ignore if it exists */ }
- //var usersDb = new CouchDB("test_suite_auth", {"X-Couch-Full-Commit":"false"});
- /*var server_config = [
- {
- section: "couch_httpd_auth",
- key: "authentication_db",
- value: usersDb.name
- }
- ];*/
-
- docs = makeDocs(1, 6);
- docs.push({
- _id: "_design/foo",
- language: "javascript"
- });
-
- dbPairsPrefixes = [
- {
- source: "",
- target: ""
- },
- {
- source: CouchDB.protocol + host + "/",
- target: ""
- },
- {
- source: "",
- target: CouchDB.protocol + "joe:erly@" + host + "/"
- },
- {
- source: CouchDB.protocol + host + "/",
- target: CouchDB.protocol + "joe:erly@" + host + "/"
- }
- ];
-
- for (i = 0; i < dbPairsPrefixes.length; i++) {
- //usersDb.deleteDb();
- populateSourceDb(docs);
- populateTargetDb([]);
-
- TEquals(true, targetDb.setSecObj({
- admins: {
- names: ["superman"],
- roles: ["god"]
- }
- }).ok);
-
-    // do NOT run on a modified server because we use the default DB
- //run_on_modified_server(server_config, function() {
- delete joeUserDoc._rev;
- var prevJoeUserDoc = defaultUsersDb.open(joeUserDoc._id);
- if (prevJoeUserDoc) {
- joeUserDoc._rev = prevJoeUserDoc._rev;
- }
- if(i == 0) {
- TEquals(true, defaultUsersDb.save(joeUserDoc).ok);
-      wait(5000); // presumably gives the cluster time to propagate the new user doc and refresh the auth cache
- }
- TEquals(true, CouchDB.login("joe", "erly").ok);
- TEquals('joe', CouchDB.session().userCtx.name);
-
- repResult = CouchDB.replicate(dbPairsPrefixes[i].source+sourceDb.name, dbPairsPrefixes[i].target+targetDb.name);
-
- TEquals(true, CouchDB.logout().ok);
-
- TEquals(true, repResult.ok);
- TEquals(docs.length, repResult.history[0].docs_read);
- TEquals((docs.length - 1), repResult.history[0].docs_written); // 1 ddoc
- TEquals(1, repResult.history[0].doc_write_failures);
- //});
-
- for (j = 0; j < docs.length; j++) {
- doc = docs[j];
- copy = targetDb.open(doc._id);
-
- if (doc._id.indexOf("_design/") === 0) {
- TEquals(null, copy);
- } else {
- T(copy !== null);
- TEquals(true, compareObjects(doc, copy));
- }
- }
- }
-
- // case 2) user triggering the replication is not a reader (nor admin) of the source DB
- dbPairsPrefixes = [
- {
- source: "",
- target: ""
- },
- {
- source: CouchDB.protocol + "joe:erly@" + host + "/",
- target: ""
- },
- {
- source: "",
- target: CouchDB.protocol + host + "/"
- },
- {
- source: CouchDB.protocol + "joe:erly@" + host + "/",
- target: CouchDB.protocol + host + "/"
- }
- ];
-
- for (i = 0; i < dbPairsPrefixes.length; i++) {
- //usersDb.deleteDb();
- populateSourceDb(docs);
- populateTargetDb([]);
-
- TEquals(true, sourceDb.setSecObj({
- admins: {
- names: ["superman"],
- roles: ["god"]
- },
- readers: {
- names: ["john"],
- roles: ["secret"]
- }
- }).ok);
-    // check that we start OK (and give the security object time to apply, to avoid Heisenbugs)
- for (j = 0; j < docs.length; j++) {
- doc = docs[j];
- copy = targetDb.open(doc._id);
- TEquals(null, copy);
- }
-
-    // do NOT run on a modified server because we use the default DB
- //run_on_modified_server(server_config, function() {
- delete joeUserDoc._rev;
- var prevJoeUserDoc = defaultUsersDb.open(joeUserDoc._id);
- if (prevJoeUserDoc) {
- joeUserDoc._rev = prevJoeUserDoc._rev;
- }
- if(i == 0) {
- TEquals(true, defaultUsersDb.save(joeUserDoc).ok);
- wait(5000);
- }
-
- TEquals(true, CouchDB.login("joe", "erly").ok);
- TEquals('joe', CouchDB.session().userCtx.name);
-
- try {
- CouchDB.replicate(dbPairsPrefixes[i].source+sourceDb.name, dbPairsPrefixes[i].target+targetDb.name);
- T(false, "should have raised an exception");
- } catch (x) {
-      // TODO: minor: the DB exists but is reported as not found - at least we get an exception, so this is acceptable
- //TEquals("unauthorized", x.error);
- T(!!x);
- }
-
- TEquals(true, CouchDB.logout().ok);
- //});
-
- for (j = 0; j < docs.length; j++) {
- doc = docs[j];
- copy = targetDb.open(doc._id);
- TEquals(null, copy);
- }
- }
-
-
- // COUCHDB-885 - push replication of a doc with attachment causes a
- // conflict in the target.
- populateSourceDb([]);
- populateTargetDb([]);
-
- doc = {
- _id: "doc1"
- };
- TEquals(true, sourceDb.save(doc).ok);
-
- repResult = CouchDB.replicate(
- sourceDb.name,
- CouchDB.protocol + host + "/" + targetDb.name
- );
- TEquals(true, repResult.ok);
- TEquals(true, repResult.history instanceof Array);
- TEquals(1, repResult.history.length);
- TEquals(1, repResult.history[0].docs_written);
- TEquals(1, repResult.history[0].docs_read);
- TEquals(0, repResult.history[0].doc_write_failures);
-
- doc["_attachments"] = {
- "hello.txt": {
- "content_type": "text/plain",
- "data": "aGVsbG8gd29ybGQ=" // base64:encode("hello world")
- },
- "foo.dat": {
- "content_type": "not/compressible",
- "data": "aSBhbSBub3QgZ3ppcGVk" // base64:encode("i am not gziped")
- }
- };
-
- TEquals(true, sourceDb.save(doc).ok);
- repResult = CouchDB.replicate(
- sourceDb.name,
- CouchDB.protocol + host + "/" + targetDb.name
- );
- TEquals(true, repResult.ok);
- TEquals(true, repResult.history instanceof Array);
- TEquals(2, repResult.history.length);
- TEquals(1, repResult.history[0].docs_written);
- TEquals(1, repResult.history[0].docs_read);
- TEquals(0, repResult.history[0].doc_write_failures);
-
- copy = targetDb.open(doc._id, {
- conflicts: true, deleted_conflicts: true,
- attachments: true, att_encoding_info: true});
- T(copy !== null);
- TEquals("undefined", typeof copy._conflicts);
- TEquals("undefined", typeof copy._deleted_conflicts);
- TEquals("text/plain", copy._attachments["hello.txt"]["content_type"]);
- TEquals("aGVsbG8gd29ybGQ=", copy._attachments["hello.txt"]["data"]);
- TEquals("gzip", copy._attachments["hello.txt"]["encoding"]);
- TEquals("not/compressible", copy._attachments["foo.dat"]["content_type"]);
- TEquals("aSBhbSBub3QgZ3ppcGVk", copy._attachments["foo.dat"]["data"]);
- TEquals("undefined", typeof copy._attachments["foo.dat"]["encoding"]);
- // end of test for COUCHDB-885
-
- // Test for COUCHDB-1242 (reject non-string query_params)
-  // TODO: non-String params crash CouchDB altogether
- /*
- try {
- CouchDB.replicate(sourceDb, targetDb, {
- body: {
- filter : "mydesign/myfilter",
- query_params : {
- "maxvalue": 4
- }
- }
- });
- } catch (e) {
- TEquals("bad_request", e.error);
- }
- */
-
-
- // Test that we can cancel a replication just by POSTing an object
- // like {"replication_id": Id, "cancel": true}. The replication ID
- // can be obtained from a continuous replication request response
- // (_local_id field), from _active_tasks or from the log
- populateSourceDb(makeDocs(1, 6));
- populateTargetDb([]);
-
- repResult = CouchDB.replicate(
- CouchDB.protocol + host + "/" + sourceDb.name,
- targetDb.name,
- {
- body: {
- continuous: true,
- create_target: true
- }
- }
- );
- TEquals(true, repResult.ok);
- TEquals('string', typeof repResult._local_id);
- var repId = repResult._local_id;
-
- var task = getTask(repId, 3000);
- T(task != null);
-
- TEquals(task["replication_id"], repId, "Replication found in _active_tasks");
- xhr = CouchDB.request(
- "POST", "/_replicate", {
- body: JSON.stringify({"replication_id": repId, "cancel": true}),
- headers: {"Content-Type": "application/json"}
- });
- TEquals(200, xhr.status, "Replication cancel request success");
- waitReplicationTaskStop(repId);
- task = getTask(repId);
- TEquals(null, task, "Replication was canceled");
-
- xhr = CouchDB.request(
- "POST", "/_replicate", {
- body: JSON.stringify({"replication_id": repId, "cancel": true}),
- headers: {"Content-Type": "application/json"}
- });
- TEquals(404, xhr.status, "2nd replication cancel failed");
-
-  // A non-admin user cannot cancel replications triggered by other users
- var userDoc = CouchDB.prepareUserDoc({
- name: "tony",
- roles: ["mafia"]
- }, "soprano");
-  // again, since _security is not available here, we use the default users DB
- defaultUsersDb = new CouchDB("_users", {"X-Couch-Full-Commit":"false"});
- //usersDb = new CouchDB("test_suite_auth", {"X-Couch-Full-Commit":"false"});
- // (and leave the server alone)
- /*server_config = [
- {
- section: "couch_httpd_auth",
- key: "authentication_db",
- value: usersDb.name
- }
- ];*/
-
- //run_on_modified_server(server_config, function() {
- populateSourceDb(makeDocs(1, 6));
- populateTargetDb([]);
- var prevUserDoc = defaultUsersDb.open(userDoc._id);
- if(prevUserDoc) {
- userDoc._rev = prevUserDoc._rev;
- }
- TEquals(true, defaultUsersDb.save(userDoc).ok);
-
- repResult = CouchDB.replicate(
- CouchDB.protocol + host + "/" + sourceDb.name,
- targetDb.name,
- {
- body: {
- continuous: true
- }
- }
- );
- TEquals(true, repResult.ok);
- TEquals('string', typeof repResult._local_id);
-
- TEquals(true, CouchDB.login("tony", "soprano").ok);
- TEquals('tony', CouchDB.session().userCtx.name);
-
- xhr = CouchDB.request(
- "POST", "/_replicate", {
- body: JSON.stringify({"replication_id": repResult._local_id, "cancel": true}),
- headers: {"Content-Type": "application/json"}
- });
- TEquals(401, xhr.status, "Unauthorized to cancel replication");
- TEquals("unauthorized", JSON.parse(xhr.responseText).error);
-
- TEquals(true, CouchDB.logout().ok);
-
- xhr = CouchDB.request(
- "POST", "/_replicate", {
- body: JSON.stringify({"replication_id": repResult._local_id, "cancel": true}),
- headers: {"Content-Type": "application/json"}
- });
- TEquals(200, xhr.status, "Authorized to cancel replication");
- //});
-
- // cleanup
- //usersDb.deleteDb();
- sourceDb.deleteDb();
- targetDb.deleteDb();
-  // (possibly cleanup after the 'file not found' tests above - harmless in any case)
- (new CouchDB("test_suite_db")).deleteDb();
-};